file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
/// FFI mirror of dbghelp's `SRCCODEINFOW`, passed by value to the
/// `SymEnumSourceLinesW` callback. Field names keep the Win32 PascalCase,
/// hence the `allow(non_snake_case)`. The 261-element buffers are
/// NUL-terminated UTF-16 (presumably MAX_PATH + 1 wide chars).
struct SrcCodeInfoW {
// Must be initialized to size_of::<SrcCodeInfoW>() before use (see Default).
SizeOfStruct: u32,
// Opaque pointer-sized key owned by dbghelp.
Key: usize,
// Base address of the module the source line belongs to.
ModBase: u64,
// Object-file name, NUL-terminated UTF-16.
Obj: [u16; 261],
// Source-file name, NUL-terminated UTF-16.
FileName: [u16; 261],
// Line number within FileName.
LineNumber: u32,
// Virtual address of the first instruction of the line.
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
/// FFI mirror of dbghelp's `SYMBOL_INFOW`, passed to the `SymEnumSymbolsW`
/// callback. Win32 declares `Name` as a flexible array sized by
/// `MaxNameLen`; this Rust version fixes it at 8192 wide chars instead.
struct SymbolInfoW {
// Size of the fixed portion of the struct; see the Default impl below.
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
// Size in bytes of the symbol (also delivered via the callback's `size` arg).
Size: u32,
// Base address of the module containing the symbol.
ModBase: u64,
Flags: u32,
Value: u64,
// Virtual address of the symbol.
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
// Length of Name in wide chars; in practice includes the NUL terminator.
NameLen: u32,
// Capacity of Name in wide chars that we advertise to dbghelp.
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Report only the fixed-size header to dbghelp: the full 8192-wide-char
// Name buffer (8192*2 bytes) is subtracted back off, while MaxNameLen
// below advertises the actual buffer capacity.
// NOTE(review): Win32 declares SYMBOL_INFOW with Name[1], so the
// documented SizeOfStruct keeps one wide char included; this subtracts
// the entire buffer. The original comment claimed "one byte already
// included", which does not match the math — confirm dbghelp accepts
// this value (it appears to in practice).
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
// Capacity of Name in wide chars; dbghelp writes at most this many.
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
/// FFI mirror of dbghelp's `IMAGEHLP_LINEW64`.
/// NOTE(review): not referenced anywhere in this file chunk — possibly kept
/// for future `SymGetLineFromAddrW64`-style lookups; confirm before removing.
struct ImagehlpLineW64 {
// Must be set to size_of::<ImagehlpLineW64>() before use.
SizeOfStruct: u32,
// Opaque pointer-sized key owned by dbghelp.
Key: usize,
LineNumber: u32,
// Pointer into dbghelp-owned memory; only valid until the next dbghelp call.
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self |
}
#[allow(non_snake_case)]
#[repr(C)]
/// FFI mirror of dbghelp's `IMAGEHLP_MODULEW64`, filled in by
/// `SymGetModuleInfoW64`.
/// NOTE(review): Win32 declares the trailing flag fields as BOOL (4 bytes);
/// they are 1-byte `bool` here, which changes the struct's size and thus the
/// SizeOfStruct value dbghelp sees — confirm dbghelp accepts this layout
/// (dbghelp historically accepts several historical struct sizes).
struct ImagehlpModule64W {
// Must be set to size_of::<ImagehlpModule64W>() before use.
SizeOfStruct: u32,
// Base virtual address of the loaded image.
BaseOfImage: u64,
ImageSize: u32,
// PE-header TimeDateStamp; pairs with ImageSize to identify a symbol-store build.
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
// SYM_TYPE enum value (e.g. pdb, export, deferred).
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
// PDB GUID used for symbol-server matching.
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
/// Zeroed structure with SizeOfStruct pre-filled, as dbghelp requires
/// before calling SymGetModuleInfoW64.
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
// The tuples collected into SymbolContext are
// (module-relative virtual address, name, symbol size or line number).
/// Raw pointer to the `SymbolContext` being filled in, smuggled through the
/// dbghelp callbacks' `usize` user-context parameter.
type Context = *mut SymbolContext;
/// Callback for `SymEnumSourceLinesW`: records one
/// (module-relative address, source file, line number) tuple into the
/// `SymbolContext` passed through `context`. Returning true continues
/// the enumeration.
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
    let srcline = unsafe { &*srcline_info };
    let context = unsafe { &mut *(context as Context) };

    // Take the UTF-16 code units up to (not including) the NUL terminator.
    let filename: Vec<u16> = srcline.FileName.iter().copied()
        .take_while(|&unit| unit != 0)
        .collect();

    // Decode lossily instead of `expect`ing: unwinding (panicking) out of an
    // `extern` callback across the FFI boundary is undefined behavior, so
    // this callback must never panic on malformed input.
    let source_filename = String::from_utf16_lossy(&filename);

    // NOTE(review): assumes Address >= ModBase (debug builds would panic on
    // underflow) — dbghelp should guarantee this for enumerated lines.
    context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
    true
}
/// Callback for `SymEnumSymbolsW`: records one
/// (module-relative address, symbol name, size) tuple into the
/// `SymbolContext` passed through `context`. Returning true continues
/// the enumeration.
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
    let symbol = unsafe { &*sym_info };
    let context = unsafe { &mut *(context as Context) };

    // Technically NameLen isn't supposed to contain the null terminator...
    // but it does, so skip empty names and keep enumerating.
    if symbol.NameLen < 1 {
        return true;
    }

    // Clamp to the fixed buffer so a bogus NameLen can never cause an
    // out-of-bounds slice panic, and decode lossily: unwinding (panicking)
    // out of an `extern` callback across the FFI boundary is undefined
    // behavior, so this callback must never panic on malformed input.
    let name_len = (symbol.NameLen as usize - 1).min(symbol.Name.len());
    let symbol_name = String::from_utf16_lossy(&symbol.Name[..name_len]);

    // NOTE(review): assumes Address >= ModBase — see srcline_callback.
    context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
    true
}
#[link(name = "dbghelp")]
// Hand-written bindings to the subset of dbghelp.dll used below.
// NOTE(review): the Win32 signatures return/accept BOOL (a 4-byte i32);
// declaring them as 1-byte Rust `bool` relies on the return register holding
// exactly 0 or 1 — works in practice on x86-64 but is not guaranteed by the
// ABI; confirm, or switch to i32 and compare against 0.
extern {
// Initializes the symbol handler for hProcess; must precede all other calls.
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
// Loads a module's symbol table; returns the module base (0 on failure).
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
// Invokes `callback` once per symbol; `context` is passed through verbatim.
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
// Invokes `callback` once per source line; requires private symbols.
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
// Tears down symbol handler state; pairs with SymInitializeW.
fn SymCleanup(hProcess: HANDLE) -> bool;
}
/// Encode `s` as a NUL-terminated UTF-16 ("wide") string for Win32 APIs.
///
/// Uses `str::encode_utf16` directly: for a valid-UTF-8 `&str` this produces
/// exactly the same code units as the previous `OsStr::encode_wide` round
/// trip, without depending on the Windows-only `std::os::windows` module.
pub fn win16_for_str(s: &str) -> Vec<u16> {
    s.encode_utf16().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
/// Symbols and source-line info harvested from one module, both sorted by
/// module-relative address after enumeration completes.
pub struct SymbolContext {
// (module-relative virtual address, symbol name, symbol size in bytes)
pub symbols: Vec<(u64, String, u64)>,
// (module-relative virtual address, source file name, line number)
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
///
/// Initializes dbghelp for the current process, loads `pe_file` as a module,
/// enumerates its symbols (and source lines, when private symbols exist),
/// then unloads and cleans up. Results are sorted by module-relative address.
///
/// # Panics
/// Panics if any dbghelp call other than source-line enumeration fails
/// (e.g. the file does not exist or is not a valid PE image).
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
// (null search path -> default symbol search behavior)
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
// BaseOfDll = 0 lets dbghelp pick the base; returns 0 on failure.
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base!= 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
// The callbacks fill symdb via the usize-smuggled pointer; symdb must
// stay alive and un-moved for the duration of both enumerations.
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if!SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
// Sort by module-relative address so callers can binary-search.
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
    -> std::io::Result<SymbolContext>
{
    // First fetch the binary (and its PDB) via symchk, then hand the local
    // path to the dbghelp-based parser.
    let local_path = download_symbol(module.name(), module.time(), module.size())?;
    Ok(get_symbols_from_file(&local_path))
}
/// Download a module and the corresponding PDB based on module_name,
/// it's TimeDateStamp and SizeOfImage from it's PE header
///
/// Writes a one-line symchk manifest to the temp directory, runs
/// `symchk /v /im` to fetch the binary, parses the local path out of the
/// verbose output, then runs symchk again on that path to fetch the PDB.
///
/// Returns a string containing a filename of the downloaded module.
///
/// # Errors
/// Returns an `Err` if symchk cannot be spawned, exits unsuccessfully, or
/// its output cannot be parsed (previously these cases panicked; callers
/// already receive an `io::Result`, so they are reported as errors now).
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
        -> std::io::Result<String> {
    let mut dir = std::env::temp_dir();
    dir.push("applepie_manifest");

    // Create manifest file for symchk. The identifier is TimeDateStamp and
    // SizeOfImage concatenated as lowercase hex, e.g. "calc.exe,8f598a9eb000,1"
    std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
        module_name, timedatestamp, sizeofimage))?;

    // Run symchk to download this module
    let res = Command::new("symchk")
        .arg("/v")
        .arg("/im")
        .arg(dir)
        .output()?;
    if !res.status.success() {
        return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
    }

    // Symchk apparently ran, check output (the /v diagnostics go to stderr)
    let stderr = std::str::from_utf8(&res.stderr)
        .map_err(|_| Error::new(ErrorKind::InvalidData,
            "Failed to convert symchk output to utf-8"))?;

    let mut filename = None;
    for line in stderr.lines() {
        const PREFIX: &str = "DBGHELP: ";
        const POSTFIX: &str = " - OK";

        // The line that contains the filename looks like:
        // DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
        if !line.starts_with(PREFIX) { continue; }
        if !line.ends_with(POSTFIX) { continue; }

        // We only expect one line of output to match the above criteria.
        // If there are multiple we'll need to improve this "parser".
        if filename.is_some() {
            return Err(Error::new(ErrorKind::InvalidData,
                "Multiple filenames in symchk output"));
        }

        // Save the filename we found
        filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
    }

    // Report an error (rather than panicking) if symchk never printed the
    // expected download line
    let filename = filename.ok_or_else(|| Error::new(ErrorKind::NotFound,
        "Did not get expected symchk output"))?;

    // Run symchk to download the pdb for the file
    let res = Command::new("symchk")
        .arg(filename)
        .output()?;
    if !res.status.success() {
        return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
    }

    // Now we have downloaded the PDB for this file :)
    Ok(filename.into())
}
// Integration test: requires network access and `symchk` (from the Windows
// SDK / Debugging Tools) on PATH; downloads calc.exe's symbols from the
// Microsoft symbol server using a known TimeDateStamp/SizeOfImage pair.
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
} | identifier_body |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct | {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base!= 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if!SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// it's TimeDateStamp and SizeOfImage from it's PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if!res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if!line.starts_with(PREFIX) { continue; }
if!line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if!res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| SrcCodeInfoW | identifier_name |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct SrcCodeInfoW {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self {
SrcCodeInfoW {
SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Vector of (virtual address, symbol name, symbol size)
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
pub symbols: Vec<(u64, String, u64)>,
pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
assert!(module_base!= 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
if!SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// it's TimeDateStamp and SizeOfImage from it's PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
if!res.status.success() |
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
if!line.starts_with(PREFIX) { continue; }
if!line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
if!res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
}
| {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
} | conditional_block |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
/// Phase of the cursor blink cycle, mirroring Neovim's
/// blinkwait/blinkon/blinkoff settings.
enum BlinkState {
// Initial delay after the cursor moves, before blinking starts.
Waiting,
// Cursor currently visible.
On,
// Cursor currently hidden.
Off
}
/// Tracks where the cursor is in its blink cycle and detects cursor changes
/// that should restart the cycle.
struct BlinkStatus {
// Current phase of the blink cycle.
state: BlinkState,
// Instant of the last phase transition; used to time the next one.
last_transition: Instant,
// Last cursor seen, so a change (move/shape) can reset the cycle.
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
/// Fresh blink tracker: starts in Waiting with no previously-seen cursor,
/// so the first update_status call will initialize the cycle.
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor!= self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait!= Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
| match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
/// One animated corner of the cursor quad.
pub struct Corner {
// Animated position in pixels; chases the destination each frame.
pub current_position: Point,
// Offset from the cursor center, in units of one font cell
// (set by CursorRenderer::set_cursor_shape).
pub relative_position: Point,
}
impl Corner {
    /// Corner starting at the origin with the given cell-relative offset
    /// from the cursor center.
    pub fn new(relative_position: Point) -> Corner {
        Corner {
            current_position: Point::new(0.0, 0.0),
            relative_position
        }
    }

    /// Advance this corner one animation step toward `destination`
    /// (the cursor center, in pixels). Returns true while the corner is
    /// still more than a negligible distance from its target, i.e. while
    /// further redraws are needed.
    pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
        // Corner offset scaled from cell units into pixels.
        let relative_scaled_position: Point =
            (self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
        let corner_destination = destination + relative_scaled_position;
        let delta = corner_destination - self.current_position;

        if delta.length() > 0.0 {
            // Project relative_scaled_position (actual position of the corner
            // relative to the center of the cursor) onto the remaining distance
            // vector. This gives us the relative distance to the destination
            // along the delta vector which we can then use to scale the
            // motion_percentage.
            let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();

            // The motion_percentage is then equal to the motion_scale factor
            // times the MOTION_PERCENTAGE_SPREAD and added to the
            // AVERAGE_MOTION_PERCENTAGE. This way all of the percentages are
            // positive and spread out by the spread constant.
            let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;

            // Jump part of the way toward the destination. Corners whose
            // offset points away from the motion move slower, so the cursor
            // visually smears toward its target. `delta` is still current
            // here (current_position has not moved since it was computed),
            // so the original redundant recomputation of it was dropped.
            self.current_position += delta * motion_percentage;
        }

        delta.length() > 0.001
    }
}
/// Renders the animated, smearing cursor quad.
pub struct CursorRenderer {
// The four animated corners of the cursor quad (order matches STANDARD_CORNERS).
pub corners: Vec<Corner>,
// Last (column, row) grid position drawn; used to detect movement.
pub previous_position: (u64, u64),
// Frames spent debouncing jumps to the command line (bottom row);
// compared against COMMAND_LINE_DELAY_FRAMES.
pub command_line_delay: u64,
// Blink-cycle tracker deciding whether the cursor is visible this frame.
blink_status: BlinkStatus
}
impl CursorRenderer {
/// New renderer with all four corners at the origin, shaped as a full
/// Block cursor (DEFAULT_CELL_PERCENTAGE is only relevant for the
/// bar/horizontal shapes).
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
// One corner per entry in STANDARD_CORNERS.
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y!= grid_y {
self.command_line_delay = self.command_line_delay + 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if!center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
} |
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
| random_line_split |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool {
if self.previous_cursor.is_none() || new_cursor!= self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait!= Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
// Project relative_scaled_position (actual possition of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn | (&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y!= grid_y {
self.command_line_delay = self.command_line_delay + 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if!center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
}
| draw | identifier_name |
cursor_renderer.rs | use std::time::{Duration, Instant};
use skulpin::skia_safe::{Canvas, Paint, Path, Point};
use crate::renderer::CachingShaper;
use crate::editor::{EDITOR, Colors, Cursor, CursorShape};
use crate::redraw_scheduler::REDRAW_SCHEDULER;
const AVERAGE_MOTION_PERCENTAGE: f32 = 0.7;
const MOTION_PERCENTAGE_SPREAD: f32 = 0.5;
const COMMAND_LINE_DELAY_FRAMES: u64 = 5;
const DEFAULT_CELL_PERCENTAGE: f32 = 1.0 / 8.0;
const STANDARD_CORNERS: &[(f32, f32); 4] = &[(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)];
enum BlinkState {
Waiting,
On,
Off
}
struct BlinkStatus {
state: BlinkState,
last_transition: Instant,
previous_cursor: Option<Cursor>
}
impl BlinkStatus {
pub fn new() -> BlinkStatus {
BlinkStatus {
state: BlinkState::Waiting,
last_transition: Instant::now(),
previous_cursor: None
}
}
pub fn update_status(&mut self, new_cursor: &Cursor) -> bool | BlinkState::On => new_cursor.blinkon
}.filter(|millis| millis > &0).map(|millis| Duration::from_millis(millis));
if delay.map(|delay| self.last_transition + delay < Instant::now()).unwrap_or(false) {
self.state = match self.state {
BlinkState::Waiting => BlinkState::On,
BlinkState::On => BlinkState::Off,
BlinkState::Off => BlinkState::On
};
self.last_transition = Instant::now();
}
let scheduled_frame = (match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
BlinkState::On => new_cursor.blinkon
}).map(|delay| self.last_transition + Duration::from_millis(delay));
if let Some(scheduled_frame) = scheduled_frame {
REDRAW_SCHEDULER.schedule(scheduled_frame);
}
match self.state {
BlinkState::Waiting | BlinkState::Off => false,
BlinkState::On => true
}
}
}
#[derive(Debug, Clone)]
pub struct Corner {
pub current_position: Point,
pub relative_position: Point,
}
impl Corner {
pub fn new(relative_position: Point) -> Corner {
Corner {
current_position: Point::new(0.0, 0.0),
relative_position
}
}
pub fn update(&mut self, font_dimensions: Point, destination: Point) -> bool {
let relative_scaled_position: Point =
(self.relative_position.x * font_dimensions.x, self.relative_position.y * font_dimensions.y).into();
let corner_destination = destination + relative_scaled_position;
let delta = corner_destination - self.current_position;
if delta.length() > 0.0 {
// Project relative_scaled_position (actual possition of the corner relative to the
// center of the cursor) onto the remaining distance vector. This gives us the relative
// distance to the destination along the delta vector which we can then use to scale the
// motion_percentage.
let motion_scale = delta.dot(relative_scaled_position) / delta.length() / font_dimensions.length();
// The motion_percentage is then equal to the motion_scale factor times the
// MOTION_PERCENTAGE_SPREAD and added to the AVERAGE_MOTION_PERCENTAGE. This way all of
// the percentages are positive and spread out by the spread constant.
let motion_percentage = motion_scale * MOTION_PERCENTAGE_SPREAD + AVERAGE_MOTION_PERCENTAGE;
// Then the current_position is animated by taking the delta vector, multiplying it by
// the motion_percentage and adding the resulting value to the current position causing
// the cursor to "jump" toward the target destination. Since further away corners jump
// slower, the cursor appears to smear toward the destination in a satisfying and
// visually trackable way.
let delta = corner_destination - self.current_position;
self.current_position += delta * motion_percentage;
}
delta.length() > 0.001
}
}
pub struct CursorRenderer {
pub corners: Vec<Corner>,
pub previous_position: (u64, u64),
pub command_line_delay: u64,
blink_status: BlinkStatus
}
impl CursorRenderer {
pub fn new() -> CursorRenderer {
let mut renderer = CursorRenderer {
corners: vec![Corner::new((0.0, 0.0).into()); 4],
previous_position: (0, 0),
command_line_delay: 0,
blink_status: BlinkStatus::new()
};
renderer.set_cursor_shape(&CursorShape::Block, DEFAULT_CELL_PERCENTAGE);
renderer
}
fn set_cursor_shape(&mut self, cursor_shape: &CursorShape, cell_percentage: f32) {
self.corners = self.corners
.clone()
.into_iter().enumerate()
.map(|(i, corner)| {
let (x, y) = STANDARD_CORNERS[i];
Corner {
relative_position: match cursor_shape {
CursorShape::Block => (x, y).into(),
// Transform the x position so that the right side is translated over to
// the BAR_WIDTH position
CursorShape::Vertical => ((x + 0.5) * cell_percentage - 0.5, y).into(),
// Do the same as above, but flip the y coordinate and then flip the result
// so that the horizontal bar is at the bottom of the character space
// instead of the top.
CursorShape::Horizontal => (x, -((-y + 0.5) * cell_percentage - 0.5)).into()
},
.. corner
}
})
.collect::<Vec<Corner>>();
}
pub fn draw(&mut self,
cursor: Cursor, default_colors: &Colors,
font_width: f32, font_height: f32,
paint: &mut Paint, shaper: &mut CachingShaper,
canvas: &mut Canvas) {
let render = self.blink_status.update_status(&cursor);
self.previous_position = {
let editor = EDITOR.lock().unwrap();
let (_, grid_y) = cursor.position;
let (_, previous_y) = self.previous_position;
let (_, height) = editor.size;
if grid_y == height - 1 && previous_y!= grid_y {
self.command_line_delay = self.command_line_delay + 1;
if self.command_line_delay < COMMAND_LINE_DELAY_FRAMES {
self.previous_position
} else {
self.command_line_delay = 0;
cursor.position
}
} else {
self.command_line_delay = 0;
cursor.position
}
};
let (grid_x, grid_y) = self.previous_position;
let (character, font_dimensions): (String, Point) = {
let editor = EDITOR.lock().unwrap();
let character = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize).cloned())
.flatten()
.map(|(character, _)| character)
.unwrap_or(' '.to_string());
let is_double = editor.grid
.get(grid_y as usize)
.and_then(|row| row.get(grid_x as usize + 1).cloned())
.flatten()
.map(|(character, _)| character.is_empty())
.unwrap_or(false);
let font_width = match (is_double, &cursor.shape) {
(true, CursorShape::Block) => font_width * 2.0,
_ => font_width
};
(character, (font_width, font_height).into())
};
let destination: Point = (grid_x as f32 * font_width, grid_y as f32 * font_height).into();
let center_destination = destination + font_dimensions * 0.5;
self.set_cursor_shape(&cursor.shape, cursor.cell_percentage.unwrap_or(DEFAULT_CELL_PERCENTAGE));
let mut animating = false;
if!center_destination.is_zero() {
for corner in self.corners.iter_mut() {
let corner_animating = corner.update(font_dimensions, center_destination);
animating = animating || corner_animating;
}
}
if animating {
REDRAW_SCHEDULER.queue_next_frame();
}
if cursor.enabled && render {
// Draw Background
paint.set_color(cursor.background(&default_colors).to_color());
// The cursor is made up of four points, so I create a path with each of the four
// corners.
let mut path = Path::new();
path.move_to(self.corners[0].current_position);
path.line_to(self.corners[1].current_position);
path.line_to(self.corners[2].current_position);
path.line_to(self.corners[3].current_position);
path.close();
canvas.draw_path(&path, &paint);
// Draw foreground
paint.set_color(cursor.foreground(&default_colors).to_color());
canvas.save();
canvas.clip_path(&path, None, Some(false));
let blobs = &shaper.shape_cached(&character.to_string(), false, false);
for blob in blobs.iter() {
canvas.draw_text_blob(&blob, destination, &paint);
}
canvas.restore();
}
}
}
| {
if self.previous_cursor.is_none() || new_cursor != self.previous_cursor.as_ref().unwrap() {
self.previous_cursor = Some(new_cursor.clone());
self.last_transition = Instant::now();
if new_cursor.blinkwait.is_some() && new_cursor.blinkwait != Some(0) {
self.state = BlinkState::Waiting;
} else {
self.state = BlinkState::On;
}
}
if new_cursor.blinkwait == Some(0) ||
new_cursor.blinkoff == Some(0) ||
new_cursor.blinkon == Some(0) {
return true;
}
let delay = match self.state {
BlinkState::Waiting => new_cursor.blinkwait,
BlinkState::Off => new_cursor.blinkoff,
| identifier_body |
reader.rs | use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) | .filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if!force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd!= last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remeber it?
// => Well, we'll give each invokation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
if!item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
let _ = tx_control.send(true);
}
| {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',') | identifier_body |
reader.rs | use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn | (&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if!force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd!= last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remeber it?
// => Well, we'll give each invokation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
/// Read lines from `source_file` (piped stdin) when given, otherwise from the
/// stdout of `cmd` executed via `sh -c`; convert every line into an `Item` and
/// forward the items in batches over `tx_sender`.
///
/// `rx_cmd` is the external stop signal; an internal control channel signals
/// normal end-of-input. Either one makes the watcher thread kill and reap the
/// child process.
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
// Prefer the piped stdin file; fall back to spawning the command.
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
// `tx_control` lets the reading loop below tell the watcher thread that
// input is exhausted, so the child can be reaped.
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
// Poll both stop channels at a 5 ms cadence.
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
// NOTE(review): the read lock on RUN_NUM is dropped before the write lock
// below is taken, so two readers racing here could derive the same run
// number — confirm this path is effectively single-threaded.
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
// Strip the trailing record terminator: CRLF, LF or NUL.
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
// (run_num, index) uniquely identifies the item across reruns.
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
// NOTE(review): `trailing_zeros() > 12` is true for multiples of
// 8192, not 4096 as the comment above says — confirm the intended
// batch size.
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
// Flush any remaining partial batch.
if!item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
// Input exhausted: tell the watcher thread to reap the child and exit.
let _ = tx_control.send(true);
}
| run | identifier_name |
reader.rs | use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
/// Options controlling how raw input lines are read and turned into `Item`s.
/// Shared between `Reader` and its background reading threads via
/// `Arc<RwLock<..>>`.
struct ReaderOption {
/// Set by `--ansi`; forwarded to `Item::new` for ANSI color handling.
pub use_ansi_color: bool,
/// NOTE(review): not referenced anywhere in this file — presumably a
/// default command argument; confirm against callers before relying on it.
pub default_arg: String,
/// Field ranges from `--with-nth`, used when constructing items.
pub transform_fields: Vec<FieldRange>,
/// Field ranges from `--nth`, used to restrict matching.
pub matching_fields: Vec<FieldRange>,
/// Regex that splits a line into fields (from `--delimiter`).
pub delimiter: Regex,
/// NOTE(review): placeholder string ("{}" by default); unused in this
/// file — verify callers.
pub replace_str: String,
/// Byte that terminates an input record: b'\n', or b'\0' with `--read0`.
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(),
matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n',
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
/// Owns the reading side of the pipeline: receives control events, spawns
/// background reading threads, and relays parsed items toward the consumer.
pub struct Reader {
/// Control events (e.g. `EvReaderRestart`, `EvActAccept`) from the coordinator.
rx_cmd: EventReceiver,
/// Outgoing channel; handed to the `CachedSender` which relays items onward.
tx_item: SyncSender<(Event, EventArg)>,
/// Parsing options, shared with every spawned reader thread.
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
/// Create a reader wired to the coordinator (`rx_cmd`), the item consumer
/// (`tx_item`) and, optionally, a re-opened stdin for piped input.
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
/// Apply command-line options to the shared `ReaderOption`.
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
/// Event loop: owns the lifecycle of at most one background reading thread
/// and restarts it whenever a new command arrives. Returns when `rx_cmd`
/// is closed.
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
// Panics if the payload is not (command, query, force_update).
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
// Nothing changed and no forced refresh: skip the restart.
if!force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd!= last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
// `take()` means only the first spawned reader ever sees the
// piped stdin; later restarts read from the command instead.
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// Acknowledge the accept action so the requester can proceed.
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 |
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
if!item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
let _ = tx_control.send(true);
}
| {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
} | conditional_block |
reader.rs | use std::sync::mpsc::{Receiver, Sender, SyncSender, channel};
use std::error::Error;
use item::Item;
use std::sync::{Arc, RwLock};
use std::process::{Command, Stdio, Child};
use std::io::{BufRead, BufReader};
use event::{EventSender, EventReceiver, Event, EventArg};
use std::thread::JoinHandle;
use std::thread;
use std::time::Duration;
use std::collections::HashMap;
use std::mem;
use std::fs::File;
use regex::Regex;
use sender::CachedSender;
use field::{FieldRange, parse_range};
use clap::ArgMatches;
struct ReaderOption {
pub use_ansi_color: bool,
pub default_arg: String,
pub transform_fields: Vec<FieldRange>,
pub matching_fields: Vec<FieldRange>,
pub delimiter: Regex,
pub replace_str: String,
pub line_ending: u8,
}
impl ReaderOption {
pub fn new() -> Self {
ReaderOption {
use_ansi_color: false,
default_arg: String::new(),
transform_fields: Vec::new(), | }
}
pub fn parse_options(&mut self, options: &ArgMatches) {
if options.is_present("ansi") {
self.use_ansi_color = true;
}
if let Some(delimiter) = options.value_of("delimiter") {
self.delimiter = Regex::new(&(".*?".to_string() + delimiter))
.unwrap_or_else(|_| Regex::new(r".*?[\t ]").unwrap());
}
if let Some(transform_fields) = options.value_of("with-nth") {
self.transform_fields = transform_fields.split(',')
.filter_map(|string| {
parse_range(string)
})
.collect();
}
if let Some(matching_fields) = options.value_of("nth") {
self.matching_fields = matching_fields.split(',')
.filter_map(|string| {
parse_range(string)
}).collect();
}
if options.is_present("read0") {
self.line_ending = b'\0';
}
}
}
pub struct Reader {
rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
option: Arc<RwLock<ReaderOption>>,
real_stdin: Option<File>, // used to support piped output
}
impl Reader {
pub fn new(rx_cmd: EventReceiver,
tx_item: SyncSender<(Event, EventArg)>,
real_stdin: Option<File>) -> Self {
Reader {
rx_cmd: rx_cmd,
tx_item: tx_item,
option: Arc::new(RwLock::new(ReaderOption::new())),
real_stdin,
}
}
pub fn parse_options(&mut self, options: &ArgMatches) {
let mut option = self.option.write().unwrap();
option.parse_options(options);
}
pub fn run(&mut self) {
// event loop
let mut thread_reader: Option<JoinHandle<()>> = None;
let mut tx_reader: Option<Sender<bool>> = None;
let mut last_command = "".to_string();
let mut last_query = "".to_string();
// start sender
let (tx_sender, rx_sender) = channel();
let tx_item = self.tx_item.clone();
let mut sender = CachedSender::new(rx_sender, tx_item);
thread::spawn(move || {
sender.run();
});
while let Ok((ev, arg)) = self.rx_cmd.recv() {
match ev {
Event::EvReaderRestart => {
// close existing command or file if exists
let (cmd, query, force_update) = *arg.downcast::<(String, String, bool)>().unwrap();
if!force_update && cmd == last_command && query == last_query { continue; }
// restart command with new `command`
if cmd!= last_command {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
// create needed data for thread
let (tx, rx_reader) = channel();
tx_reader = Some(tx);
let cmd_clone = cmd.clone();
let option_clone = Arc::clone(&self.option);
let tx_sender_clone = tx_sender.clone();
let query_clone = query.clone();
let real_stdin = self.real_stdin.take();
// start the new command
thread_reader = Some(thread::spawn(move || {
let _ = tx_sender_clone.send((Event::EvReaderStarted, Box::new(true)));
let _ = tx_sender_clone.send((Event::EvSenderRestart, Box::new(query_clone)));
reader(&cmd_clone, rx_reader, &tx_sender_clone, option_clone, real_stdin);
let _ = tx_sender_clone.send((Event::EvReaderStopped, Box::new(true)));
}));
} else {
// tell sender to restart
let _ = tx_sender.send((Event::EvSenderRestart, Box::new(query.clone())));
}
last_command = cmd;
last_query = query;
}
Event::EvActAccept => {
// stop existing command
tx_reader.take().map(|tx| {tx.send(true)});
thread_reader.take().map(|thrd| {thrd.join()});
let tx_ack: Sender<usize> = *arg.downcast().unwrap();
let _ = tx_ack.send(0);
}
_ => {
// do nothing
}
}
}
}
}
fn get_command_output(cmd: &str) -> Result<(Option<Child>, Box<BufRead>), Box<Error>> {
let mut command = try!(Command::new("sh")
.arg("-c")
.arg(cmd)
.stdout(Stdio::piped())
.stderr(Stdio::null())
.spawn());
let stdout = try!(command.stdout.take().ok_or_else(|| "command output: unwrap failed".to_owned()));
Ok((Some(command), Box::new(BufReader::new(stdout))))
}
// Consider that you invoke a command with different arguments several times
// If you select some items each time, how will skim remember it?
// => Well, we'll give each invocation a number, i.e. RUN_NUM
// What if you invoke the same command and same arguments twice?
// => We use NUM_MAP to specify the same run number.
lazy_static! {
static ref RUN_NUM: RwLock<usize> = RwLock::new(0);
static ref NUM_MAP: RwLock<HashMap<String, usize>> = RwLock::new(HashMap::new());
}
fn reader(cmd: &str,
rx_cmd: Receiver<bool>,
tx_sender: &EventSender,
option: Arc<RwLock<ReaderOption>>,
source_file: Option<File>) {
debug!("reader:reader: called");
let (command, mut source): (Option<Child>, Box<BufRead>) = if source_file.is_some() {
(None, Box::new(BufReader::new(source_file.unwrap())))
} else {
get_command_output(cmd).expect("command not found")
};
let (tx_control, rx_control) = channel();
thread::spawn(move || {
// listen to `rx` for command to quit reader
// kill command if it is got
loop {
if rx_cmd.try_recv().is_ok() {
// clean up resources
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
if rx_control.try_recv().is_ok() {
command.map(|mut x| {
let _ = x.kill();
let _ = x.wait();
});
break;
}
thread::sleep(Duration::from_millis(5));
}
});
let opt = option.read().unwrap();
// set the proper run number
let run_num = {*RUN_NUM.read().unwrap()};
let run_num = *NUM_MAP.write()
.unwrap()
.entry(cmd.to_string())
.or_insert_with(|| {
*(RUN_NUM.write().unwrap()) = run_num + 1;
run_num + 1
});
let mut index = 0;
let mut item_group = Vec::new();
let mut buffer = Vec::with_capacity(100);
loop {
buffer.clear();
// start reading
match source.read_until(opt.line_ending, &mut buffer) {
Ok(n) => {
if n == 0 { break; }
debug!("reader:reader: read a new line. index = {}", index);
if buffer.ends_with(&[b'\r', b'\n']) {
buffer.pop();
buffer.pop();
} else if buffer.ends_with(&[b'\n']) || buffer.ends_with(&[b'\0']) {
buffer.pop();
}
debug!("reader:reader: create new item. index = {}", index);
let item = Item::new(String::from_utf8_lossy(&buffer),
opt.use_ansi_color,
&opt.transform_fields,
&opt.matching_fields,
&opt.delimiter,
(run_num, index));
item_group.push(Arc::new(item));
debug!("reader:reader: item created. index = {}", index);
index += 1;
// % 4096 == 0
if index.trailing_zeros() > 12 {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
}
Err(_err) => {} // String not UTF8 or other error, skip.
}
}
if!item_group.is_empty() {
let _ = tx_sender.send((Event::EvReaderNewItem, Box::new(mem::replace(&mut item_group, Vec::new()))));
}
let _ = tx_control.send(true);
} | matching_fields: Vec::new(),
delimiter: Regex::new(r".*?\t").unwrap(),
replace_str: "{}".to_string(),
line_ending: b'\n', | random_line_split |
main.rs | //! A simple demonstration of how to construct and use Canvasses by splitting up the window.
#[macro_use] extern crate conrod;
#[macro_use] extern crate serde_derive;
extern crate chrono;
extern crate serde;
mod support;
/// Binary entry point; all real work happens in `feature::main`.
fn main() {
feature::main();
}
mod feature {
const FILENAME : &str = "timetracker.json";
extern crate find_folder;
use std::fs::File;
use std::io::prelude::*;
use std::time::Duration;
use std::thread::sleep;
use conrod;
use conrod::backend::glium::glium;
use conrod::backend::glium::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
/// Build the window and UI, load persisted timer state from
/// `timetracker.json`, then run the render/event loop until the window is
/// closed or Escape is pressed; state is written back to disk on exit.
pub fn main() {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
// NOTE(review): a fixed 500 ms sleep per frame caps the UI at roughly two
// updates per second — confirm this is intentional (CPU saving) and not a
// leftover debug value.
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
// Seed text for the "add timer" text box.
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
// Load saved timers. A missing/unopenable file yields an empty list, but
// a present-yet-corrupt file panics via `expect`.
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event,.. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
// Persist all timer state before quitting.
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
/// Lay out the whole GUI for one frame: header, tab canvas, one row per
/// timer (toggle + name + remove button), the name entry box and the "+"
/// button. Mutates `timerstates` in response to widget events; `ids_list`
/// grows in lockstep with `timerstates`.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState>,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
// (The "Statistics" tab is commented out / not implemented.)
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
// Allocate widget ids for any newly added timers.
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()))_
;
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy, ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
// Active timers show the live elapsed time of the current session
// (encoded as an offset from chrono::MIN_DATE); inactive timers show
// the accumulated total.
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
// Toggle starts/stops the timer; stopping folds the session into `total`.
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
// "-" removes the timer; return immediately because the indices of the
// remaining rows (and `ids_list`) are stale for the rest of this frame.
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
// Text box for naming a new timer; Enter creates it.
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
// "+" button: same effect as pressing Enter in the text box.
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
/// Render a chrono timestamp — interpreted by the callers as an offset from
/// `chrono::MIN_DATE` — as a zero-padded "HH:MM:SS" string.
fn format_time(t : chrono::DateTime<Utc>) -> String {
// Whole seconds since the MIN_DATE epoch; hours/minutes/seconds are
// derived from this single value (identical to num_hours()/num_minutes()
// %60/num_seconds()%60 in the duration API).
let secs = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32, 0u32, 0u32)).num_seconds();
format!("{:02}:{:02}:{:02}", secs / 3600, (secs / 60) % 60, secs % 60)
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
} | widget_ids! {
struct ListItem {
master,
toggle,
remove,
name,
time,
session,
}
}
} | random_line_split |
|
main.rs | //! A simple demonstration of how to construct and use Canvasses by splitting up the window.
#[macro_use] extern crate conrod;
#[macro_use] extern crate serde_derive;
extern crate chrono;
extern crate serde;
mod support;
fn main() {
feature::main();
}
mod feature {
const FILENAME : &str = "timetracker.json";
extern crate find_folder;
use std::fs::File;
use std::io::prelude::*;
use std::time::Duration;
use std::thread::sleep;
use conrod;
use conrod::backend::glium::glium;
use conrod::backend::glium::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() | let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event,.. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState>,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy, ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn format_time(t : chrono::DateTime<Utc>) -> String {
let dur = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32,0u32,0u32));
let ret = format!(
"{:02}:{:02}:{:02}",
dur.num_hours(),
dur.num_minutes()%60,
dur.num_seconds()%60
);
ret
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
}
widget_ids! {
struct ListItem {
master,
toggle,
remove,
name,
time,
session,
}
}
} | {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap(); | identifier_body |
main.rs | //! A simple demonstration of how to construct and use Canvasses by splitting up the window.
#[macro_use] extern crate conrod;
#[macro_use] extern crate serde_derive;
extern crate chrono;
extern crate serde;
mod support;
fn main() {
feature::main();
}
mod feature {
const FILENAME : &str = "timetracker.json";
extern crate find_folder;
use std::fs::File;
use std::io::prelude::*;
use std::time::Duration;
use std::thread::sleep;
use conrod;
use conrod::backend::glium::glium;
use conrod::backend::glium::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event,.. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState>,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy, ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => {
*text = txt;
},
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn | (t : chrono::DateTime<Utc>) -> String {
let dur = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32,0u32,0u32));
let ret = format!(
"{:02}:{:02}:{:02}",
dur.num_hours(),
dur.num_minutes()%60,
dur.num_seconds()%60
);
ret
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
}
widget_ids! {
struct ListItem {
master,
toggle,
remove,
name,
time,
session,
}
}
} | format_time | identifier_name |
main.rs | //! A simple demonstration of how to construct and use Canvasses by splitting up the window.
#[macro_use] extern crate conrod;
#[macro_use] extern crate serde_derive;
extern crate chrono;
extern crate serde;
mod support;
fn main() {
feature::main();
}
mod feature {
const FILENAME : &str = "timetracker.json";
extern crate find_folder;
use std::fs::File;
use std::io::prelude::*;
use std::time::Duration;
use std::thread::sleep;
use conrod;
use conrod::backend::glium::glium;
use conrod::backend::glium::glium::Surface;
extern crate serde_json;
use support;
use chrono::prelude::*;
use chrono;
pub fn main() {
const WIDTH: u32 = 800;
const HEIGHT: u32 = 600;
const SLEEPTIME: Duration = Duration::from_millis(500);
// Build the window.
let mut events_loop = glium::glutin::EventsLoop::new();
let window = glium::glutin::WindowBuilder::new()
.with_title("Timetracker")
.with_dimensions(WIDTH, HEIGHT);
let context = glium::glutin::ContextBuilder::new()
.with_vsync(true)
.with_multisampling(4);
let display = glium::Display::new(window, context, &events_loop).unwrap();
// construct our `Ui`.
let mut ui = conrod::UiBuilder::new([WIDTH as f64, HEIGHT as f64]).build();
// Add a `Font` to the `Ui`'s `font::Map` from file.
let assets = find_folder::Search::KidsThenParents(3, 5).for_folder("assets").unwrap();
let font_path = assets.join("fonts/NotoSans/NotoSans-Regular.ttf");
ui.fonts.insert_from_file(font_path).unwrap();
// A type used for converting `conrod::render::Primitives` into `Command`s that can be used
// for drawing to the glium `Surface`.
let mut renderer = conrod::backend::glium::Renderer::new(&display).unwrap();
// The image map describing each of our widget->image mappings (in our case, none).
let image_map = conrod::image::Map::<glium::texture::Texture2d>::new();
// Instantiate the generated list of widget identifiers.
let ids = &mut Ids::new(ui.widget_id_generator());
let mut ids_list = Vec::new();
let mut curname = "Enter name".to_string();
// Poll events from the window.
let mut event_loop = support::EventLoop::new();
let mut timerstates : Vec<support::TimerState> = match File::open(FILENAME) {
Ok(mut a) => {
let mut s = String::new();
a.read_to_string(&mut s).expect("Failed to read config");
serde_json::from_str(&s).expect("Failed convert to json")
},
Err(_e) => {
Vec::new()
}
};
'main: loop {
sleep(SLEEPTIME);
// Handle all events.
for event in event_loop.next(&mut events_loop) {
// Use the `winit` backend feature to convert the winit event to a conrod one.
if let Some(event) = conrod::backend::winit::convert_event(event.clone(), &display) {
ui.handle_event(event);
event_loop.needs_update();
}
match event {
glium::glutin::Event::WindowEvent { event,.. } => match event {
// Break from the loop upon `Escape`.
glium::glutin::WindowEvent::Closed |
glium::glutin::WindowEvent::KeyboardInput {
input: glium::glutin::KeyboardInput {
virtual_keycode: Some(glium::glutin::VirtualKeyCode::Escape),
..
},
..
} => {
let mut f = File::create(FILENAME).unwrap();
f.write_all(serde_json::to_string(&timerstates)
.unwrap()
.as_bytes()).unwrap();
break'main
},
_ => (),
},
_ => (),
}
}
// Instantiate all widgets in the GUI.
set_widgets(ui.set_widgets(), ids, &mut ids_list, &mut timerstates, &mut curname);
// Render the `Ui` and then display it on the screen.
if let Some(primitives) = ui.draw_if_changed() {
renderer.fill(&display, primitives, &image_map);
let mut target = display.draw();
target.clear_color(0.0, 0.0, 0.0, 1.0);
renderer.draw(&display, &mut target, &image_map).unwrap();
target.finish().unwrap();
}
}
}
// Draw the Ui.
fn set_widgets(ref mut ui: conrod::UiCell, ids: &mut Ids, ids_list: &mut Vec<ListItem>, timerstates : &mut Vec<support::TimerState>,text : &mut String) {
use conrod::{color, widget, Colorable, Borderable, Positionable, Labelable, Sizeable, Widget};
let main_color = color::rgb(0.2,0.2,0.3);
let other_color = color::rgb(0.1,0.1,0.2);
let green_color = color::rgb(0.45,1.,0.12);
// Construct our main `Canvas` tree.
widget::Canvas::new().flow_down(&[
(ids.header, widget::Canvas::new().color(main_color).length(70.0)),
(ids.body, widget::Canvas::new().color(color::ORANGE).scroll_kids_vertically()),
]).set(ids.master, ui);
// A scrollbar for the `FOOTER` canvas.
widget::Scrollbar::y_axis(ids.body).auto_hide(false).set(ids.footer_scrollbar, ui);
widget::Text::new("Time tracker")
.color(color::LIGHT_ORANGE)
.font_size(28)
.mid_left_with_margin_on(ids.header,28.)
.left_justify()
.set(ids.title, ui);
// Here we make some canvas `Tabs` in the middle column.
widget::Tabs::new(&[(ids.tab_timers, "Timers")/*,(ids.tab_statistics, "Statistics")*/])
.wh_of(ids.body)
.color(other_color)
.border(0.)
.label_color(color::WHITE)
.middle_of(ids.body)
.set(ids.tabs, ui);
while ids_list.len() < timerstates.len() {
ids_list.push(ListItem::new(ui.widget_id_generator()));
}
let (mut items, _scrollbar) = widget::List::flow_down(timerstates.len())
.item_size(50.0)
.scrollbar_on_top()
.middle_of(ids.tab_timers)
.wh_of(ids.tab_timers)
.set(ids.timer_list, ui);
while let Some(item) = items.next(ui) {
let i = item.i;
let mut label;
let dummy = widget::Canvas::new().w_of(ids.timer_list);
item.set(dummy, ui);
widget::Canvas::new()
.wh_of(item.widget_id)
.middle_of(item.widget_id)
.set(ids_list[i].master, ui);
//Make the label for the toggle button
if timerstates[i].active {
let zero : u32 = 0;
let timesince : DateTime<Utc> = chrono::MIN_DATE.and_hms(zero,zero,zero).checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
let delta = format_time(timesince);
label = format!("{}", delta);
}
else {
label = format!("{}",format_time(timerstates[i].total));
}
for b in widget::Toggle::new(timerstates[i].active)
.h_of(ids_list[i].master)
.padded_w_of(ids_list[i].master,25.)
.label(&label)
.label_color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.mid_left_of(ids_list[i].master)
.color(if timerstates[i].active {green_color}else {other_color})
.set(ids_list[i].toggle, ui) {
if b {
timerstates[i].active_since = Utc::now();
}
else {
timerstates[i].total = timerstates[i].total.checked_add_signed(duration_elapsed(timerstates[i].active_since)).unwrap();
}
timerstates[i].active = b;
}
widget::Text::new(timerstates[i].name.as_str())
.color(if timerstates[i].active {color::BLACK} else {color::LIGHT_ORANGE})
.font_size(28)
.bottom_left_with_margin_on(ids_list[i].toggle,14.)
.left_justify()
.set(ids_list[i].name, ui);
for _press in widget::Button::new()
.h_of(ids_list[i].master)
.w(50.)
.label("-")
.mid_right_of(ids_list[i].master)
.set(ids_list[i].remove, ui){
timerstates.remove(i);
ids_list.remove(i);
return;
}
}
for edit in widget::TextBox::new(text)
.color(color::WHITE)
.h(50.)
.padded_w_of(ids.tab_timers, 25.0)
.bottom_left_of(ids.tab_timers)
.center_justify()
.set(ids.add_name, ui)
{
use conrod::widget::text_box::Event::{Update,Enter};
match edit {
Update(txt) => | ,
Enter => {
timerstates.push(support::TimerState::new(text.clone()));
},
}
}
for _press in widget::Button::new()
.h(50.)
.w(50.)
.label("+")
.bottom_right_of(ids.tab_timers)
.set(ids.plus_button, ui){
timerstates.push(support::TimerState::new(text.clone()));
}
}
fn format_time(t : chrono::DateTime<Utc>) -> String {
let dur = t.signed_duration_since(chrono::MIN_DATE.and_hms(0u32,0u32,0u32));
let ret = format!(
"{:02}:{:02}:{:02}",
dur.num_hours(),
dur.num_minutes()%60,
dur.num_seconds()%60
);
ret
}
fn duration_elapsed(t : chrono::DateTime<Utc>) -> chrono::Duration {
chrono::offset::Utc::now().signed_duration_since(t)
}
// Generate a unique `WidgetId` for each widget.
widget_ids! {
struct Ids {
master,
header,
body,
timer_list,
plus_button,
add_name,
footer_scrollbar,
tabs,
tab_timers,
tab_statistics,
title,
subtitle,
}
}
widget_ids! {
struct ListItem {
master,
toggle,
remove,
name,
time,
session,
}
}
} | {
*text = txt;
} | conditional_block |
function_system.rs | bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
pub(crate) fn new<T>() -> Self {
Self {
name: std::any::type_name::<T>().into(),
archetype_component_access: Access::default(),
component_access_set: FilteredAccessSet::default(),
is_send: true,
last_run: Tick::new(0),
}
}
/// Returns the system's name
#[inline]
pub fn name(&self) -> &str {
&self.name
}
/// Returns true if the system is [`Send`].
#[inline]
pub fn is_send(&self) -> bool {
self.is_send
}
/// Sets the system to be not [`Send`].
///
/// This is irreversible.
#[inline]
pub fn set_non_send(&mut self) {
self.is_send = false;
}
}
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
/// event_state: SystemState<EventReader<'static,'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for events in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam +'static> {
meta: SystemMeta,
param_state: Param::State,
world_id: WorldId,
archetype_generation: ArchetypeGeneration,
}
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w,'s>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w,'s, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn validate_world(&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w,'s>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w,'s, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w,'s>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w,'s, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w,'s>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w,'s, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counter part of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the functions return type, while the input
/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists.
///
/// [`FunctionSystem`] must be `.initialized` before they can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
// NOTE: PhantomData<fn()-> T> gives this safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker:'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
/// Message shown when a system isn't initialised
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker:'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool |
#[inline]
fn is_exclusive(&self) -> bool {
false
}
#[inline]
unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
let change_tick = world.increment_change_tick();
// SAFETY:
// - The caller has invoked `update_archetype_component_access`, which will panic
// if the world does not match.
// - All world accesses used by `F::Param` have been registered, so the caller
// will ensure that there are no data access conflicts.
let params = F::Param::get_param(
self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
&self.system_meta,
world,
change_tick,
);
let out = self.func.run(input, params);
self.system_meta.last_run = change_tick;
out
}
fn get_last_run(&self) -> Tick {
self.system_meta.last_run
}
fn set_last_run(&mut self, last_run: Tick) {
self.system_meta.last_run = last_run;
}
#[inline]
fn apply_deferred(&mut self, world: &mut World) {
let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
F::Param::apply(param_state, &self.system_meta, world);
}
#[inline]
fn initialize(&mut self, world: &mut World) {
self.world_id = Some(world.id());
self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
}
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
let param_state = self.param_state.as_mut().unwrap();
F::Param::new_archetype(
param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.system_meta,
);
}
}
#[inline]
fn check_change_tick(&mut self, change_tick: Tick) {
check_system_change_tick(
&mut self.system_meta.last_run,
change_tick,
self.system_meta.name.as_ref(),
);
}
fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
let set = crate::schedule::SystemTypeSet::<F>::new();
vec![Box::new(set)]
}
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
// NOTE: the impl body is intentionally empty — `ReadOnlySystem` acts as a
// marker here; the read-only guarantee comes from the `F::Param:
// ReadOnlySystemParam` bound, not from any method.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
    Marker:'static,
    F: SystemParamFunction<Marker>,
    F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
/// // pipe the `parse_message_system`'s output into the `filter_system`s input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
// NOTE: `Marker` exists so that multiple blanket impls (one per function
// arity — see `impl_system_function!` below) can coexist without overlapping;
// it carries no data.
pub trait SystemParamFunction<Marker>: Send + Sync +'static {
    /// The input type to this system. See [`System::In`].
    type In;
    /// The return type of this system. See [`System::Out`].
    type Out;
    /// The [`SystemParam`]/s used by this system to access the [`World`].
    type Param: SystemParam;
    /// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
    ///
    /// `param_value` is the already-fetched item for `Self::Param`; fetching it
    /// from the [`World`] is the caller's responsibility.
    fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync +'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out:'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
| {
self.system_meta.is_send
} | identifier_body |
function_system.rs | : bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
    /// Creates metadata for a system of type `T`: named after the type,
    /// with empty access sets, assumed [`Send`], and a zeroed last-run tick.
    pub(crate) fn new<T>() -> Self {
        Self {
            // The type name doubles as the system's display name.
            name: std::any::type_name::<T>().into(),
            archetype_component_access: Access::default(),
            component_access_set: FilteredAccessSet::default(),
            // Systems are `Send` unless `set_non_send` is called.
            is_send: true,
            last_run: Tick::new(0),
        }
    }
    /// Returns the system's name
    #[inline]
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns true if the system is [`Send`].
    #[inline]
    pub fn is_send(&self) -> bool {
        self.is_send
    }
    /// Sets the system to be not [`Send`].
    ///
    /// This is irreversible.
    #[inline]
    pub fn set_non_send(&mut self) {
        self.is_send = false;
    }
}
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
///     event_state: SystemState<EventReader<'static, 'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for events in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam +'static> {
meta: SystemMeta,
param_state: Param::State, |
impl<Param: SystemParam> SystemState<Param> {
    /// Creates a new [`SystemState`] with default state.
    ///
    /// ## Note
    /// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
    ///
    /// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
    /// manually before calling `get_manual{_mut}`.
    pub fn new(world: &mut World) -> Self {
        let mut meta = SystemMeta::new::<Param>();
        // Push the last-run tick as far into the past as possible.
        // NOTE(review): presumably so the first fetch sees everything as
        // changed/added — confirm against `Tick::relative_to`.
        meta.last_run = world.change_tick().relative_to(Tick::MAX);
        let param_state = Param::init_state(world, &mut meta);
        Self {
            meta,
            param_state,
            world_id: world.id(),
            archetype_generation: ArchetypeGeneration::initial(),
        }
    }
    /// Gets the metadata for this instance.
    #[inline]
    pub fn meta(&self) -> &SystemMeta {
        &self.meta
    }
    /// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
    #[inline]
    pub fn get<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
    where
        Param: ReadOnlySystemParam,
    {
        self.validate_world(world.id());
        self.update_archetypes(world);
        // SAFETY: Param is read-only and doesn't allow mutable access to World.
        // It also matches the World this SystemState was created with.
        unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
    }
    /// Retrieve the mutable [`SystemParam`] values.
    #[inline]
    pub fn get_mut<'w,'s>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w,'s, Param> {
        self.validate_world(world.id());
        self.update_archetypes(world);
        // SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
        unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
    }
    /// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
    /// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
    /// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
    /// are finished being used.
    pub fn apply(&mut self, world: &mut World) {
        Param::apply(&mut self.param_state, &self.meta, world);
    }
    /// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
    /// Otherwise, this returns false.
    #[inline]
    pub fn matches_world(&self, world_id: WorldId) -> bool {
        self.world_id == world_id
    }
    /// Asserts that the [`SystemState`] matches the provided world.
    #[inline]
    fn validate_world(&self, world_id: WorldId) {
        assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
    }
    /// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
    /// the results may not accurately reflect what is in the `world`.
    ///
    /// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
    /// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
    /// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
    #[inline]
    pub fn update_archetypes(&mut self, world: &World) {
        self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
    }
    /// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
    /// the results may not accurately reflect what is in the `world`.
    ///
    /// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
    /// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
    /// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
    ///
    /// # Note
    ///
    /// This method only accesses world metadata.
    #[inline]
    pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
        let archetypes = world.archetypes();
        let new_generation = archetypes.generation();
        // Swap in the new generation and visit only the archetypes that were
        // created since the previous call.
        let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
        let archetype_index_range = old_generation.value()..new_generation.value();
        for archetype_index in archetype_index_range {
            Param::new_archetype(
                &mut self.param_state,
                &archetypes[ArchetypeId::new(archetype_index)],
                &mut self.meta,
            );
        }
    }
    /// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
    /// This will not update the state's view of the world's archetypes automatically nor increment the
    /// world's change tick.
    ///
    /// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
    /// function.
    ///
    /// Users should strongly prefer to use [`SystemState::get`] over this function.
    #[inline]
    pub fn get_manual<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
    where
        Param: ReadOnlySystemParam,
    {
        self.validate_world(world.id());
        // Read-only access: observe the current tick without advancing it.
        let change_tick = world.read_change_tick();
        // SAFETY: Param is read-only and doesn't allow mutable access to World.
        // It also matches the World this SystemState was created with.
        unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
    }
    /// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
    /// automatically nor increment the world's change tick.
    ///
    /// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
    /// function.
    ///
    /// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
    #[inline]
    pub fn get_manual_mut<'w,'s>(
        &'s mut self,
        world: &'w mut World,
    ) -> SystemParamItem<'w,'s, Param> {
        self.validate_world(world.id());
        let change_tick = world.change_tick();
        // SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
        unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
    }
    /// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
    ///
    /// # Safety
    /// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
    /// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
    /// created with.
    #[inline]
    pub unsafe fn get_unchecked_manual<'w,'s>(
        &'s mut self,
        world: UnsafeWorldCell<'w>,
    ) -> SystemParamItem<'w,'s, Param> {
        // Advance the world's change tick for this access, unlike the
        // `get_manual{_mut}` paths which merely read it.
        let change_tick = world.increment_change_tick();
        self.fetch(world, change_tick)
    }
    /// # Safety
    /// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
    /// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
    /// created with.
    #[inline]
    unsafe fn fetch<'w,'s>(
        &'s mut self,
        world: UnsafeWorldCell<'w>,
        change_tick: Tick,
    ) -> SystemParamItem<'w,'s, Param> {
        let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
        // Record the tick of this fetch as the state's last run.
        self.meta.last_run = change_tick;
        param
    }
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counter part of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the functions return type, while the input
/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists.
///
/// [`FunctionSystem`] must be `.initialized` before they can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
    F: SystemParamFunction<Marker>,
{
    // The user-supplied function that is run as the system.
    func: F,
    // Cached state backing `F`'s params; `None` until `initialize` is called.
    param_state: Option<<F::Param as SystemParam>::State>,
    // Name, access sets, send-ness, and last-run tick for this system.
    system_meta: SystemMeta,
    // The `World` this system was initialized with; `None` before `initialize`.
    world_id: Option<WorldId>,
    // Tracks which archetypes `update_archetype_component_access` has seen.
    archetype_generation: ArchetypeGeneration,
    // NOTE: PhantomData<fn()-> T> gives this safe Send/Sync impls
    marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
    F: SystemParamFunction<Marker> + Clone,
{
    /// Clones the wrapped function only.
    ///
    /// All cached state — param state, metadata, world binding, and the
    /// archetype cursor — is reset, so the returned system must be
    /// `.initialize`d before it can run.
    fn clone(&self) -> Self {
        let func = self.func.clone();
        Self {
            func,
            marker: PhantomData,
            world_id: None,
            param_state: None,
            archetype_generation: ArchetypeGeneration::initial(),
            system_meta: SystemMeta::new::<F>(),
        }
    }
}
/// A marker type used to distinguish regular function systems from exclusive function systems.
///
/// Appears as the first element of the `Marker` tuple in the blanket
/// [`IntoSystem`] impl below, keeping it disjoint from other blanket impls.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
    Marker: 'static,
    F: SystemParamFunction<Marker>,
{
    type System = FunctionSystem<Marker, F>;

    /// Wraps the bare function in an uninitialized [`FunctionSystem`].
    ///
    /// The returned system holds no parameter state and is not yet bound to
    /// any [`World`]; it must be `.initialize`d before its first run.
    fn into_system(func: Self) -> Self::System {
        FunctionSystem {
            func,
            marker: PhantomData,
            world_id: None,
            param_state: None,
            archetype_generation: ArchetypeGeneration::initial(),
            system_meta: SystemMeta::new::<F>(),
        }
    }
}
impl<Marker, F> FunctionSystem<Marker, F>
where
    F: SystemParamFunction<Marker>,
{
    /// Message shown when a system is run or applied before being initialized
    /// (i.e. while `param_state` is still `None`).
    // When lines get too long, rustfmt can sometimes refuse to format them.
    // Work around this by storing the message separately.
    const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
    Marker:'static,
    F: SystemParamFunction<Marker>,
{
    type In = F::In;
    type Out = F::Out;
    #[inline]
    fn name(&self) -> Cow<'static, str> {
        self.system_meta.name.clone()
    }
    #[inline]
    fn type_id(&self) -> TypeId {
        // Identified by the wrapped function type, not the wrapper itself.
        TypeId::of::<F>()
    }
    #[inline]
    fn component_access(&self) -> &Access<ComponentId> {
        self.system_meta.component_access_set.combined_access()
    }
    #[inline]
    fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
        &self.system_meta.archetype_component_access
    }
    #[inline]
    fn is_send(&self) -> bool {
        self.system_meta.is_send
    }
    #[inline]
    fn is_exclusive(&self) -> bool {
        // Function systems access the world only through their params,
        // never through `&mut World`.
        false
    }
    #[inline]
    unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
        let change_tick = world.increment_change_tick();
        // SAFETY:
        // - The caller has invoked `update_archetype_component_access`, which will panic
        //   if the world does not match.
        // - All world accesses used by `F::Param` have been registered, so the caller
        //   will ensure that there are no data access conflicts.
        let params = F::Param::get_param(
            self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
            &self.system_meta,
            world,
            change_tick,
        );
        let out = self.func.run(input, params);
        // Remember the tick this run used; exposed via `get_last_run`.
        self.system_meta.last_run = change_tick;
        out
    }
    fn get_last_run(&self) -> Tick {
        self.system_meta.last_run
    }
    fn set_last_run(&mut self, last_run: Tick) {
        self.system_meta.last_run = last_run;
    }
    #[inline]
    fn apply_deferred(&mut self, world: &mut World) {
        // Panics (with PARAM_MESSAGE) if the system was never initialized.
        let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
        F::Param::apply(param_state, &self.system_meta, world);
    }
    #[inline]
    fn initialize(&mut self, world: &mut World) {
        // Bind to this world; `update_archetype_component_access` asserts the
        // same world id on every later call.
        self.world_id = Some(world.id());
        // Push the last-run tick as far into the past as possible.
        // NOTE(review): presumably so the first run sees everything as
        // changed/added — confirm against `Tick::relative_to`.
        self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
        self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
    }
    fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
        assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
        let archetypes = world.archetypes();
        let new_generation = archetypes.generation();
        // Swap in the new generation and visit only the archetypes created
        // since the previous call.
        let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
        let archetype_index_range = old_generation.value()..new_generation.value();
        for archetype_index in archetype_index_range {
            let param_state = self.param_state.as_mut().unwrap();
            F::Param::new_archetype(
                param_state,
                &archetypes[ArchetypeId::new(archetype_index)],
                &mut self.system_meta,
            );
        }
    }
    #[inline]
    fn check_change_tick(&mut self, change_tick: Tick) {
        check_system_change_tick(
            &mut self.system_meta.last_run,
            change_tick,
            self.system_meta.name.as_ref(),
        );
    }
    fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
        // Each function system belongs to a set keyed by its function type.
        let set = crate::schedule::SystemTypeSet::<F>::new();
        vec![Box::new(set)]
    }
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
// NOTE: the impl body is intentionally empty — `ReadOnlySystem` acts as a
// marker here; the read-only guarantee comes from the `F::Param:
// ReadOnlySystemParam` bound, not from any method.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
    Marker:'static,
    F: SystemParamFunction<Marker>,
    F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
/// // pipe the `parse_message_system`'s output into the `filter_system`s input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
// NOTE: `Marker` exists so that multiple blanket impls (one per function
// arity — see `impl_system_function!` below) can coexist without overlapping;
// it carries no data.
pub trait SystemParamFunction<Marker>: Send + Sync +'static {
    /// The input type to this system. See [`System::In`].
    type In;
    /// The return type of this system. See [`System::Out`].
    type Out;
    /// The [`SystemParam`]/s used by this system to access the [`World`].
    type Param: SystemParam;
    /// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
    ///
    /// `param_value` is the already-fetched item for `Self::Param`; fetching it
    /// from the [`World`] is the caller's responsibility.
    fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync +'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out:'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
| world_id: WorldId,
archetype_generation: ArchetypeGeneration,
} | random_line_split |
function_system.rs | bool,
pub(crate) last_run: Tick,
}
impl SystemMeta {
    /// Creates metadata for a system of type `T`: named after the type,
    /// with empty access sets, assumed [`Send`], and a zeroed last-run tick.
    pub(crate) fn new<T>() -> Self {
        Self {
            // The type name doubles as the system's display name.
            name: std::any::type_name::<T>().into(),
            archetype_component_access: Access::default(),
            component_access_set: FilteredAccessSet::default(),
            // Systems are `Send` unless `set_non_send` is called.
            is_send: true,
            last_run: Tick::new(0),
        }
    }
    /// Returns the system's name
    #[inline]
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns true if the system is [`Send`].
    #[inline]
    pub fn is_send(&self) -> bool {
        self.is_send
    }
    /// Sets the system to be not [`Send`].
    ///
    /// This is irreversible.
    #[inline]
    pub fn set_non_send(&mut self) {
        self.is_send = false;
    }
}
// TODO: Actually use this in FunctionSystem. We should probably only do this once Systems are constructed using a World reference
// (to avoid the need for unwrapping to retrieve SystemMeta)
/// Holds on to persistent state required to drive [`SystemParam`] for a [`System`].
///
/// This is a very powerful and convenient tool for working with exclusive world access,
/// allowing you to fetch data from the [`World`] as if you were running a [`System`].
///
/// Borrow-checking is handled for you, allowing you to mutably access multiple compatible system parameters at once,
/// and arbitrary system parameters (like [`EventWriter`](crate::event::EventWriter)) can be conveniently fetched.
///
/// For an alternative approach to split mutable access to the world, see [`World::resource_scope`].
///
/// # Warning
///
/// [`SystemState`] values created can be cached to improve performance,
/// and *must* be cached and reused in order for system parameters that rely on local state to work correctly.
/// These include:
/// - [`Added`](crate::query::Added) and [`Changed`](crate::query::Changed) query filters
/// - [`Local`](crate::system::Local) variables that hold state
/// - [`EventReader`](crate::event::EventReader) system parameters, which rely on a [`Local`](crate::system::Local) to track which events have been seen
///
/// # Example
///
/// Basic usage:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// # #[derive(Resource)]
/// # struct MyResource(u32);
/// #
/// # #[derive(Component)]
/// # struct MyComponent;
/// #
/// // Work directly on the `World`
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
///
/// // Construct a `SystemState` struct, passing in a tuple of `SystemParam`
/// // as if you were writing an ordinary system.
/// let mut system_state: SystemState<(
/// EventWriter<MyEvent>,
/// Option<ResMut<MyResource>>,
/// Query<&MyComponent>,
/// )> = SystemState::new(&mut world);
///
/// // Use system_state.get_mut(&mut world) and unpack your system parameters into variables!
/// // system_state.get(&world) provides read-only versions of your system parameters instead.
/// let (event_writer, maybe_resource, query) = system_state.get_mut(&mut world);
///
/// // If you are using `Commands`, you can choose when you want to apply them to the world.
/// // You need to manually call `.apply(world)` on the `SystemState` to apply them.
/// ```
/// Caching:
/// ```rust
/// # use bevy_ecs::prelude::*;
/// # use bevy_ecs::system::SystemState;
/// # use bevy_ecs::event::Events;
/// #
/// # #[derive(Event)]
/// # struct MyEvent;
/// #[derive(Resource)]
/// struct CachedSystemState {
///     event_state: SystemState<EventReader<'static, 'static, MyEvent>>,
/// }
///
/// // Create and store a system state once
/// let mut world = World::new();
/// world.init_resource::<Events<MyEvent>>();
/// let initial_state: SystemState<EventReader<MyEvent>> = SystemState::new(&mut world);
///
/// // The system state is cached in a resource
/// world.insert_resource(CachedSystemState {
/// event_state: initial_state,
/// });
///
/// // Later, fetch the cached system state, saving on overhead
/// world.resource_scope(|world, mut cached_state: Mut<CachedSystemState>| {
/// let mut event_reader = cached_state.event_state.get_mut(world);
///
/// for events in event_reader.iter() {
/// println!("Hello World!");
/// }
/// });
/// ```
pub struct SystemState<Param: SystemParam +'static> {
    // Name, access sets, and last-run tick for this state.
    meta: SystemMeta,
    // Cached state backing `Param`, created in `SystemState::new`.
    param_state: Param::State,
    // The `World` this state was created with; `validate_world` rejects others.
    world_id: WorldId,
    // Tracks which archetypes `param_state` has already been told about.
    archetype_generation: ArchetypeGeneration,
}
impl<Param: SystemParam> SystemState<Param> {
/// Creates a new [`SystemState`] with default state.
///
/// ## Note
/// For users of [`SystemState::get_manual`] or [`get_manual_mut`](SystemState::get_manual_mut):
///
/// `new` does not cache any of the world's archetypes, so you must call [`SystemState::update_archetypes`]
/// manually before calling `get_manual{_mut}`.
pub fn new(world: &mut World) -> Self {
let mut meta = SystemMeta::new::<Param>();
meta.last_run = world.change_tick().relative_to(Tick::MAX);
let param_state = Param::init_state(world, &mut meta);
Self {
meta,
param_state,
world_id: world.id(),
archetype_generation: ArchetypeGeneration::initial(),
}
}
/// Gets the metadata for this instance.
#[inline]
pub fn meta(&self) -> &SystemMeta {
&self.meta
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
#[inline]
pub fn get<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell_readonly()) }
}
/// Retrieve the mutable [`SystemParam`] values.
#[inline]
pub fn get_mut<'w,'s>(&'s mut self, world: &'w mut World) -> SystemParamItem<'w,'s, Param> {
self.validate_world(world.id());
self.update_archetypes(world);
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.get_unchecked_manual(world.as_unsafe_world_cell()) }
}
/// Applies all state queued up for [`SystemParam`] values. For example, this will apply commands queued up
/// by a [`Commands`](`super::Commands`) parameter to the given [`World`].
/// This function should be called manually after the values returned by [`SystemState::get`] and [`SystemState::get_mut`]
/// are finished being used.
pub fn apply(&mut self, world: &mut World) {
Param::apply(&mut self.param_state, &self.meta, world);
}
/// Returns `true` if `world_id` matches the [`World`] that was used to call [`SystemState::new`].
/// Otherwise, this returns false.
#[inline]
pub fn matches_world(&self, world_id: WorldId) -> bool {
self.world_id == world_id
}
/// Asserts that the [`SystemState`] matches the provided world.
#[inline]
fn | (&self, world_id: WorldId) {
assert!(self.matches_world(world_id), "Encountered a mismatched World. A SystemState cannot be used with Worlds other than the one it was created with.");
}
/// Updates the state's internal view of the [`World`]'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
#[inline]
pub fn update_archetypes(&mut self, world: &World) {
self.update_archetypes_unsafe_world_cell(world.as_unsafe_world_cell_readonly());
}
/// Updates the state's internal view of the `world`'s archetypes. If this is not called before fetching the parameters,
/// the results may not accurately reflect what is in the `world`.
///
/// This is only required if [`SystemState::get_manual`] or [`SystemState::get_manual_mut`] is being called, and it only needs to
/// be called if the `world` has been structurally mutated (i.e. added/removed a component or resource). Users using
/// [`SystemState::get`] or [`SystemState::get_mut`] do not need to call this as it will be automatically called for them.
///
/// # Note
///
/// This method only accesses world metadata.
#[inline]
pub fn update_archetypes_unsafe_world_cell(&mut self, world: UnsafeWorldCell) {
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
Param::new_archetype(
&mut self.param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.meta,
);
}
}
/// Retrieve the [`SystemParam`] values. This can only be called when all parameters are read-only.
/// This will not update the state's view of the world's archetypes automatically nor increment the
/// world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get`] over this function.
#[inline]
pub fn get_manual<'w,'s>(&'s mut self, world: &'w World) -> SystemParamItem<'w,'s, Param>
where
Param: ReadOnlySystemParam,
{
self.validate_world(world.id());
let change_tick = world.read_change_tick();
// SAFETY: Param is read-only and doesn't allow mutable access to World.
// It also matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell_readonly(), change_tick) }
}
/// Retrieve the mutable [`SystemParam`] values. This will not update the state's view of the world's archetypes
/// automatically nor increment the world's change tick.
///
/// For this to return accurate results, ensure [`SystemState::update_archetypes`] is called before this
/// function.
///
/// Users should strongly prefer to use [`SystemState::get_mut`] over this function.
#[inline]
pub fn get_manual_mut<'w,'s>(
&'s mut self,
world: &'w mut World,
) -> SystemParamItem<'w,'s, Param> {
self.validate_world(world.id());
let change_tick = world.change_tick();
// SAFETY: World is uniquely borrowed and matches the World this SystemState was created with.
unsafe { self.fetch(world.as_unsafe_world_cell(), change_tick) }
}
/// Retrieve the [`SystemParam`] values. This will not update archetypes automatically.
///
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
pub unsafe fn get_unchecked_manual<'w,'s>(
&'s mut self,
world: UnsafeWorldCell<'w>,
) -> SystemParamItem<'w,'s, Param> {
let change_tick = world.increment_change_tick();
self.fetch(world, change_tick)
}
/// # Safety
/// This call might access any of the input parameters in a way that violates Rust's mutability rules. Make sure the data
/// access is safe in the context of global [`World`] access. The passed-in [`World`] _must_ be the [`World`] the [`SystemState`] was
/// created with.
#[inline]
unsafe fn fetch<'w,'s>(
&'s mut self,
world: UnsafeWorldCell<'w>,
change_tick: Tick,
) -> SystemParamItem<'w,'s, Param> {
let param = Param::get_param(&mut self.param_state, &self.meta, world, change_tick);
self.meta.last_run = change_tick;
param
}
}
impl<Param: SystemParam> FromWorld for SystemState<Param> {
fn from_world(world: &mut World) -> Self {
Self::new(world)
}
}
/// The [`System`] counter part of an ordinary function.
///
/// You get this by calling [`IntoSystem::into_system`] on a function that only accepts
/// [`SystemParam`]s. The output of the system becomes the functions return type, while the input
/// becomes the functions [`In`] tagged parameter or `()` if no such parameter exists.
///
/// [`FunctionSystem`] must be `.initialized` before they can be run.
///
/// The [`Clone`] implementation for [`FunctionSystem`] returns a new instance which
/// is NOT initialized. The cloned system must also be `.initialized` before it can be run.
pub struct FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
func: F,
param_state: Option<<F::Param as SystemParam>::State>,
system_meta: SystemMeta,
world_id: Option<WorldId>,
archetype_generation: ArchetypeGeneration,
// NOTE: PhantomData<fn()-> T> gives this safe Send/Sync impls
marker: PhantomData<fn() -> Marker>,
}
// De-initializes the cloned system.
impl<Marker, F> Clone for FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker> + Clone,
{
fn clone(&self) -> Self {
Self {
func: self.func.clone(),
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
/// A marker type used to distinguish regular function systems from exclusive function systems.
#[doc(hidden)]
pub struct IsFunctionSystem;
impl<Marker, F> IntoSystem<F::In, F::Out, (IsFunctionSystem, Marker)> for F
where
Marker:'static,
F: SystemParamFunction<Marker>,
{
type System = FunctionSystem<Marker, F>;
fn into_system(func: Self) -> Self::System {
FunctionSystem {
func,
param_state: None,
system_meta: SystemMeta::new::<F>(),
world_id: None,
archetype_generation: ArchetypeGeneration::initial(),
marker: PhantomData,
}
}
}
impl<Marker, F> FunctionSystem<Marker, F>
where
F: SystemParamFunction<Marker>,
{
/// Message shown when a system isn't initialised
// When lines get too long, rustfmt can sometimes refuse to format them.
// Work around this by storing the message separately.
const PARAM_MESSAGE: &'static str = "System's param_state was not found. Did you forget to initialize this system before running it?";
}
impl<Marker, F> System for FunctionSystem<Marker, F>
where
Marker:'static,
F: SystemParamFunction<Marker>,
{
type In = F::In;
type Out = F::Out;
#[inline]
fn name(&self) -> Cow<'static, str> {
self.system_meta.name.clone()
}
#[inline]
fn type_id(&self) -> TypeId {
TypeId::of::<F>()
}
#[inline]
fn component_access(&self) -> &Access<ComponentId> {
self.system_meta.component_access_set.combined_access()
}
#[inline]
fn archetype_component_access(&self) -> &Access<ArchetypeComponentId> {
&self.system_meta.archetype_component_access
}
#[inline]
fn is_send(&self) -> bool {
self.system_meta.is_send
}
#[inline]
fn is_exclusive(&self) -> bool {
false
}
#[inline]
unsafe fn run_unsafe(&mut self, input: Self::In, world: UnsafeWorldCell) -> Self::Out {
let change_tick = world.increment_change_tick();
// SAFETY:
// - The caller has invoked `update_archetype_component_access`, which will panic
// if the world does not match.
// - All world accesses used by `F::Param` have been registered, so the caller
// will ensure that there are no data access conflicts.
let params = F::Param::get_param(
self.param_state.as_mut().expect(Self::PARAM_MESSAGE),
&self.system_meta,
world,
change_tick,
);
let out = self.func.run(input, params);
self.system_meta.last_run = change_tick;
out
}
fn get_last_run(&self) -> Tick {
self.system_meta.last_run
}
fn set_last_run(&mut self, last_run: Tick) {
self.system_meta.last_run = last_run;
}
#[inline]
fn apply_deferred(&mut self, world: &mut World) {
let param_state = self.param_state.as_mut().expect(Self::PARAM_MESSAGE);
F::Param::apply(param_state, &self.system_meta, world);
}
#[inline]
fn initialize(&mut self, world: &mut World) {
self.world_id = Some(world.id());
self.system_meta.last_run = world.change_tick().relative_to(Tick::MAX);
self.param_state = Some(F::Param::init_state(world, &mut self.system_meta));
}
fn update_archetype_component_access(&mut self, world: UnsafeWorldCell) {
assert!(self.world_id == Some(world.id()), "Encountered a mismatched World. A System cannot be used with Worlds other than the one it was initialized with.");
let archetypes = world.archetypes();
let new_generation = archetypes.generation();
let old_generation = std::mem::replace(&mut self.archetype_generation, new_generation);
let archetype_index_range = old_generation.value()..new_generation.value();
for archetype_index in archetype_index_range {
let param_state = self.param_state.as_mut().unwrap();
F::Param::new_archetype(
param_state,
&archetypes[ArchetypeId::new(archetype_index)],
&mut self.system_meta,
);
}
}
#[inline]
fn check_change_tick(&mut self, change_tick: Tick) {
check_system_change_tick(
&mut self.system_meta.last_run,
change_tick,
self.system_meta.name.as_ref(),
);
}
fn default_system_sets(&self) -> Vec<Box<dyn crate::schedule::SystemSet>> {
let set = crate::schedule::SystemTypeSet::<F>::new();
vec![Box::new(set)]
}
}
/// SAFETY: `F`'s param is [`ReadOnlySystemParam`], so this system will only read from the world.
unsafe impl<Marker, F> ReadOnlySystem for FunctionSystem<Marker, F>
where
Marker:'static,
F: SystemParamFunction<Marker>,
F::Param: ReadOnlySystemParam,
{
}
/// A trait implemented for all functions that can be used as [`System`]s.
///
/// This trait can be useful for making your own systems which accept other systems,
/// sometimes called higher order systems.
///
/// This should be used in combination with [`ParamSet`] when calling other systems
/// within your system.
/// Using [`ParamSet`] in this case avoids [`SystemParam`] collisions.
///
/// # Example
///
/// To create something like [`PipeSystem`], but in entirely safe code.
///
/// ```rust
/// use std::num::ParseIntError;
///
/// use bevy_ecs::prelude::*;
///
/// /// Pipe creates a new system which calls `a`, then calls `b` with the output of `a`
/// pub fn pipe<A, B, AMarker, BMarker>(
/// mut a: A,
/// mut b: B,
/// ) -> impl FnMut(In<A::In>, ParamSet<(A::Param, B::Param)>) -> B::Out
/// where
/// // We need A and B to be systems, add those bounds
/// A: SystemParamFunction<AMarker>,
/// B: SystemParamFunction<BMarker, In = A::Out>,
/// {
/// // The type of `params` is inferred based on the return of this function above
/// move |In(a_in), mut params| {
/// let shared = a.run(a_in, params.p0());
/// b.run(shared, params.p1())
/// }
/// }
///
/// // Usage example for `pipe`:
/// fn main() {
/// let mut world = World::default();
/// world.insert_resource(Message("42".to_string()));
///
/// // pipe the `parse_message_system`'s output into the `filter_system`s input
/// let mut piped_system = IntoSystem::into_system(pipe(parse_message, filter));
/// piped_system.initialize(&mut world);
/// assert_eq!(piped_system.run((), &mut world), Some(42));
/// }
///
/// #[derive(Resource)]
/// struct Message(String);
///
/// fn parse_message(message: Res<Message>) -> Result<usize, ParseIntError> {
/// message.0.parse::<usize>()
/// }
///
/// fn filter(In(result): In<Result<usize, ParseIntError>>) -> Option<usize> {
/// result.ok().filter(|&n| n < 100)
/// }
/// ```
/// [`PipeSystem`]: crate::system::PipeSystem
/// [`ParamSet`]: crate::system::ParamSet
pub trait SystemParamFunction<Marker>: Send + Sync +'static {
/// The input type to this system. See [`System::In`].
type In;
/// The return type of this system. See [`System::Out`].
type Out;
/// The [`SystemParam`]/s used by this system to access the [`World`].
type Param: SystemParam;
/// Executes this system once. See [`System::run`] or [`System::run_unsafe`].
fn run(&mut self, input: Self::In, param_value: SystemParamItem<Self::Param>) -> Self::Out;
}
macro_rules! impl_system_function {
($($param: ident),*) => {
#[allow(non_snake_case)]
impl<Out, Func: Send + Sync +'static, $($param: SystemParam),*> SystemParamFunction<fn($($param,)*) -> Out> for Func
where
for <'a> &'a mut Func:
FnMut($($param),*) -> Out +
FnMut($(SystemParamItem<$param>),*) -> Out, Out:'static
{
type In = ();
type Out = Out;
type Param = ($($param,)*);
#[inline]
fn run(&mut self, _input: (), param_value: SystemParamItem< ($($param,)*)>) -> Out {
// Yes, this is strange, but `rustc` fails to compile this impl
// without using this function. It fails to recognize that `func`
// is a function, potentially because of the multiple impls of `FnMut`
#[allow(clippy::too_many_arguments)]
fn call_inner<Out, $($param,)*>(
mut f: impl FnMut($($param,)*)->Out,
$($param: $param,)*
)->Out{
f($($param,)*)
}
let ($($param,)*) = param_value;
call_inner(self, $($param),*)
| validate_world | identifier_name |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid,
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// ands sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1, | }
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell if alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimesions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that set the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Chekc the grid dimesions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | BinaryCell::Active => alive += 1
} | random_line_split |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid,
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// ands sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn fullname(&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell if alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> |
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that set the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Chekc the grid dimesions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | {
// Get the grid dimesions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
} | identifier_body |
cellgrid.rs | use ggez::graphics;
use ggez::GameResult;
use ggez::nalgebra as na;
use crate::simulation::{SimGrid, Automaton};
use crate::commons::cells::BinaryCell;
use crate::commons::grids::CellGrid;
use crate::gameoflife::GameOfLife;
/// Implementation of the Automaton trait for GameOfLife with a CellGrid grid,
impl Automaton for GameOfLife<CellGrid<BinaryCell>> {
/// Defines the type of grid for the automaton.
type Grid = CellGrid<BinaryCell>;
/// A constructor method that creates a null automaton
/// ands sets the initial state and cell size parameters.
fn new(initialstate: &str, cellsize: f32) -> Self {
Self {
grid: Self::Grid::new(cellsize),
initialstate: initialstate.to_string(),
cellsize,
generation: 0,
alive: 0,
dead: 0,
}
}
/// A method that initializes the automaton for the given dimensions.
fn initialize(&mut self, dimensions: graphics::Rect) {
// Create a new dimensions object for the grid of cells (60 px removed for the banner)
let griddimensions = graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h - 60.0);
// Set the grid dimensions to the grid
self.grid.setdimensions(griddimensions);
// Check the value of the initial state field
match self.initialstate.as_str() {
// Default initial state (random-balanced)
"default" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Balanced Random initial state
"random-balanced" => {
// Set the initial state string of the automaton
self.initialstate = "Random [1:1]".to_string();
// Create a grid of random cells with a balanced ratio of dead and alive cells
let randomgrid = CellGrid::<BinaryCell>::generate_randomgrid_balanced(self.cellsize, griddimensions);
// Set the generated grid to the automaton grid
self.grid.setgrid(randomgrid);
},
// Invalid initial state
_ => {
// Print an error and exit
eprintln!("[error] invalid initial state for 'gameoflife'");
std::process::exit(0);
}
}
}
/// A method that advances the game of life to the next generation.
fn advance(&mut self) {
// Declare counter variables for the number of alive and dead cells
let mut alive: u32 = 0;
let mut dead: u32 = 0;
// Check if the cell grid exists
if self.grid.vector.is_some() {
// Create a clone of the cell grid
let mut newgrid = self.grid.vector.clone().unwrap();
// Iterate over the grid
for (x, y, cell) in self.grid.clone() {
// Check the vicinity of the cell
let cell = match (cell, self.scan_vicinity(x, y)) {
// If a cell is alive, and there are either too many live
// neighbors or not enough live neighbors, kill it.
(BinaryCell::Active, n) if n < 2 || n > 3 => BinaryCell::Passive,
// If a cell is alive and has either 2
// or 3 live neighbors, keep it alive
(BinaryCell::Active, n) if n == 3 || n == 2 => BinaryCell::Active,
// If a cell is dead and has exactly 3 live neighbors, revive it
(BinaryCell::Passive, 3) => BinaryCell::Active,
// Otherwise, keep the cell state
(c, _) => c,
};
// Add the new cell to the new grid
newgrid[x][y] = cell.clone();
// Increment the alive or dead counter
match cell {
BinaryCell::Passive => dead += 1,
BinaryCell::Active => alive += 1
}
}
// Assign the new grid to the grid struct
self.grid.setgrid(newgrid);
}
// Update the alive and dead cell value in the grid struct
self.alive = alive;
self.dead = dead;
// Increment the generation value in the grid struct
self.generation += 1;
}
/// A method that returns the state of the automaton as a string.
/// Format: "Generation: {} | Alive: {} | Dead: {}"
fn state(&self) -> String {
format!("Generation: {} | Alive: {} | Dead: {}", self.generation, self.alive, self.dead)
}
/// A method that returns the name of the automaton as a string.
/// Format: "Conway's Game of Life"
fn name(&self) -> String {
"Conway's Game of Life".to_string()
}
/// A method that returns the name of the automaton as a string
/// along with its initial state and grid type.
/// Format: "Conway's Game of Life | Grid | {}"
fn | (&self) -> String {
format!("Conway's Game of Life | Grid | {}", self.initialstate)
}
}
// Implementation of helper methods for GameOfLife with a CellGrid grid,
impl GameOfLife<CellGrid<BinaryCell>> {
// A function that retrieves the number of alive cells in
// the neighbouring vicity of a given cell (x, y)
fn scan_vicinity(&mut self, x: usize, y: usize) -> i32 {
// Declare a counter
let mut count = 0;
// Check if the cell grid exists
if let Some(grid) = &self.grid.vector {
// Iterate over the cells in the vicinity of the cell at (x, y).
// The [-1,0,1] vectors represent the vicinity offsets for the x and y axis each.
for x_off in vec![-1, 0, 1] {
for y_off in vec![-1, 0, 1] {
// Create the position of the cell in the
// grid based on the vicinity offsets
let nx = x as i32 + x_off;
let ny = y as i32 + y_off;
// Check if position is out of grid bounds (x axis)
if nx < 0 || nx >= grid.len() as i32 {
continue;
}
// Check if position is out of grid bounds (y axis)
if ny < 0 || ny >= grid[nx as usize].len() as i32 {
continue;
}
// Check if position points to the cell itself i.e (0,0) offsets
if nx == x as i32 && ny == y as i32 {
continue;
}
// Check if the cell if alive
match grid[nx as usize][ny as usize].clone() {
// Increment the counter if the cell is alive
BinaryCell::Active => count = count+1,
_ => continue,
}
}
}
}
// Return the counter value
return count
}
}
// Implementation of the Drawable trait for GameOfLife with a CellGrid grid,
impl graphics::Drawable for GameOfLife<CellGrid<BinaryCell>> {
// A method that returns the dimensions of the automaton
fn dimensions(&self, _ctx: &mut ggez::Context) -> Option<graphics::Rect> {
// Get the grid dimesions and add the banner height
if let Some(dimensions) = &self.grid.dimensions {
Some(graphics::Rect::new(0.0, 0.0, dimensions.w, dimensions.h + 60.0))
} else {None}
}
// A method that returns the graphics blending mode of the automaton grid
fn blend_mode(&self) -> Option<graphics::BlendMode> {
Some(graphics::BlendMode::Add)
}
// A method that set the graphics blend mode of the automaton grid (currently does nothing)
fn set_blend_mode(&mut self, _: Option<graphics::BlendMode>) {}
// A method that renders the automaton grid and state and returns a GameResult
fn draw(&self, ctx: &mut ggez::Context, param: graphics::DrawParam) -> GameResult<()> {
// Create a new graphic mesh builder
let mut mb = graphics::MeshBuilder::new();
// Iterate through each cell in the grid
for (x, y, cell) in self.grid.clone() {
// Create the bounds of the cell
let cellbounds = graphics::Rect::new(
(x as f32) * self.cellsize,
(y as f32) * self.cellsize,
self.cellsize,
self.cellsize,
);
// Add the cell fill to the mesh builder
mb.rectangle(
graphics::DrawMode::Fill(graphics::FillOptions::default()),
cellbounds,
// Set the cell color based on cell state
match cell {
BinaryCell::Passive => [0.0, 0.0, 0.0, 1.0].into(),
BinaryCell::Active => [1.0, 1.0, 1.0, 1.0].into(),
},
)
// Add the cell boundary to the mesh builder
.rectangle(
graphics::DrawMode::Stroke(graphics::StrokeOptions::default()),
cellbounds,
[1.0, 1.0, 1.0, 0.25].into(),
);
}
// Build and Draw the mesh
mb.build(ctx)?.draw(ctx, param)?;
// Declare a variable for the font size
let font_size = 18.0;
// Create the text graphics for the banner
let mut name_text = graphics::Text::new(self.fullname());
let mut state_text = graphics::Text::new(self.state());
// Set the font styling for the text graphics
state_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
name_text.set_font(graphics::Font::default(), graphics::Scale::uniform(font_size));
// Chekc the grid dimesions
if let Some(dimensions) = &self.grid.dimensions {
// Calculate the spacing between banner elements.
// Assumes 2 units of spacing above the name text and below the state text
// and 1 unit of spacing between the name and state text.
let spacing = (60.0 - (font_size * 2.0)) / 5.0;
// Calculate the position of the name text
let name_offset = dimensions.h + (spacing * 2.0);
// Calculate the position of the state text
let state_offset = dimensions.h + (spacing * 3.0) + font_size;
// Draw the banner text graphics
name_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + name_offset),).into())?;
state_text.draw(ctx, (na::Point2::new(param.dest.x + 10.0, param.dest.y + state_offset),).into())?;
}
// Return GameResult::Ok
Ok(())
}
} | fullname | identifier_name |
instance.rs | // write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) ->!,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import {.. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H:'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) ->!) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details,.. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault {.. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details,.. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready {.. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&&!self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implictly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct | {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
| FaultDetails | identifier_name |
instance.rs | // write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) ->!,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import {.. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s. | pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H:'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) ->!) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details,.. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault {.. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details,.. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready {.. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&&!self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implictly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
| random_line_split |
|
instance.rs | // write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) ->!,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc {
&self.alloc
}
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import {.. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H:'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) ->!) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details,.. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault {.. } => | Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready {.. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&&!self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implictly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
| {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details, .. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user | conditional_block |
instance.rs | // write the whole struct into place over the uninitialized page
ptr::write(&mut *handle, inst);
};
handle.reset()?;
Ok(handle)
}
pub fn instance_handle_to_raw(inst: InstanceHandle) -> *mut Instance {
let ptr = inst.inst.as_ptr();
std::mem::forget(inst);
ptr
}
pub unsafe fn instance_handle_from_raw(ptr: *mut Instance) -> InstanceHandle {
InstanceHandle {
inst: NonNull::new_unchecked(ptr),
}
}
// Safety argument for these deref impls: the instance's `Alloc` field contains an `Arc` to the
// region that backs this memory, keeping the page containing the `Instance` alive as long as the
// region exists
impl Deref for InstanceHandle {
type Target = Instance;
fn deref(&self) -> &Self::Target {
unsafe { self.inst.as_ref() }
}
}
impl DerefMut for InstanceHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { self.inst.as_mut() }
}
}
impl Drop for InstanceHandle {
fn drop(&mut self) {
// eprintln!("InstanceHandle::drop()");
// zero out magic, then run the destructor by taking and dropping the inner `Instance`
self.magic = 0;
unsafe {
mem::replace(self.inst.as_mut(), mem::uninitialized());
}
}
}
/// A Lucet program, together with its dedicated memory and signal handlers.
///
/// This is the primary interface for running programs, examining return values, and accessing the
/// WebAssembly heap.
///
/// `Instance`s are never created by runtime users directly, but rather are acquired from
/// [`Region`](trait.Region.html)s and often accessed through
/// [`InstanceHandle`](struct.InstanceHandle.html) smart pointers. This guarantees that instances
/// and their fields are never moved in memory, otherwise raw pointers in the metadata could be
/// unsafely invalidated.
#[repr(C)]
pub struct Instance {
/// Used to catch bugs in pointer math used to find the address of the instance
magic: u64,
/// The embedding context is a map containing embedder-specific values that are used to
/// implement hostcalls
pub(crate) embed_ctx: CtxMap,
/// The program (WebAssembly module) that is the entrypoint for the instance.
module: Arc<dyn Module>,
/// The `Context` in which the guest program runs
ctx: Context,
/// Instance state and error information
pub(crate) state: State,
/// The memory allocated for this instance
alloc: Alloc,
/// Handler run for signals that do not arise from a known WebAssembly trap, or that involve
/// memory outside of the current instance.
fatal_handler: fn(&Instance) ->!,
/// A fatal handler set from C
c_fatal_handler: Option<unsafe extern "C" fn(*mut Instance)>,
/// Handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the instance thread.
signal_handler: Box<
dyn Fn(
&Instance,
&TrapCode,
libc::c_int,
*const siginfo_t,
*const c_void,
) -> SignalBehavior,
>,
/// Pointer to the function used as the entrypoint (for use in backtraces)
entrypoint: *const extern "C" fn(),
/// Padding to ensure the pointer to globals at the end of the page occupied by the `Instance`
_reserved: [u8; INSTANCE_PADDING],
/// Pointer to the globals
///
/// This is accessed through the `vmctx` pointer, which points to the heap that begins
/// immediately after this struct, so it has to come at the very end.
globals_ptr: *const i64,
}
/// APIs that are internal, but useful to implementors of extension modules; you probably don't want
/// this trait!
///
/// This is a trait rather than inherent `impl`s in order to keep the `lucet-runtime` API clean and
/// safe.
pub trait InstanceInternal {
fn alloc(&self) -> &Alloc;
fn alloc_mut(&mut self) -> &mut Alloc;
fn module(&self) -> &dyn Module;
fn state(&self) -> &State;
fn valid_magic(&self) -> bool;
}
impl InstanceInternal for Instance {
/// Get a reference to the instance's `Alloc`.
fn alloc(&self) -> &Alloc |
/// Get a mutable reference to the instance's `Alloc`.
fn alloc_mut(&mut self) -> &mut Alloc {
&mut self.alloc
}
/// Get a reference to the instance's `Module`.
fn module(&self) -> &dyn Module {
self.module.deref()
}
/// Get a reference to the instance's `State`.
fn state(&self) -> &State {
&self.state
}
/// Check whether the instance magic is valid.
fn valid_magic(&self) -> bool {
self.magic == LUCET_INSTANCE_MAGIC
}
}
// Public API
impl Instance {
/// Run a function with arguments in the guest context at the given entrypoint.
///
/// ```no_run
/// # use lucet_runtime_internals::instance::InstanceHandle;
/// # let instance: InstanceHandle = unimplemented!();
/// // regular execution yields `Ok(UntypedRetVal)`
/// let retval = instance.run(b"factorial", &[5u64.into()]).unwrap();
/// assert_eq!(u64::from(retval), 120u64);
///
/// // runtime faults yield `Err(Error)`
/// let result = instance.run(b"faulting_function", &[]);
/// assert!(result.is_err());
/// ```
///
/// # Safety
///
/// This is unsafe in two ways:
///
/// - The type of the entrypoint might not be correct. It might take a different number or
/// different types of arguments than are provided to `args`. It might not even point to a
/// function! We will likely add type information to `lucetc` output so we can dynamically check
/// the type in the future.
///
/// - The entrypoint is foreign code. While we may be convinced that WebAssembly compiled to
/// native code by `lucetc` is safe, we do not have the same guarantee for the hostcalls that a
/// guest may invoke. They might be implemented in an unsafe language, so we must treat this
/// call as unsafe, just like any other FFI call.
///
/// For the moment, we do not mark this as `unsafe` in the Rust type system, but that may change
/// in the future.
pub fn run(&mut self, entrypoint: &[u8], args: &[Val]) -> Result<UntypedRetVal, Error> {
let func = self.module.get_export_func(entrypoint)?;
self.run_func(func, &args)
}
/// Run a function with arguments in the guest context from the [WebAssembly function
/// table](https://webassembly.github.io/spec/core/syntax/modules.html#tables).
///
/// The same safety caveats of [`Instance::run()`](struct.Instance.html#method.run) apply.
pub fn run_func_idx(
&mut self,
table_idx: u32,
func_idx: u32,
args: &[Val],
) -> Result<UntypedRetVal, Error> {
let func = self.module.get_func_from_idx(table_idx, func_idx)?;
self.run_func(func, &args)
}
/// Reset the instance's heap and global variables to their initial state.
///
/// The WebAssembly `start` section will also be run, if one exists.
///
/// The embedder contexts present at instance creation or added with
/// [`Instance::insert_embed_ctx()`](struct.Instance.html#method.insert_embed_ctx) are not
/// modified by this call; it is the embedder's responsibility to clear or reset their state if
/// necessary.
///
/// # Safety
///
/// This function runs the guest code for the WebAssembly `start` section, and running any guest
/// code is potentially unsafe; see [`Instance::run()`](struct.Instance.html#method.run).
pub fn reset(&mut self) -> Result<(), Error> {
self.alloc.reset_heap(self.module.as_ref())?;
let globals = unsafe { self.alloc.globals_mut() };
let mod_globals = self.module.globals();
for (i, v) in mod_globals.iter().enumerate() {
globals[i] = match v.global() {
Global::Import {.. } => {
return Err(Error::Unsupported(format!(
"global imports are unsupported; found: {:?}",
i
)));
}
Global::Def { def } => def.init_val(),
};
}
self.state = State::Ready {
retval: UntypedRetVal::default(),
};
self.run_start()?;
Ok(())
}
/// Grow the guest memory by the given number of WebAssembly pages.
///
/// On success, returns the number of pages that existed before the call.
pub fn grow_memory(&mut self, additional_pages: u32) -> Result<u32, Error> {
let orig_len = self
.alloc
.expand_heap(additional_pages * WASM_PAGE_SIZE, self.module.as_ref())?;
Ok(orig_len / WASM_PAGE_SIZE)
}
/// Return the WebAssembly heap as a slice of bytes.
pub fn heap(&self) -> &[u8] {
unsafe { self.alloc.heap() }
}
/// Return the WebAssembly heap as a mutable slice of bytes.
pub fn heap_mut(&mut self) -> &mut [u8] {
unsafe { self.alloc.heap_mut() }
}
/// Return the WebAssembly heap as a slice of `u32`s.
pub fn heap_u32(&self) -> &[u32] {
unsafe { self.alloc.heap_u32() }
}
/// Return the WebAssembly heap as a mutable slice of `u32`s.
pub fn heap_u32_mut(&mut self) -> &mut [u32] {
unsafe { self.alloc.heap_u32_mut() }
}
/// Return the WebAssembly globals as a slice of `i64`s.
pub fn globals(&self) -> &[i64] {
unsafe { self.alloc.globals() }
}
/// Return the WebAssembly globals as a mutable slice of `i64`s.
pub fn globals_mut(&mut self) -> &mut [i64] {
unsafe { self.alloc.globals_mut() }
}
/// Check whether a given range in the host address space overlaps with the memory that backs
/// the instance heap.
pub fn check_heap<T>(&self, ptr: *const T, len: usize) -> bool {
self.alloc.mem_in_heap(ptr, len)
}
/// Check whether a context value of a particular type exists.
pub fn contains_embed_ctx<T: Any>(&self) -> bool {
self.embed_ctx.contains::<T>()
}
/// Get a reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx<T: Any>(&self) -> Option<&T> {
self.embed_ctx.get::<T>()
}
/// Get a mutable reference to a context value of a particular type, if it exists.
pub fn get_embed_ctx_mut<T: Any>(&mut self) -> Option<&mut T> {
self.embed_ctx.get_mut::<T>()
}
/// Insert a context value.
///
/// If a context value of the same type already existed, it is returned.
///
/// **Note**: this method is intended for embedder contexts that need to be added _after_ an
/// instance is created and initialized. To add a context for an instance's entire lifetime,
/// including the execution of its `start` section, see
/// [`Region::new_instance_builder()`](trait.Region.html#method.new_instance_builder).
pub fn insert_embed_ctx<T: Any>(&mut self, x: T) -> Option<T> {
self.embed_ctx.insert(x)
}
/// Remove a context value of a particular type, returning it if it exists.
pub fn remove_embed_ctx<T: Any>(&mut self) -> Option<T> {
self.embed_ctx.remove::<T>()
}
/// Set the handler run when `SIGBUS`, `SIGFPE`, `SIGILL`, or `SIGSEGV` are caught by the
/// instance thread.
///
/// In most cases, these signals are unrecoverable for the instance that raised them, but do not
/// affect the rest of the process.
///
/// The default signal handler returns
/// [`SignalBehavior::Default`](enum.SignalBehavior.html#variant.Default), which yields a
/// runtime fault error.
///
/// The signal handler must be
/// [signal-safe](http://man7.org/linux/man-pages/man7/signal-safety.7.html).
pub fn set_signal_handler<H>(&mut self, handler: H)
where
H:'static
+ Fn(&Instance, &TrapCode, libc::c_int, *const siginfo_t, *const c_void) -> SignalBehavior,
{
self.signal_handler = Box::new(handler) as Box<SignalHandler>;
}
/// Set the handler run for signals that do not arise from a known WebAssembly trap, or that
/// involve memory outside of the current instance.
///
/// Fatal signals are not only unrecoverable for the instance that raised them, but may
/// compromise the correctness of the rest of the process if unhandled.
///
/// The default fatal handler calls `panic!()`.
pub fn set_fatal_handler(&mut self, handler: fn(&Instance) ->!) {
self.fatal_handler = handler;
}
/// Set the fatal handler to a C-compatible function.
///
/// This is a separate interface, because C functions can't return the `!` type. Like the
/// regular `fatal_handler`, it is not expected to return, but we cannot enforce that through
/// types.
///
/// When a fatal error occurs, this handler is run first, and then the regular `fatal_handler`
/// runs in case it returns.
pub fn set_c_fatal_handler(&mut self, handler: unsafe extern "C" fn(*mut Instance)) {
self.c_fatal_handler = Some(handler);
}
}
// Private API
impl Instance {
fn new(alloc: Alloc, module: Arc<dyn Module>, embed_ctx: CtxMap) -> Self {
let globals_ptr = alloc.slot().globals as *mut i64;
Instance {
magic: LUCET_INSTANCE_MAGIC,
embed_ctx: embed_ctx,
module,
ctx: Context::new(),
state: State::Ready {
retval: UntypedRetVal::default(),
},
alloc,
fatal_handler: default_fatal_handler,
c_fatal_handler: None,
signal_handler: Box::new(signal_handler_none) as Box<SignalHandler>,
entrypoint: ptr::null(),
_reserved: [0; INSTANCE_PADDING],
globals_ptr,
}
}
/// Run a function in guest context at the given entrypoint.
fn run_func(
&mut self,
func: *const extern "C" fn(),
args: &[Val],
) -> Result<UntypedRetVal, Error> {
lucet_ensure!(
self.state.is_ready(),
"instance must be ready; this is a bug"
);
if func.is_null() {
return Err(Error::InvalidArgument(
"entrypoint function cannot be null; this is probably a malformed module",
));
}
self.entrypoint = func;
let mut args_with_vmctx = vec![Val::from(self.alloc.slot().heap)];
args_with_vmctx.extend_from_slice(args);
HOST_CTX.with(|host_ctx| {
Context::init(
unsafe { self.alloc.stack_u64_mut() },
unsafe { &mut *host_ctx.get() },
&mut self.ctx,
func,
&args_with_vmctx,
)
})?;
self.state = State::Running;
// there should never be another instance running on this thread when we enter this function
CURRENT_INSTANCE.with(|current_instance| {
let mut current_instance = current_instance.borrow_mut();
assert!(
current_instance.is_none(),
"no other instance is running on this thread"
);
// safety: `self` is not null if we are in this function
*current_instance = Some(unsafe { NonNull::new_unchecked(self) });
});
self.with_signals_on(|i| {
HOST_CTX.with(|host_ctx| {
// Save the current context into `host_ctx`, and jump to the guest context. The
// lucet context is linked to host_ctx, so it will return here after it finishes,
// successfully or otherwise.
unsafe { Context::swap(&mut *host_ctx.get(), &mut i.ctx) };
Ok(())
})
})?;
CURRENT_INSTANCE.with(|current_instance| {
*current_instance.borrow_mut() = None;
});
// Sandbox has jumped back to the host process, indicating it has either:
//
// * trapped, or called hostcall_error: state tag changed to something other than `Running`
// * function body returned: set state back to `Ready` with return value
match &self.state {
State::Running => {
let retval = self.ctx.get_untyped_retval();
self.state = State::Ready { retval };
Ok(retval)
}
State::Terminated { details,.. } => Err(Error::RuntimeTerminated(details.clone())),
State::Fault {.. } => {
// Sandbox is no longer runnable. It's unsafe to determine all error details in the signal
// handler, so we fill in extra details here.
self.populate_fault_detail()?;
if let State::Fault { ref details,.. } = self.state {
if details.fatal {
// Some errors indicate that the guest is not functioning correctly or that
// the loaded code violated some assumption, so bail out via the fatal
// handler.
// Run the C-style fatal handler, if it exists.
self.c_fatal_handler
.map(|h| unsafe { h(self as *mut Instance) });
// If there is no C-style fatal handler, or if it (erroneously) returns,
// call the Rust handler that we know will not return
(self.fatal_handler)(self)
} else {
// leave the full fault details in the instance state, and return the
// higher-level info to the user
Err(Error::RuntimeFault(details.clone()))
}
} else {
panic!("state remains Fault after populate_fault_detail()")
}
}
State::Ready {.. } => {
panic!("instance in Ready state after returning from guest context")
}
}
}
fn run_start(&mut self) -> Result<(), Error> {
if let Some(start) = self.module.get_start_func()? {
self.run_func(start, &[])?;
}
Ok(())
}
fn populate_fault_detail(&mut self) -> Result<(), Error> {
if let State::Fault {
details:
FaultDetails {
rip_addr,
trapcode,
ref mut fatal,
ref mut rip_addr_details,
..
},
siginfo,
..
} = self.state
{
// We do this after returning from the signal handler because it requires `dladdr`
// calls, which are not signal safe
*rip_addr_details = self.module.addr_details(rip_addr as *const c_void)?.clone();
// If the trap table lookup returned unknown, it is a fatal error
let unknown_fault = trapcode.ty == TrapCodeType::Unknown;
// If the trap was a segv or bus fault and the addressed memory was outside the
// guard pages, it is also a fatal error
let outside_guard = (siginfo.si_signo == SIGSEGV || siginfo.si_signo == SIGBUS)
&&!self.alloc.addr_in_heap_guard(siginfo.si_addr());
*fatal = unknown_fault || outside_guard;
}
Ok(())
}
}
pub enum State {
Ready {
retval: UntypedRetVal,
},
Running,
Fault {
details: FaultDetails,
siginfo: libc::siginfo_t,
context: libc::ucontext_t,
},
Terminated {
details: TerminationDetails,
},
}
/// Information about a runtime fault.
///
/// Runtime faults are raised implictly by signal handlers that return `SignalBehavior::Default` in
/// response to signals arising while a guest is running.
#[derive(Clone, Debug)]
pub struct FaultDetails {
/// If true, the instance's `fatal_handler` will be called.
pub fatal: bool,
/// Information about the type of fault that occurred.
pub trapcode: TrapCode,
/// The instruction pointer where the fault occurred.
pub rip_addr: uintptr_t,
/// Extra information about the instruction pointer's location, if available.
pub rip_addr_details: Option<module::AddrDetails>,
}
impl std::fmt::Display for FaultDetails {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
if self.fatal {
write!(f, "fault FATAL ")?;
} else {
write!(f, "fault ")?;
}
self.trapcode.fmt(f)?;
write!(f, "code at address {:p}", self.rip_addr as *const c_void)?;
if let Some(ref addr_details) = self.rip_addr_details {
if let Some(ref fname) = addr_details.file_name {
| {
&self.alloc
} | identifier_body |
queued.rs | // Copyright (c) 2013-2016 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::any_pointer;
use capnp::capability::Promise;
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp, ResultsHook};
use capnp::Error;
use futures::{Future, FutureExt, TryFutureExt};
use std::cell::RefCell;
use std::rc::{Rc, Weak};
use crate::attach::Attach;
use crate::sender_queue::SenderQueue;
use crate::{broken, local};
pub struct PipelineInner {
// Once the promise resolves, this will become non-null and point to the underlying object.
redirect: Option<Box<dyn PipelineHook>>,
promise_to_drive: futures::future::Shared<Promise<(), Error>>,
clients_to_resolve: SenderQueue<(Weak<RefCell<ClientInner>>, Vec<PipelineOp>), ()>,
}
impl PipelineInner {
fn resolve(this: &Rc<RefCell<Self>>, result: Result<Box<dyn PipelineHook>, Error>) {
assert!(this.borrow().redirect.is_none());
let pipeline = match result {
Ok(pipeline_hook) => pipeline_hook,
Err(e) => Box::new(broken::Pipeline::new(e)),
};
this.borrow_mut().redirect = Some(pipeline.add_ref());
for ((weak_client, ops), waiter) in this.borrow_mut().clients_to_resolve.drain() {
if let Some(client) = weak_client.upgrade() {
let clienthook = pipeline.get_pipelined_cap_move(ops);
ClientInner::resolve(&client, Ok(clienthook));
}
let _ = waiter.send(());
}
this.borrow_mut().promise_to_drive = Promise::ok(()).shared();
}
}
pub struct PipelineInnerSender {
inner: Option<Weak<RefCell<PipelineInner>>>,
}
impl Drop for PipelineInnerSender {
fn drop(&mut self) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
PipelineInner::resolve(
&pipeline_inner,
Ok(Box::new(crate::broken::Pipeline::new(Error::failed(
"PipelineInnerSender was canceled".into(),
)))),
);
}
}
}
}
impl PipelineInnerSender {
pub fn complete(mut self, pipeline: Box<dyn PipelineHook>) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
crate::queued::PipelineInner::resolve(&pipeline_inner, Ok(pipeline));
}
}
}
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new() -> (PipelineInnerSender, Self) {
let inner = Rc::new(RefCell::new(PipelineInner {
redirect: None,
promise_to_drive: Promise::ok(()).shared(),
clients_to_resolve: SenderQueue::new(),
}));
(
PipelineInnerSender {
inner: Some(Rc::downgrade(&inner)),
},
Self { inner },
)
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> +'static + Unpin,
{
let new = Promise::from_future(
futures::future::try_join(self.inner.borrow_mut().promise_to_drive.clone(), promise)
.map_ok(|_| ()),
)
.shared();
self.inner.borrow_mut().promise_to_drive = new;
}
}
impl Clone for Pipeline {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl PipelineHook for Pipeline {
fn | (&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
self.get_pipelined_cap_move(ops.into())
}
fn get_pipelined_cap_move(&self, ops: Vec<PipelineOp>) -> Box<dyn ClientHook> {
if let Some(p) = &self.inner.borrow().redirect {
return p.get_pipelined_cap_move(ops);
}
let mut queued_client = Client::new(Some(self.inner.clone()));
queued_client.drive(self.inner.borrow().promise_to_drive.clone());
let weak_queued = Rc::downgrade(&queued_client.inner);
self.inner
.borrow_mut()
.clients_to_resolve
.push_detach((weak_queued, ops));
Box::new(queued_client)
}
}
pub struct ClientInner {
// Once the promise resolves, this will become non-null and point to the underlying object.
redirect: Option<Box<dyn ClientHook>>,
// The queued::PipelineInner that this client is derived from, if any. We need to hold on
// to a reference to it so that it doesn't get canceled before the client is resolved.
pipeline_inner: Option<Rc<RefCell<PipelineInner>>>,
promise_to_drive: Option<futures::future::Shared<Promise<(), Error>>>,
// When this promise resolves, each queued call will be forwarded to the real client. This needs
// to occur *before* any 'whenMoreResolved()' promises resolve, because we want to make sure
// previously-queued calls are delivered before any new calls made in response to the resolution.
call_forwarding_queue:
SenderQueue<(u64, u16, Box<dyn ParamsHook>, Box<dyn ResultsHook>), Promise<(), Error>>,
// whenMoreResolved() returns forks of this promise. These must resolve *after* queued calls
// have been initiated (so that any calls made in the whenMoreResolved() handler are correctly
// delivered after calls made earlier), but *before* any queued calls return (because it might
// confuse the application if a queued call returns before the capability on which it was made
// resolves). Luckily, we know that queued calls will involve, at the very least, an
// eventLoop.evalLater.
client_resolution_queue: SenderQueue<(), Box<dyn ClientHook>>,
}
impl ClientInner {
pub fn resolve(state: &Rc<RefCell<Self>>, result: Result<Box<dyn ClientHook>, Error>) {
assert!(state.borrow().redirect.is_none());
let client = match result {
Ok(clienthook) => clienthook,
Err(e) => broken::new_cap(e),
};
state.borrow_mut().redirect = Some(client.add_ref());
for (args, waiter) in state.borrow_mut().call_forwarding_queue.drain() {
let (interface_id, method_id, params, results) = args;
let result_promise = client.call(interface_id, method_id, params, results);
let _ = waiter.send(result_promise);
}
for ((), waiter) in state.borrow_mut().client_resolution_queue.drain() {
let _ = waiter.send(client.add_ref());
}
state.borrow_mut().promise_to_drive.take();
state.borrow_mut().pipeline_inner.take();
}
}
pub struct Client {
pub inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(pipeline_inner: Option<Rc<RefCell<PipelineInner>>>) -> Self {
let inner = Rc::new(RefCell::new(ClientInner {
promise_to_drive: None,
pipeline_inner,
redirect: None,
call_forwarding_queue: SenderQueue::new(),
client_resolution_queue: SenderQueue::new(),
}));
Self { inner }
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> +'static + Unpin,
{
assert!(self.inner.borrow().promise_to_drive.is_none());
self.inner.borrow_mut().promise_to_drive = Some(Promise::from_future(promise).shared());
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(Self {
inner: self.inner.clone(),
})
}
fn new_call(
&self,
interface_id: u64,
method_id: u16,
size_hint: Option<::capnp::MessageSize>,
) -> ::capnp::capability::Request<any_pointer::Owned, any_pointer::Owned> {
::capnp::capability::Request::new(Box::new(local::Request::new(
interface_id,
method_id,
size_hint,
self.add_ref(),
)))
}
fn call(
&self,
interface_id: u64,
method_id: u16,
params: Box<dyn ParamsHook>,
results: Box<dyn ResultsHook>,
) -> Promise<(), Error> {
if let Some(client) = &self.inner.borrow().redirect {
return client.call(interface_id, method_id, params, results);
}
let inner_clone = self.inner.clone();
let promise = self
.inner
.borrow_mut()
.call_forwarding_queue
.push((interface_id, method_id, params, results))
.attach(inner_clone)
.and_then(|x| x);
match self.inner.borrow().promise_to_drive {
Some(ref p) => {
Promise::from_future(futures::future::try_join(p.clone(), promise).map_ok(|v| v.1))
}
None => Promise::from_future(promise),
}
}
fn get_ptr(&self) -> usize {
(&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
match &self.inner.borrow().redirect {
Some(inner) => Some(inner.clone()),
None => None,
}
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
if let Some(client) = &self.inner.borrow().redirect {
return Some(Promise::ok(client.add_ref()));
}
let promise = self.inner.borrow_mut().client_resolution_queue.push(());
match &self.inner.borrow().promise_to_drive {
Some(p) => Some(Promise::from_future(
futures::future::try_join(p.clone(), promise).map_ok(|v| v.1),
)),
None => Some(Promise::from_future(promise)),
}
}
fn when_resolved(&self) -> Promise<(), Error> {
crate::rpc::default_when_resolved_impl(self)
}
}
| add_ref | identifier_name |
queued.rs | // Copyright (c) 2013-2016 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use capnp::any_pointer;
use capnp::capability::Promise;
use capnp::private::capability::{ClientHook, ParamsHook, PipelineHook, PipelineOp, ResultsHook};
use capnp::Error;
use futures::{Future, FutureExt, TryFutureExt};
use std::cell::RefCell;
use std::rc::{Rc, Weak};
use crate::attach::Attach;
use crate::sender_queue::SenderQueue;
use crate::{broken, local};
pub struct PipelineInner {
// Once the promise resolves, this will become non-null and point to the underlying object.
redirect: Option<Box<dyn PipelineHook>>,
promise_to_drive: futures::future::Shared<Promise<(), Error>>,
clients_to_resolve: SenderQueue<(Weak<RefCell<ClientInner>>, Vec<PipelineOp>), ()>,
}
impl PipelineInner {
fn resolve(this: &Rc<RefCell<Self>>, result: Result<Box<dyn PipelineHook>, Error>) {
assert!(this.borrow().redirect.is_none());
let pipeline = match result {
Ok(pipeline_hook) => pipeline_hook,
Err(e) => Box::new(broken::Pipeline::new(e)),
};
this.borrow_mut().redirect = Some(pipeline.add_ref());
for ((weak_client, ops), waiter) in this.borrow_mut().clients_to_resolve.drain() {
if let Some(client) = weak_client.upgrade() {
let clienthook = pipeline.get_pipelined_cap_move(ops);
ClientInner::resolve(&client, Ok(clienthook));
}
let _ = waiter.send(());
}
this.borrow_mut().promise_to_drive = Promise::ok(()).shared();
}
}
pub struct PipelineInnerSender {
inner: Option<Weak<RefCell<PipelineInner>>>,
}
impl Drop for PipelineInnerSender {
fn drop(&mut self) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
PipelineInner::resolve(
&pipeline_inner,
Ok(Box::new(crate::broken::Pipeline::new(Error::failed(
"PipelineInnerSender was canceled".into(),
)))),
);
}
}
}
}
impl PipelineInnerSender {
pub fn complete(mut self, pipeline: Box<dyn PipelineHook>) {
if let Some(weak_queued) = self.inner.take() {
if let Some(pipeline_inner) = weak_queued.upgrade() {
crate::queued::PipelineInner::resolve(&pipeline_inner, Ok(pipeline));
}
}
}
}
pub struct Pipeline {
inner: Rc<RefCell<PipelineInner>>,
}
impl Pipeline {
pub fn new() -> (PipelineInnerSender, Self) {
let inner = Rc::new(RefCell::new(PipelineInner {
redirect: None,
promise_to_drive: Promise::ok(()).shared(),
clients_to_resolve: SenderQueue::new(),
}));
(
PipelineInnerSender {
inner: Some(Rc::downgrade(&inner)),
},
Self { inner },
)
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> +'static + Unpin,
{
let new = Promise::from_future(
futures::future::try_join(self.inner.borrow_mut().promise_to_drive.clone(), promise)
.map_ok(|_| ()),
)
.shared();
self.inner.borrow_mut().promise_to_drive = new;
}
}
impl Clone for Pipeline {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl PipelineHook for Pipeline {
fn add_ref(&self) -> Box<dyn PipelineHook> {
Box::new(self.clone())
}
fn get_pipelined_cap(&self, ops: &[PipelineOp]) -> Box<dyn ClientHook> {
self.get_pipelined_cap_move(ops.into())
}
fn get_pipelined_cap_move(&self, ops: Vec<PipelineOp>) -> Box<dyn ClientHook> {
if let Some(p) = &self.inner.borrow().redirect {
return p.get_pipelined_cap_move(ops);
}
let mut queued_client = Client::new(Some(self.inner.clone()));
queued_client.drive(self.inner.borrow().promise_to_drive.clone());
let weak_queued = Rc::downgrade(&queued_client.inner);
self.inner
.borrow_mut()
.clients_to_resolve
.push_detach((weak_queued, ops));
Box::new(queued_client)
}
}
pub struct ClientInner {
// Once the promise resolves, this will become non-null and point to the underlying object.
redirect: Option<Box<dyn ClientHook>>,
// The queued::PipelineInner that this client is derived from, if any. We need to hold on
// to a reference to it so that it doesn't get canceled before the client is resolved.
pipeline_inner: Option<Rc<RefCell<PipelineInner>>>,
promise_to_drive: Option<futures::future::Shared<Promise<(), Error>>>,
// When this promise resolves, each queued call will be forwarded to the real client. This needs
// to occur *before* any 'whenMoreResolved()' promises resolve, because we want to make sure
// previously-queued calls are delivered before any new calls made in response to the resolution.
call_forwarding_queue:
SenderQueue<(u64, u16, Box<dyn ParamsHook>, Box<dyn ResultsHook>), Promise<(), Error>>,
// whenMoreResolved() returns forks of this promise. These must resolve *after* queued calls
// have been initiated (so that any calls made in the whenMoreResolved() handler are correctly | client_resolution_queue: SenderQueue<(), Box<dyn ClientHook>>,
}
impl ClientInner {
pub fn resolve(state: &Rc<RefCell<Self>>, result: Result<Box<dyn ClientHook>, Error>) {
assert!(state.borrow().redirect.is_none());
let client = match result {
Ok(clienthook) => clienthook,
Err(e) => broken::new_cap(e),
};
state.borrow_mut().redirect = Some(client.add_ref());
for (args, waiter) in state.borrow_mut().call_forwarding_queue.drain() {
let (interface_id, method_id, params, results) = args;
let result_promise = client.call(interface_id, method_id, params, results);
let _ = waiter.send(result_promise);
}
for ((), waiter) in state.borrow_mut().client_resolution_queue.drain() {
let _ = waiter.send(client.add_ref());
}
state.borrow_mut().promise_to_drive.take();
state.borrow_mut().pipeline_inner.take();
}
}
pub struct Client {
pub inner: Rc<RefCell<ClientInner>>,
}
impl Client {
pub fn new(pipeline_inner: Option<Rc<RefCell<PipelineInner>>>) -> Self {
let inner = Rc::new(RefCell::new(ClientInner {
promise_to_drive: None,
pipeline_inner,
redirect: None,
call_forwarding_queue: SenderQueue::new(),
client_resolution_queue: SenderQueue::new(),
}));
Self { inner }
}
pub fn drive<F>(&mut self, promise: F)
where
F: Future<Output = Result<(), Error>> +'static + Unpin,
{
assert!(self.inner.borrow().promise_to_drive.is_none());
self.inner.borrow_mut().promise_to_drive = Some(Promise::from_future(promise).shared());
}
}
impl ClientHook for Client {
fn add_ref(&self) -> Box<dyn ClientHook> {
Box::new(Self {
inner: self.inner.clone(),
})
}
fn new_call(
&self,
interface_id: u64,
method_id: u16,
size_hint: Option<::capnp::MessageSize>,
) -> ::capnp::capability::Request<any_pointer::Owned, any_pointer::Owned> {
::capnp::capability::Request::new(Box::new(local::Request::new(
interface_id,
method_id,
size_hint,
self.add_ref(),
)))
}
fn call(
&self,
interface_id: u64,
method_id: u16,
params: Box<dyn ParamsHook>,
results: Box<dyn ResultsHook>,
) -> Promise<(), Error> {
if let Some(client) = &self.inner.borrow().redirect {
return client.call(interface_id, method_id, params, results);
}
let inner_clone = self.inner.clone();
let promise = self
.inner
.borrow_mut()
.call_forwarding_queue
.push((interface_id, method_id, params, results))
.attach(inner_clone)
.and_then(|x| x);
match self.inner.borrow().promise_to_drive {
Some(ref p) => {
Promise::from_future(futures::future::try_join(p.clone(), promise).map_ok(|v| v.1))
}
None => Promise::from_future(promise),
}
}
fn get_ptr(&self) -> usize {
(&*self.inner.borrow()) as *const _ as usize
}
fn get_brand(&self) -> usize {
0
}
fn get_resolved(&self) -> Option<Box<dyn ClientHook>> {
match &self.inner.borrow().redirect {
Some(inner) => Some(inner.clone()),
None => None,
}
}
fn when_more_resolved(&self) -> Option<Promise<Box<dyn ClientHook>, Error>> {
if let Some(client) = &self.inner.borrow().redirect {
return Some(Promise::ok(client.add_ref()));
}
let promise = self.inner.borrow_mut().client_resolution_queue.push(());
match &self.inner.borrow().promise_to_drive {
Some(p) => Some(Promise::from_future(
futures::future::try_join(p.clone(), promise).map_ok(|v| v.1),
)),
None => Some(Promise::from_future(promise)),
}
}
fn when_resolved(&self) -> Promise<(), Error> {
crate::rpc::default_when_resolved_impl(self)
}
} | // delivered after calls made earlier), but *before* any queued calls return (because it might
// confuse the application if a queued call returns before the capability on which it was made
// resolves). Luckily, we know that queued calls will involve, at the very least, an
// eventLoop.evalLater. | random_line_split |
framework.rs | surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
/// Wasm-only rendering targets used when the `offscreen_canvas` URL query
/// parameter is set to `true` (see `setup`): the wgpu surface is created from
/// `offscreen_canvas` instead of the on-page canvas.
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
    // 1024x768 canvas created in `setup`; passed to
    // `Instance::create_surface_from_offscreen_canvas` to back the wgpu surface.
    offscreen_canvas: OffscreenCanvas,
    // "bitmaprenderer" context obtained from the window's on-page canvas via
    // `get_context("bitmaprenderer")`; presumably used to blit rendered frames
    // from the offscreen canvas onto the page — confirm at the presentation site.
    bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event,.. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
|
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> +'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if!is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
| fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
} | random_line_split |
framework.rs | surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup | let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event,.. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> +'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if!is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
| {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap(); | identifier_body |
framework.rs | surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event,.. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => |
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> +'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn run<E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if!is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
| {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
} | conditional_block |
framework.rs | surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup: Option<OffscreenCanvasSetup>,
}
#[cfg(target_arch = "wasm32")]
struct OffscreenCanvasSetup {
offscreen_canvas: OffscreenCanvas,
bitmap_renderer: ImageBitmapRenderingContext,
}
async fn setup<E: Example>(title: &str) -> Setup {
#[cfg(not(target_arch = "wasm32"))]
{
env_logger::init();
};
let event_loop = EventLoop::new();
let mut builder = winit::window::WindowBuilder::new();
builder = builder.with_title(title);
#[cfg(windows_OFF)] // TODO
{
use winit::platform::windows::WindowBuilderExtWindows;
builder = builder.with_no_redirection_bitmap(true);
}
let window = builder.build(&event_loop).unwrap();
#[cfg(target_arch = "wasm32")]
{
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
let level: log::Level = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok())
.unwrap_or(log::Level::Error);
console_log::init_with_level(level).expect("could not initialize logger");
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
// On wasm, append the canvas to the document body
web_sys::window()
.and_then(|win| win.document())
.and_then(|doc| doc.body())
.and_then(|body| {
body.append_child(&web_sys::Element::from(window.canvas()))
.ok()
})
.expect("couldn't append canvas to document body");
}
#[cfg(target_arch = "wasm32")]
let mut offscreen_canvas_setup: Option<OffscreenCanvasSetup> = None;
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowExtWebSys;
let query_string = web_sys::window().unwrap().location().search().unwrap();
if let Some(offscreen_canvas_param) =
parse_url_query_string(&query_string, "offscreen_canvas")
{
if FromStr::from_str(offscreen_canvas_param) == Ok(true) {
log::info!("Creating OffscreenCanvasSetup");
let offscreen_canvas =
OffscreenCanvas::new(1024, 768).expect("couldn't create OffscreenCanvas");
let bitmap_renderer = window
.canvas()
.get_context("bitmaprenderer")
.expect("couldn't create ImageBitmapRenderingContext (Result)")
.expect("couldn't create ImageBitmapRenderingContext (Option)")
.dyn_into::<ImageBitmapRenderingContext>()
.expect("couldn't convert into ImageBitmapRenderingContext");
offscreen_canvas_setup = Some(OffscreenCanvasSetup {
offscreen_canvas,
bitmap_renderer,
})
}
}
};
log::info!("Initializing the surface...");
let backends = wgpu::util::backend_bits_from_env().unwrap_or_else(wgpu::Backends::all);
let dx12_shader_compiler = wgpu::util::dx12_shader_compiler_from_env().unwrap_or_default();
let gles_minor_version = wgpu::util::gles_minor_version_from_env().unwrap_or_default();
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
backends,
dx12_shader_compiler,
gles_minor_version,
});
let (size, surface) = unsafe {
let size = window.inner_size();
#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
let surface = instance.create_surface(&window).unwrap();
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
let surface = {
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
log::info!("Creating surface from OffscreenCanvas");
instance.create_surface_from_offscreen_canvas(
offscreen_canvas_setup.offscreen_canvas.clone(),
)
} else {
instance.create_surface(&window)
}
}
.unwrap();
(size, surface)
};
let adapter = wgpu::util::initialize_adapter_from_env_or_default(&instance, Some(&surface))
.await
.expect("No suitable GPU adapters found on the system!");
#[cfg(not(target_arch = "wasm32"))]
{
let adapter_info = adapter.get_info();
println!("Using {} ({:?})", adapter_info.name, adapter_info.backend);
}
let optional_features = E::optional_features();
let required_features = E::required_features();
let adapter_features = adapter.features();
assert!(
adapter_features.contains(required_features),
"Adapter does not support required features for this example: {:?}",
required_features - adapter_features
);
let required_downlevel_capabilities = E::required_downlevel_capabilities();
let downlevel_capabilities = adapter.get_downlevel_capabilities();
assert!(
downlevel_capabilities.shader_model >= required_downlevel_capabilities.shader_model,
"Adapter does not support the minimum shader model required to run this example: {:?}",
required_downlevel_capabilities.shader_model
);
assert!(
downlevel_capabilities
.flags
.contains(required_downlevel_capabilities.flags),
"Adapter does not support the downlevel capabilities required to run this example: {:?}",
required_downlevel_capabilities.flags - downlevel_capabilities.flags
);
// Make sure we use the texture resolution limits from the adapter, so we can support images the size of the surface.
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: (optional_features & adapter_features) | required_features,
limits: needed_limits,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
#[cfg(target_arch = "wasm32")]
offscreen_canvas_setup,
}
}
fn start<E: Example>(
#[cfg(not(target_arch = "wasm32"))] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
}: Setup,
#[cfg(target_arch = "wasm32")] Setup {
window,
event_loop,
instance,
size,
surface,
adapter,
device,
queue,
offscreen_canvas_setup,
}: Setup,
) {
let spawner = Spawner::new();
let mut config = surface
.get_default_config(&adapter, size.width, size.height)
.expect("Surface isn't supported by the adapter.");
let surface_view_format = config.format.add_srgb_suffix();
config.view_formats.push(surface_view_format);
surface.configure(&device, &config);
log::info!("Initializing the example...");
let mut example = E::init(&config, &adapter, &device, &queue);
#[cfg(not(target_arch = "wasm32"))]
let mut last_frame_inst = Instant::now();
#[cfg(not(target_arch = "wasm32"))]
let (mut frame_count, mut accum_time) = (0, 0.0);
log::info!("Entering render loop...");
event_loop.run(move |event, _, control_flow| {
let _ = (&instance, &adapter); // force ownership by the closure
*control_flow = if cfg!(feature = "metal-auto-capture") {
ControlFlow::Exit
} else {
ControlFlow::Poll
};
match event {
event::Event::RedrawEventsCleared => {
#[cfg(not(target_arch = "wasm32"))]
spawner.run_until_stalled();
window.request_redraw();
}
event::Event::WindowEvent {
event:
WindowEvent::Resized(size)
| WindowEvent::ScaleFactorChanged {
new_inner_size: &mut size,
..
},
..
} => {
// Once winit is fixed, the detection conditions here can be removed.
// https://github.com/rust-windowing/winit/issues/2876
let max_dimension = adapter.limits().max_texture_dimension_2d;
if size.width > max_dimension || size.height > max_dimension {
log::warn!(
"The resizing size {:?} exceeds the limit of {}.",
size,
max_dimension
);
} else {
log::info!("Resizing to {:?}", size);
config.width = size.width.max(1);
config.height = size.height.max(1);
example.resize(&config, &device, &queue);
surface.configure(&device, &config);
}
}
event::Event::WindowEvent { event,.. } => match event {
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::Escape),
state: event::ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
input:
event::KeyboardInput {
virtual_keycode: Some(event::VirtualKeyCode::R),
state: event::ElementState::Pressed,
..
},
..
} => {
println!("{:#?}", instance.generate_report());
}
_ => {
example.update(event);
}
},
event::Event::RedrawRequested(_) => {
#[cfg(not(target_arch = "wasm32"))]
{
accum_time += last_frame_inst.elapsed().as_secs_f32();
last_frame_inst = Instant::now();
frame_count += 1;
if frame_count == 100 {
println!(
"Avg frame time {}ms",
accum_time * 1000.0 / frame_count as f32
);
accum_time = 0.0;
frame_count = 0;
}
}
let frame = match surface.get_current_texture() {
Ok(frame) => frame,
Err(_) => {
surface.configure(&device, &config);
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
};
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface_view_format),
..wgpu::TextureViewDescriptor::default()
});
example.render(&view, &device, &queue, &spawner);
frame.present();
#[cfg(target_arch = "wasm32")]
{
if let Some(offscreen_canvas_setup) = &offscreen_canvas_setup {
let image_bitmap = offscreen_canvas_setup
.offscreen_canvas
.transfer_to_image_bitmap()
.expect("couldn't transfer offscreen canvas to image bitmap.");
offscreen_canvas_setup
.bitmap_renderer
.transfer_from_image_bitmap(&image_bitmap);
log::info!("Transferring OffscreenCanvas to ImageBitmapRenderer");
}
}
}
_ => {}
}
});
}
#[cfg(not(target_arch = "wasm32"))]
pub struct Spawner<'a> {
executor: async_executor::LocalExecutor<'a>,
}
#[cfg(not(target_arch = "wasm32"))]
impl<'a> Spawner<'a> {
fn new() -> Self {
Self {
executor: async_executor::LocalExecutor::new(),
}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> + 'a) {
self.executor.spawn(future).detach();
}
fn run_until_stalled(&self) {
while self.executor.try_tick() {}
}
}
#[cfg(target_arch = "wasm32")]
pub struct Spawner {}
#[cfg(target_arch = "wasm32")]
impl Spawner {
fn new() -> Self {
Self {}
}
#[allow(dead_code)]
pub fn spawn_local(&self, future: impl Future<Output = ()> +'static) {
wasm_bindgen_futures::spawn_local(future);
}
}
#[cfg(not(target_arch = "wasm32"))]
pub fn run<E: Example>(title: &str) {
let setup = pollster::block_on(setup::<E>(title));
start::<E>(setup);
}
#[cfg(target_arch = "wasm32")]
pub fn | <E: Example>(title: &str) {
use wasm_bindgen::prelude::*;
let title = title.to_owned();
wasm_bindgen_futures::spawn_local(async move {
let setup = setup::<E>(&title).await;
let start_closure = Closure::once_into_js(move || start::<E>(setup));
// make sure to handle JS exceptions thrown inside start.
// Otherwise wasm_bindgen_futures Queue would break and never handle any tasks again.
// This is required, because winit uses JS exception for control flow to escape from `run`.
if let Err(error) = call_catch(&start_closure) {
let is_control_flow_exception = error.dyn_ref::<js_sys::Error>().map_or(false, |e| {
e.message().includes("Using exceptions for control flow", 0)
});
if!is_control_flow_exception {
web_sys::console::error_1(&error);
}
}
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(catch, js_namespace = Function, js_name = "prototype.call.call")]
fn call_catch(this: &JsValue) -> Result<(), JsValue>;
}
});
}
#[cfg(target_arch = "wasm32")]
/// Parse the query string as returned by `web_sys::window()?.location().search()?` and get a
/// specific key out of it.
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
pub use wgpu_test::image::ComparisonType;
pub struct FrameworkRefTest {
// Path to the reference image, relative to the root of the repo.
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
/// Comparisons against FLIP statistics that determine if the test passes or fails.
pub comparisons: &'static [ComparisonType],
}
#[allow(dead_code)]
pub fn test<E: Example>(mut params: FrameworkRefTest) {
use std::mem;
assert_eq!(params.width % 64, 0, "width needs to be aligned 64");
let features = E::required_features() | params.optional_features;
wgpu_test::initialize_test(
mem::take(&mut params.base_test_parameters).features(features),
|ctx| {
let spawner = Spawner::new();
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
| run | identifier_name |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
#[derive(Debug, PartialEq)]
enum ReplyTo {
Github {
repo: String,
issue_number: u64,
},
ZulipPublic {
stream_id: u64,
subject: String,
},
ZulipPrivate {
user_id: u64,
},
}
impl ReplyTo {
async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
crate::github::gh_post_comment(repo, issue_number, body).await?;
Ok(())
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
crate::zulip::zulip_post_public_message(stream_id, subject, body).await
}
ReplyTo::ZulipPrivate { user_id } => {
crate::zulip::zulip_post_private_message(user_id, body).await
}
}
}
const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";
fn to_commit_header(&self) -> String {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
}
ReplyTo::ZulipPrivate { user_id } => {
format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
}
}
}
fn from_commit_message(message: &str) -> Result<Self, ()> {
for line in message.lines() {
let line = line.trim();
if!line.starts_with(Self::COMMIT_HEADER) {
continue;
}
let header = line[Self::COMMIT_HEADER.len()+1..].trim();
let mut split = header.split(" ");
let kind = split.next().ok_or(())?.trim();
let to = split.next().ok_or(())?.trim();
match kind {
"github" => {
if split.next().is_some() {
return Err(());
}
let mut split = to.split("#");
let repo = split.next().ok_or(())?.trim();
let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
if split.next().is_some() {
return Err(());
}
return Ok(ReplyTo::Github {
repo: repo.to_string(),
issue_number,
});
}
"zulip-public" => {
let stream_id: u64 = to.parse().map_err(|_| ())?;
let subject = header[header.find("|").ok_or(())?+2..].to_string();
return Ok(ReplyTo::ZulipPublic {
stream_id,
subject,
})
}
"zulip-private" => {
if split.next().is_some() {
return Err(());
}
let user_id = to.parse().map_err(|_| ())?;
return Ok(ReplyTo::ZulipPrivate {
user_id,
});
}
_ => return Err(()),
}
}
Err(())
}
}
#[test]
fn test_reply_to_parsing() {
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
);
}
enum Command {
Bisect {
start: Option<String>,
end: String,
code: String,
},
}
impl Command {
fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
let mut lines = comment.lines();
while let Some(line) = lines.next() {
let line = line.trim();
if!line.starts_with(BOT_NAME) {
continue;
}
let line = line[BOT_NAME.len()..].trim();
let mut parts = line.split(" ").map(|part| part.trim());
match parts.next() {
Some("bisect") => {
let mut start = None;
let mut end = None;
for part in parts {
if part.starts_with("start=") {
if start.is_some() {
return Err(format!("start range specified twice"));
}
start = Some(part["start=".len()..].to_string());
} else if part.starts_with("end=") {
if end.is_some() {
return Err(format!("end range specified twice"));
}
end = Some(part["end=".len()..].to_string());
} else {
return Err(format!("unknown command part {:?}", part));
}
}
let end = end.ok_or("missing end range")?;
loop {
match lines.next() {
Some(line) if line.trim() == "```rust" => break,
Some(_) => {}
None => {
return Err("didn't find repro code".to_string());
}
}
}
let code = lines.take_while(|line| line.trim()!= "```").collect::<Vec<_>>().join("\n");
return Ok(Some(Command::Bisect {
start,
end,
code,
}));
}
cmd => {
return Err(format!("unknown command {:?}", cmd));
}
}
}
return Ok(None);
}
}
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match Command::parse_comment(comment)? {
Some(Command::Bisect {
start,
end,
code,
}) => {
let mut cmds = Vec::new();
if let Some(start) = start {
cmds.push(format!("--start={}", start)); | cmds.push(format!("--end={}", end));
println!("{:?}", &cmds);
push_job(&reply_to, comment_id, &cmds, &code).await?;
}
None => {}
}
Ok(())
}
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
// Escape commands and join with whitespace
let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
let src_lib = create_blob(repro).await?;
let src = create_tree(&[TreeEntry {
path: "lib.rs".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: src_lib,
}]).await?;
let github_workflow_bisect = create_blob(&format!(
r#"
name: Bisect
on:
- push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Cache cargo installed crates
uses: actions/[email protected]
with:
path: ~/.cargo/bin
key: cargo-installed-crates-2
- run: cargo install cargo-bisect-rustc || true
- name: Bisect
run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
bisect_cmds,
)).await?;
let github_workflow = create_tree(&[TreeEntry {
path: "bisect.yaml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: github_workflow_bisect,
}]).await?;
let github = create_tree(&[TreeEntry {
path: "workflows".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github_workflow,
}]).await?;
let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
let root = create_tree(&[
TreeEntry {
path: "src".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: src,
},
TreeEntry {
path: ".github".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github,
},
TreeEntry {
path: "Cargo.toml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: cargo,
}
]).await?;
let commit = create_commit(
&format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
&root,
&[],
).await?;
push_branch(&format!("job-{}", job_id), &commit).await?;
Ok(())
}
async fn create_blob(content: &str) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"content": content,
"encoding": "utf-8",
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created blob: {}", sha);
Ok(sha)
}
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
"tree": content,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created tree: {}", sha);
Ok(sha)
}
#[derive(serde::Serialize)]
struct TreeEntry {
path: String,
mode: TreeEntryMode,
#[serde(rename = "type")]
type_: TreeEntryType,
sha: String,
}
#[derive(serde::Serialize)]
enum TreeEntryMode {
#[serde(rename = "100644")]
File,
#[serde(rename = "100755")]
Executable,
#[serde(rename = "040000")]
Subdirectory,
#[serde(rename = "160000")]
Submodule,
#[serde(rename = "120000")]
Symlink,
}
#[derive(serde::Serialize)]
enum TreeEntryType {
#[serde(rename = "blob")]
Blob,
#[serde(rename = "tree")]
Tree,
#[serde(rename = "commit")]
Commit,
}
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
"message": message,
"tree": tree,
"parents": parents,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created commit: {}", sha);
Ok(sha)
}
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"ref": format!("refs/heads/{}", branch),
"sha": commit,
})).unwrap()).await?;
println!("pushed branch: {}", res);
Ok(())
} | } | random_line_split |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
#[derive(Debug, PartialEq)]
enum ReplyTo {
Github {
repo: String,
issue_number: u64,
},
ZulipPublic {
stream_id: u64,
subject: String,
},
ZulipPrivate {
user_id: u64,
},
}
impl ReplyTo {
async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
crate::github::gh_post_comment(repo, issue_number, body).await?;
Ok(())
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
crate::zulip::zulip_post_public_message(stream_id, subject, body).await
}
ReplyTo::ZulipPrivate { user_id } => {
crate::zulip::zulip_post_private_message(user_id, body).await
}
}
}
const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";
fn to_commit_header(&self) -> String {
match *self {
ReplyTo::Github { ref repo, issue_number } => {
format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
}
ReplyTo::ZulipPublic { stream_id, ref subject } => {
format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
}
ReplyTo::ZulipPrivate { user_id } => {
format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
}
}
}
fn from_commit_message(message: &str) -> Result<Self, ()> {
for line in message.lines() {
let line = line.trim();
if!line.starts_with(Self::COMMIT_HEADER) {
continue;
}
let header = line[Self::COMMIT_HEADER.len()+1..].trim();
let mut split = header.split(" ");
let kind = split.next().ok_or(())?.trim();
let to = split.next().ok_or(())?.trim();
match kind {
"github" => {
if split.next().is_some() {
return Err(());
}
let mut split = to.split("#");
let repo = split.next().ok_or(())?.trim();
let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
if split.next().is_some() {
return Err(());
}
return Ok(ReplyTo::Github {
repo: repo.to_string(),
issue_number,
});
}
"zulip-public" => {
let stream_id: u64 = to.parse().map_err(|_| ())?;
let subject = header[header.find("|").ok_or(())?+2..].to_string();
return Ok(ReplyTo::ZulipPublic {
stream_id,
subject,
})
}
"zulip-private" => {
if split.next().is_some() {
return Err(());
}
let user_id = to.parse().map_err(|_| ())?;
return Ok(ReplyTo::ZulipPrivate {
user_id,
});
}
_ => return Err(()),
}
}
Err(())
}
}
#[test]
fn test_reply_to_parsing() {
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
);
assert_eq!(
ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
);
}
enum Command {
Bisect {
start: Option<String>,
end: String,
code: String,
},
}
impl Command {
fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
let mut lines = comment.lines();
while let Some(line) = lines.next() {
let line = line.trim();
if!line.starts_with(BOT_NAME) {
continue;
}
let line = line[BOT_NAME.len()..].trim();
let mut parts = line.split(" ").map(|part| part.trim());
match parts.next() {
Some("bisect") => {
let mut start = None;
let mut end = None;
for part in parts {
if part.starts_with("start=") | else if part.starts_with("end=") {
if end.is_some() {
return Err(format!("end range specified twice"));
}
end = Some(part["end=".len()..].to_string());
} else {
return Err(format!("unknown command part {:?}", part));
}
}
let end = end.ok_or("missing end range")?;
loop {
match lines.next() {
Some(line) if line.trim() == "```rust" => break,
Some(_) => {}
None => {
return Err("didn't find repro code".to_string());
}
}
}
let code = lines.take_while(|line| line.trim()!= "```").collect::<Vec<_>>().join("\n");
return Ok(Some(Command::Bisect {
start,
end,
code,
}));
}
cmd => {
return Err(format!("unknown command {:?}", cmd));
}
}
}
return Ok(None);
}
}
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match Command::parse_comment(comment)? {
Some(Command::Bisect {
start,
end,
code,
}) => {
let mut cmds = Vec::new();
if let Some(start) = start {
cmds.push(format!("--start={}", start));
}
cmds.push(format!("--end={}", end));
println!("{:?}", &cmds);
push_job(&reply_to, comment_id, &cmds, &code).await?;
}
None => {}
}
Ok(())
}
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
// Escape commands and join with whitespace
let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
let src_lib = create_blob(repro).await?;
let src = create_tree(&[TreeEntry {
path: "lib.rs".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: src_lib,
}]).await?;
let github_workflow_bisect = create_blob(&format!(
r#"
name: Bisect
on:
- push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Cache cargo installed crates
uses: actions/[email protected]
with:
path: ~/.cargo/bin
key: cargo-installed-crates-2
- run: cargo install cargo-bisect-rustc || true
- name: Bisect
run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
bisect_cmds,
)).await?;
let github_workflow = create_tree(&[TreeEntry {
path: "bisect.yaml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: github_workflow_bisect,
}]).await?;
let github = create_tree(&[TreeEntry {
path: "workflows".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github_workflow,
}]).await?;
let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
let root = create_tree(&[
TreeEntry {
path: "src".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: src,
},
TreeEntry {
path: ".github".to_string(),
mode: TreeEntryMode::Subdirectory,
type_: TreeEntryType::Tree,
sha: github,
},
TreeEntry {
path: "Cargo.toml".to_string(),
mode: TreeEntryMode::File,
type_: TreeEntryType::Blob,
sha: cargo,
}
]).await?;
let commit = create_commit(
&format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
&root,
&[],
).await?;
push_branch(&format!("job-{}", job_id), &commit).await?;
Ok(())
}
async fn create_blob(content: &str) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"content": content,
"encoding": "utf-8",
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created blob: {}", sha);
Ok(sha)
}
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
"tree": content,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created tree: {}", sha);
Ok(sha)
}
/// One entry of a git tree as expected by the GitHub "create a tree" API;
/// serializes directly into the request's `tree` array.
#[derive(serde::Serialize)]
struct TreeEntry {
    // Path of the entry relative to the tree being created.
    path: String,
    mode: TreeEntryMode,
    // `type` is a Rust keyword, hence the rename.
    #[serde(rename = "type")]
    type_: TreeEntryType,
    // SHA of an already-created blob/tree/commit object.
    sha: String,
}
#[derive(serde::Serialize)]
enum TreeEntryMode {
#[serde(rename = "100644")]
File,
#[serde(rename = "100755")]
Executable,
#[serde(rename = "040000")]
Subdirectory,
#[serde(rename = "160000")]
Submodule,
#[serde(rename = "120000")]
Symlink,
}
#[derive(serde::Serialize)]
enum TreeEntryType {
#[serde(rename = "blob")]
Blob,
#[serde(rename = "tree")]
Tree,
#[serde(rename = "commit")]
Commit,
}
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
"message": message,
"tree": tree,
"parents": parents,
})).unwrap()).await?;
let res: serde_json::Value = serde_json::from_str(&res).unwrap();
let sha = res["sha"].as_str().unwrap().to_string();
println!("created commit: {}", sha);
Ok(sha)
}
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
"ref": format!("refs/heads/{}", branch),
"sha": commit,
})).unwrap()).await?;
println!("pushed branch: {}", res);
Ok(())
}
| {
if start.is_some() {
return Err(format!("start range specified twice"));
}
start = Some(part["start=".len()..].to_string());
} | conditional_block |
main.rs | use std::convert::Infallible;
use hyper::{Body, Request, Response, Server};
use hyper::service::{make_service_fn, service_fn};
mod github;
mod zulip;
const BOT_NAME: &'static str = "bisect-bot ";
const USER_AGENT: &'static str = "https://github.com/bjorn3/cargo-bisect-rustc-bot";
const REPO_WHITELIST: &'static [&'static str] = &["bjorn3/cargo-bisect-rustc-bot", JOB_REPO];
const JOB_REPO: &'static str = "bjorn3/cargo-bisect-rustc-bot-jobs";
lazy_static::lazy_static! {
static ref GITHUB_USERNAME: String = std::env::var("GITHUB_USERNAME").expect("github username not defined");
static ref GITHUB_TOKEN: String = std::env::var("GITHUB_TOKEN").expect("github personal access token not defined");
static ref ZULIP_USER: String = std::env::var("ZULIP_USERNAME").expect("zulip username not defined");
static ref ZULIP_TOKEN: String = std::env::var("ZULIP_TOKEN").expect("zulip api token not defined");
}
#[tokio::main]
async fn main() {
let _zulip = tokio::spawn(crate::zulip::zulip_task());
let addr = (
[0, 0, 0, 0],
std::env::var("PORT")
.unwrap_or("3000".to_string())
.parse::<u16>()
.unwrap(),
)
.into();
let make_svc = make_service_fn(|_conn| async {
Ok::<_, Infallible>(service_fn(request_handler))
});
let server = Server::bind(&addr).serve(make_svc);
// Run this server for... forever!
if let Err(e) = server.await {
eprintln!("server error: {}", e);
}
}
async fn request_handler(req: Request<Body>) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
crate::github::web_hook(req).await.map_err(|err| {
println!("error: {}", err);
err
})
}
/// Destination to which the result of a bisection job is reported back.
///
/// Serialized into job commit messages via [`ReplyTo::to_commit_header`] and
/// recovered by [`ReplyTo::from_commit_message`] when the job finishes.
#[derive(Debug, PartialEq)]
enum ReplyTo {
    /// Comment on a GitHub issue or pull request.
    Github {
        // "owner/name" form, e.g. "bjorn3/cargo-bisect-rustc-bot".
        repo: String,
        issue_number: u64,
    },
    /// Message to a public Zulip stream under a given topic.
    ZulipPublic {
        stream_id: u64,
        // Topic ("subject") within the stream; may contain spaces.
        subject: String,
    },
    /// Private Zulip message to a single user.
    ZulipPrivate {
        user_id: u64,
    },
}
impl ReplyTo {
    /// Post `body` as a reply on whichever channel this value describes.
    async fn comment(&self, body: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match *self {
            ReplyTo::Github { ref repo, issue_number } => {
                crate::github::gh_post_comment(repo, issue_number, body).await?;
                Ok(())
            }
            ReplyTo::ZulipPublic { stream_id, ref subject } => {
                crate::zulip::zulip_post_public_message(stream_id, subject, body).await
            }
            ReplyTo::ZulipPrivate { user_id } => {
                crate::zulip::zulip_post_private_message(user_id, body).await
            }
        }
    }

    /// Header key embedded in job commit messages so the job outcome can be
    /// routed back to the original requester.
    const COMMIT_HEADER: &'static str = "X-Bisectbot-Reply-To";

    /// Render this reply target as a `X-Bisectbot-Reply-To: ...` commit-message line.
    fn to_commit_header(&self) -> String {
        match *self {
            ReplyTo::Github { ref repo, issue_number } => {
                format!("{}: github {}#{}", Self::COMMIT_HEADER, repo, issue_number)
            }
            ReplyTo::ZulipPublic { stream_id, ref subject } => {
                format!("{}: zulip-public {} | {}", Self::COMMIT_HEADER, stream_id, subject)
            }
            ReplyTo::ZulipPrivate { user_id } => {
                format!("{}: zulip-private {}", Self::COMMIT_HEADER, user_id)
            }
        }
    }

    /// Parse the first `COMMIT_HEADER` line found in `message`.
    ///
    /// Returns `Err(())` for any malformed header. Uses checked slicing
    /// (`str::get`) so truncated input yields `Err` instead of panicking: the
    /// previous `line[len+1..]` / `header[idx+2..]` indexing panicked when the
    /// line was exactly the header name, or when `|` was the last byte (or
    /// `idx+2` fell inside a multi-byte character) of a zulip-public header.
    fn from_commit_message(message: &str) -> Result<Self, ()> {
        for line in message.lines() {
            let line = line.trim();
            if !line.starts_with(Self::COMMIT_HEADER) {
                continue;
            }
            // Skip the header name plus the following ':'.
            let header = line.get(Self::COMMIT_HEADER.len() + 1..).ok_or(())?.trim();
            let mut split = header.split(" ");
            let kind = split.next().ok_or(())?.trim();
            let to = split.next().ok_or(())?.trim();
            match kind {
                "github" => {
                    if split.next().is_some() {
                        return Err(());
                    }
                    // Expect exactly "<owner>/<repo>#<issue-number>".
                    let mut split = to.split("#");
                    let repo = split.next().ok_or(())?.trim();
                    let issue_number = split.next().ok_or(())?.trim().parse().map_err(|_| ())?;
                    if split.next().is_some() {
                        return Err(());
                    }
                    return Ok(ReplyTo::Github {
                        repo: repo.to_string(),
                        issue_number,
                    });
                }
                "zulip-public" => {
                    let stream_id: u64 = to.parse().map_err(|_| ())?;
                    // The subject is everything after "| " and may contain spaces,
                    // so it is recovered from the full header rather than `split`.
                    let sep = header.find("|").ok_or(())?;
                    let subject = header.get(sep + 2..).ok_or(())?.to_string();
                    return Ok(ReplyTo::ZulipPublic {
                        stream_id,
                        subject,
                    })
                }
                "zulip-private" => {
                    if split.next().is_some() {
                        return Err(());
                    }
                    let user_id = to.parse().map_err(|_| ())?;
                    return Ok(ReplyTo::ZulipPrivate {
                        user_id,
                    });
                }
                _ => return Err(()),
            }
        }
        Err(())
    }
}
// Round-trip checks for `ReplyTo::from_commit_message` covering all three
// reply channels; the zulip-public case verifies that '#' inside the topic
// does not confuse the github-style "repo#issue" parsing.
#[test]
fn test_reply_to_parsing() {
    assert_eq!(
        ReplyTo::from_commit_message("X-Bisectbot-Reply-To: github a/b#5"),
        Ok(ReplyTo::Github { repo: "a/b".to_string(), issue_number: 5}),
    );
    assert_eq!(
        ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-public 123 | this is the #1 topic on this zulip instance!"),
        Ok(ReplyTo::ZulipPublic { stream_id: 123, subject: "this is the #1 topic on this zulip instance!".to_string() }),
    );
    assert_eq!(
        ReplyTo::from_commit_message("X-Bisectbot-Reply-To: zulip-private 123"),
        Ok(ReplyTo::ZulipPrivate { user_id: 123 }),
    );
}
/// A bot command extracted from an issue/stream comment.
enum Command {
    /// Run `cargo-bisect-rustc` over a nightly range on the given repro code.
    Bisect {
        // Optional `start=` bound; bisection tool picks a default when absent.
        start: Option<String>,
        // Mandatory `end=` bound.
        end: String,
        // Contents of the ```rust fenced block following the command line.
        code: String,
    },
}
impl Command {
    /// Scan `comment` for a line starting with `BOT_NAME` and parse the command on it.
    ///
    /// Returns `Ok(None)` when no bot invocation is present, `Ok(Some(_))` for a
    /// well-formed command, and `Err(description)` for a malformed one. For
    /// `bisect`, the repro code is taken from the first ```rust fenced block
    /// after the command line, up to (not including) the closing ``` fence.
    fn parse_comment(comment: &str) -> Result<Option<Command>, String> {
        let mut lines = comment.lines();
        while let Some(line) = lines.next() {
            let line = line.trim();
            if!line.starts_with(BOT_NAME) {
                continue;
            }
            // Strip the bot mention; the rest is the command and its arguments.
            let line = line[BOT_NAME.len()..].trim();
            let mut parts = line.split(" ").map(|part| part.trim());
            match parts.next() {
                Some("bisect") => {
                    let mut start = None;
                    let mut end = None;
                    // Remaining whitespace-separated tokens must be key=value args.
                    for part in parts {
                        if part.starts_with("start=") {
                            if start.is_some() {
                                return Err(format!("start range specified twice"));
                            }
                            start = Some(part["start=".len()..].to_string());
                        } else if part.starts_with("end=") {
                            if end.is_some() {
                                return Err(format!("end range specified twice"));
                            }
                            end = Some(part["end=".len()..].to_string());
                        } else {
                            return Err(format!("unknown command part {:?}", part));
                        }
                    }
                    let end = end.ok_or("missing end range")?;
                    // Advance the shared line iterator to the opening ```rust fence;
                    // `take_while` below then consumes up to the closing fence.
                    loop {
                        match lines.next() {
                            Some(line) if line.trim() == "```rust" => break,
                            Some(_) => {}
                            None => {
                                return Err("didn't find repro code".to_string());
                            }
                        }
                    }
                    let code = lines.take_while(|line| line.trim()!= "```").collect::<Vec<_>>().join("\n");
                    return Ok(Some(Command::Bisect {
                        start,
                        end,
                        code,
                    }));
                }
                cmd => {
                    return Err(format!("unknown command {:?}", cmd));
                }
            }
        }
        return Ok(None);
    }
}
/// Parse a comment for a bot command and, if one is found, enqueue a bisection
/// job. Comments without a recognized command are ignored without error.
async fn parse_comment(reply_to: &ReplyTo, comment_id: &str, comment: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    if let Some(Command::Bisect { start, end, code }) = Command::parse_comment(comment)? {
        // Translate the parsed bounds into cargo-bisect-rustc CLI flags;
        // an absent `start` simply contributes no flag.
        let mut cmds: Vec<String> = start
            .into_iter()
            .map(|s| format!("--start={}", s))
            .collect();
        cmds.push(format!("--end={}", end));
        println!("{:?}", &cmds);
        push_job(&reply_to, comment_id, &cmds, &code).await?;
    }
    Ok(())
}
/// Build and push a one-commit job branch to `JOB_REPO`.
///
/// The branch contains a minimal crate (`Cargo.toml` + `src/lib.rs` with the
/// repro code) and a GitHub Actions workflow that runs `cargo-bisect-rustc`
/// with `bisect_cmds` on push. The reply target is smuggled through the commit
/// message via `ReplyTo::to_commit_header` so the result can be routed back.
async fn push_job(reply_to: &ReplyTo, job_id: &str, bisect_cmds: &[String], repro: &str) -> reqwest::Result<()> {
    // Escape commands and join with whitespace
    let bisect_cmds = bisect_cmds.iter().map(|cmd| format!("{:?}", cmd)).collect::<Vec<_>>().join(" ");
    // src/lib.rs blob + tree holding the repro code.
    let src_lib = create_blob(repro).await?;
    let src = create_tree(&[TreeEntry {
        path: "lib.rs".to_string(),
        mode: TreeEntryMode::File,
        type_: TreeEntryType::Blob,
        sha: src_lib,
    }]).await?;
    // .github/workflows/bisect.yaml: runs the bisection on push and filters
    // noisy per-target download lines out of the log.
    let github_workflow_bisect = create_blob(&format!(
        r#"
name: Bisect
on:
  - push
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Cache cargo installed crates
      uses: actions/[email protected]
      with:
        path: ~/.cargo/bin
        key: cargo-installed-crates-2
    - run: cargo install cargo-bisect-rustc || true
    - name: Bisect
      run: cargo bisect-rustc {} --access=github | grep -v "for x86_64-unknown-linux-gnu" || true
"#,
        bisect_cmds,
    )).await?;
    let github_workflow = create_tree(&[TreeEntry {
        path: "bisect.yaml".to_string(),
        mode: TreeEntryMode::File,
        type_: TreeEntryType::Blob,
        sha: github_workflow_bisect,
    }]).await?;
    let github = create_tree(&[TreeEntry {
        path: "workflows".to_string(),
        mode: TreeEntryMode::Subdirectory,
        type_: TreeEntryType::Tree,
        sha: github_workflow,
    }]).await?;
    // Minimal manifest so the repro builds as a standalone crate.
    let cargo = create_blob(r#"[package]
name = "cargo-bisect-bot-job"
version = "0.0.0"
edition = "2018"
publish = false
[dependencies]
"#).await?;
    // Repository root: src/, .github/, Cargo.toml.
    let root = create_tree(&[
        TreeEntry {
            path: "src".to_string(),
            mode: TreeEntryMode::Subdirectory,
            type_: TreeEntryType::Tree,
            sha: src,
        },
        TreeEntry {
            path: ".github".to_string(),
            mode: TreeEntryMode::Subdirectory,
            type_: TreeEntryType::Tree,
            sha: github,
        },
        TreeEntry {
            path: "Cargo.toml".to_string(),
            mode: TreeEntryMode::File,
            type_: TreeEntryType::Blob,
            sha: cargo,
        }
    ]).await?;
    // Parentless commit: each job branch carries exactly one root commit.
    let commit = create_commit(
        &format!("Bisect job for comment id {}\n\n{}", job_id, reply_to.to_commit_header()),
        &root,
        &[],
    ).await?;
    push_branch(&format!("job-{}", job_id), &commit).await?;
    Ok(())
}
/// Create a git blob with `content` in `JOB_REPO` via the GitHub Git Data API
/// and return its SHA. Panics if the API response is not the expected JSON.
async fn create_blob(content: &str) -> reqwest::Result<String> {
    let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/blobs", JOB_REPO), serde_json::to_string(&serde_json::json!({
        "content": content,
        "encoding": "utf-8",
    })).unwrap()).await?;
    let res: serde_json::Value = serde_json::from_str(&res).unwrap();
    let sha = res["sha"].as_str().unwrap().to_string();
    println!("created blob: {}", sha);
    Ok(sha)
}
/// Create a git tree from `content` in `JOB_REPO` via the GitHub Git Data API
/// and return its SHA. Referenced blob/tree SHAs must already exist.
async fn create_tree(content: &[TreeEntry]) -> reqwest::Result<String> {
    let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/trees", JOB_REPO), serde_json::to_string(&serde_json::json!({
        "tree": content,
    })).unwrap()).await?;
    let res: serde_json::Value = serde_json::from_str(&res).unwrap();
    let sha = res["sha"].as_str().unwrap().to_string();
    println!("created tree: {}", sha);
    Ok(sha)
}
#[derive(serde::Serialize)]
struct | {
path: String,
mode: TreeEntryMode,
#[serde(rename = "type")]
type_: TreeEntryType,
sha: String,
}
/// Git file-mode of a tree entry; serialized as the octal string the
/// GitHub Git Data API expects.
#[derive(serde::Serialize)]
enum TreeEntryMode {
    #[serde(rename = "100644")]
    File,
    #[serde(rename = "100755")]
    Executable,
    #[serde(rename = "040000")]
    Subdirectory,
    #[serde(rename = "160000")]
    Submodule,
    #[serde(rename = "120000")]
    Symlink,
}
/// Object type of a tree entry (`blob` = file, `tree` = directory,
/// `commit` = submodule pointer), as named by the GitHub Git Data API.
#[derive(serde::Serialize)]
enum TreeEntryType {
    #[serde(rename = "blob")]
    Blob,
    #[serde(rename = "tree")]
    Tree,
    #[serde(rename = "commit")]
    Commit,
}
/// Create a commit in `JOB_REPO` pointing at `tree`, with the given message
/// and parent SHAs (empty slice = parentless root commit). Returns the commit SHA.
async fn create_commit(message: &str, tree: &str, parents: &[&str]) -> reqwest::Result<String> {
    let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/commits", JOB_REPO), serde_json::to_string(&serde_json::json!({
        "message": message,
        "tree": tree,
        "parents": parents,
    })).unwrap()).await?;
    let res: serde_json::Value = serde_json::from_str(&res).unwrap();
    let sha = res["sha"].as_str().unwrap().to_string();
    println!("created commit: {}", sha);
    Ok(sha)
}
/// Create branch `refs/heads/{branch}` in `JOB_REPO` pointing at `commit`;
/// pushing the branch is what triggers the job's GitHub Actions workflow.
async fn push_branch(branch: &str, commit: &str) -> reqwest::Result<()> {
    let res = crate::github::gh_api_post(&format!("https://api.github.com/repos/{}/git/refs", JOB_REPO), serde_json::to_string(&serde_json::json!({
        "ref": format!("refs/heads/{}", branch),
        "sha": commit,
    })).unwrap()).await?;
    println!("pushed branch: {}", res);
    Ok(())
}
| TreeEntry | identifier_name |
context.rs | (pub u64);
/// Opaque handle identifying a render pass by hash.
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct PassHandle(pub u64);
/// Opaque handle identifying an image by hash.
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ImageHandle(pub u64);
/// Opaque handle identifying a shader by hash.
#[derive(Copy, Clone, Debug, Hash, PartialEq)]
pub struct ShaderHandle(pub u64);
/// Top-level renderer state: window/event loop, Vulkan objects, resource
/// lists, the per-frame graph under construction, and a cache of built graphs.
pub struct Context {
    window: winit::window::Window,
    event_loop: winit::event_loop::EventLoop<()>,
    // Graph being built in the current frame
    pub builder_passes: Vec<(PassHandle, BuilderPass)>,
    pub shader_list: ShaderList,
    // TODO: Move these to the graph builder instead?
    pub image_list: ImageList,
    pub buffer_list: BufferList,
    graph_cache: Vec<(Graph, GraphHandle)>, // (graph, hash) // TODO: Make this a proper LRU and move it to its own file
    pub command_pool: vk::CommandPool,
    pub sync_idx: usize,      // Index of the synchronization primitives
    pub swapchain_idx: usize, // Index of the swapchain frame
    _watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
    // Receives debounced filesystem events for shader hot-reload.
    watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
    pub command_buffers: Vec<vk::CommandBuffer>,
    pub facade: Facade, // Resolution-dependent apparatus
    pub debug_utils: DebugUtils,
    pub gpu: Gpu,
    pub basis: Basis,
}
impl Drop for Context {
    /// Tear down GPU resources owned directly by the context. Waits for the
    /// device to go idle first so no command buffer is freed while in flight.
    fn drop(&mut self) {
        unsafe {
            self.gpu
                .device
                .device_wait_idle()
                .expect("Failed to wait device idle!");
            self.gpu
                .device
                .free_command_buffers(self.command_pool, &self.command_buffers);
            self.gpu
                .device
                .destroy_command_pool(self.command_pool, None);
            // Facade owns the swapchain and its image views.
            self.facade.destroy(&mut self.image_list);
        }
    }
}
impl Context {
    /// Rebuild everything tied to the window resolution: the swapchain
    /// (via the facade) and all relatively-sized images. Called after a
    /// resize or an out-of-date swapchain. Blocks until the GPU is idle.
    pub fn recreate_resolution_dependent_state(&mut self) {
        unsafe {
            self.gpu
                .device
                .device_wait_idle()
                .expect("Failed to wait device idle.")
        };
        // Recreate swapchain
        self.facade.destroy(&mut self.image_list);
        self.facade = Facade::new(
            &self.basis,
            &self.gpu,
            &self.window,
            &mut self.image_list,
            &self.debug_utils,
        );
        // Recreate the images which depend on the resolution of the swapchain
        for i in 0..self.image_list.list.len() {
            let (_, internal_image) = &mut self.image_list.list[i];
            if let ImageKind::RelativeSized { scale } = internal_image.kind {
                // New extent = swapchain extent scaled by the image's factor.
                let w = (self.facade.swapchain_width as f32 * scale) as u32;
                let h = (self.facade.swapchain_height as f32 * scale) as u32;
                internal_image.image = Image::new(
                    &internal_image.image.name,
                    w,
                    h,
                    internal_image.image.format,
                    internal_image.image.usage,
                    internal_image.image.aspect_flags,
                    &self.gpu,
                    &self.debug_utils,
                );
            }
        }
    }
    /// Create the window, Vulkan instance/device, swapchain, command pool and
    /// buffers, and the asset file watcher used for shader hot-reload.
    ///
    /// Panics if any of these cannot be created; there is no renderer to fall
    /// back to at this point.
    pub fn new() -> Context {
        const APP_NAME: &str = "";
        // # Init window
        let event_loop = EventLoop::new();
        let window = {
            winit::window::WindowBuilder::new()
                .with_title(APP_NAME)
                .with_inner_size(winit::dpi::LogicalSize::new(800, 600))
                .with_maximized(true)
                .build(&event_loop)
                .expect("Failed to create window.")
        };
        let basis = Basis::new(APP_NAME, &window);
        let gpu = Gpu::new(&basis);
        let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
        // # Create command pool
        let command_pool = {
            let info = vk::CommandPoolCreateInfo::builder()
                // Individual command buffers are reset each frame in begin_frame().
                .flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
                .queue_family_index(gpu.graphics_queue_idx);
            unsafe {
                gpu.device
                    .create_command_pool(&info, None)
                    .expect("Failed to create command pool")
            }
        };
        let shader_list = ShaderList::new(gpu.device.clone());
        // TODO: Move this up?
        let mut image_list = ImageList::new();
        let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
        let buffer_list = BufferList::new();
        // # Allocate command buffers (one per swapchain frame)
        let command_buffers = {
            let info = vk::CommandBufferAllocateInfo::builder()
                .command_pool(command_pool)
                .level(vk::CommandBufferLevel::PRIMARY)
                .command_buffer_count(facade.num_frames as u32);
            unsafe {
                gpu.device
                    .allocate_command_buffers(&info)
                    .expect("Failed to allocate command buffer.")
            }
        };
        // Add expect messages to all these unwraps
        let (watcher, watch_rx) = {
            use notify::{RecommendedWatcher, RecursiveMode, Watcher};
            use std::sync::mpsc::channel;
            use std::time::Duration;
            let (tx, rx) = channel();
            // 2s debounce groups rapid editor saves into one reload event.
            let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
            watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
            (watcher, rx)
        };
        Context {
            window,
            event_loop,
            builder_passes: Vec::new(),
            shader_list,
            image_list,
            buffer_list,
            graph_cache: Vec::new(),
            command_pool,
            sync_idx: 0,
            swapchain_idx: 0,
            _watcher: watcher,
            watch_rx,
            command_buffers,
            facade,
            debug_utils,
            gpu,
            basis,
        }
    }
    /// Build (or fetch from cache) the render graph described by the passes
    /// added this frame. The cache key is a hash of `builder_passes`, so an
    /// identical frame layout reuses the previously built graph.
    pub fn build_graph(&mut self) -> GraphHandle {
        // Get the hash of the graph builder
        let req_hash: u64 = {
            let mut hasher = DefaultHasher::new();
            self.builder_passes.hash(&mut hasher);
            hasher.finish()
        };
        // Try finding the requested graph in the cache
        let opt_idx = self
            .graph_cache
            .iter()
            .position(|(_, cached_hash)| cached_hash.0 == req_hash);
        if opt_idx.is_none() {
            // The requested graph doesn't exist. Build it and add it to the cache.
            // NOTE: the cache only grows; see the LRU TODO on `graph_cache`.
            println!("Adding graph to cache");
            self.graph_cache.push((
                Graph::new(
                    &self.gpu,
                    &self.builder_passes,
                    &self.shader_list,
                    &self.buffer_list,
                    &self.image_list,
                ),
                GraphHandle(req_hash),
            ));
        }
        GraphHandle(req_hash)
    }
    /// Start a new frame: pump window events, handle resizes, acquire the next
    /// swapchain image, and begin recording that frame's command buffer.
    ///
    /// Returns `false` when the window was closed or Escape/Return was
    /// pressed, i.e. when the caller's main loop should exit.
    pub fn begin_frame(&mut self) -> bool {
        // Clear the passes of the current graph
        self.builder_passes.clear();
        // Execute the event loop
        let mut is_running = true;
        let mut resize_needed = false;
        let swapchain_width = self.facade.swapchain_width;
        let swapchain_height = self.facade.swapchain_height;
        self.event_loop.run_return(|event, _, control_flow| {
            *control_flow = ControlFlow::Wait;
            match event {
                Event::WindowEvent { event,.. } => match event {
                    WindowEvent::CloseRequested => is_running = false,
                    #[allow(clippy::match_single_binding)] // TODO: Simplify this
                    WindowEvent::KeyboardInput { input,.. } => match input {
                        KeyboardInput {
                            virtual_keycode,
                            state,
                            ..
                        } => match (virtual_keycode, state) {
                            (Some(VirtualKeyCode::Escape), ElementState::Pressed)
                            | (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
                                is_running = false;
                            }
                            _ => {}
                        },
                    },
                    WindowEvent::Resized(physical_size) => {
                        // Only flag a resize when the size actually changed.
                        if swapchain_width!= physical_size.width
                            || swapchain_height!= physical_size.height
                        {
                            resize_needed = true;
                        }
                    }
                    _ => {}
                },
                Event::MainEventsCleared => {
                    *control_flow = ControlFlow::Exit;
                }
                _ => (),
            }
        });
        // This mechanism is need on Windows:
        if resize_needed {
            self.recreate_resolution_dependent_state();
        }
        // This mechanism suffices on Linux:
        // Acquiring the swapchain image fails if the window has been resized. If this happens, we need
        // to loop over and recreate the resolution-dependent state, and then try again.
        let mut opt_frame_idx = None;
        loop {
            let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
            unsafe {
                // Wait until this frame slot's previous submission has finished.
                self.gpu
                    .device
                    .wait_for_fences(&wait_fences, true, std::u64::MAX)
                    .expect("Failed to wait for Fence.");
                let result = self.facade.ext_swapchain.acquire_next_image(
                    self.facade.swapchain,
                    std::u64::MAX,
                    self.facade.image_available_semaphores[self.sync_idx],
                    vk::Fence::null(),
                );
                match result {
                    Ok((idx, _is_suboptimal)) => {
                        opt_frame_idx = Some(idx as usize);
                    }
                    Err(error_code) => {
                        match error_code {
                            vk::Result::ERROR_OUT_OF_DATE_KHR => {
                                // Window is resized. Recreate the swapchain
                                // and exit early without drawing this frame.
                                self.recreate_resolution_dependent_state();
                            }
                            _ => panic!("Failed to acquire swapchain image."),
                        }
                    }
                }
            }
            if opt_frame_idx.is_some() {
                break;
            }
        }
        self.swapchain_idx = opt_frame_idx.unwrap();
        let cmd_buf = self.command_buffers[self.swapchain_idx];
        // Reset command buffer
        unsafe {
            self.gpu
                .device
                .reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
                .unwrap();
        }
        // Begin command buffer. TODO: Is this in the right place?
        let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
            .flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
        unsafe {
            self.gpu
                .device
                .begin_command_buffer(cmd_buf, &command_buffer_begin_info)
                .expect("Failed to begin recording command buffer.");
        }
        /* Naming the command buffer doesn't seem to work on creating it, so we
        name it on every begin frame instead.*/
        self.debug_utils
            .set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
        is_running
    }
    /// Finish the frame: end command-buffer recording, submit it to the
    /// graphics queue (waiting on image-available, signaling render-finished),
    /// present the swapchain image, and process pending shader hot-reloads.
    pub fn end_frame(&mut self) {
        // End command buffer. TODO: Is this in the right place?
        unsafe {
            self.gpu
                .device
                .end_command_buffer(self.command_buffers[self.swapchain_idx])
                .expect("Failed to end recording command buffer.");
        }
        // Submission waits at the color-attachment stage until the acquired
        // image is actually available.
        let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
        let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
        let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
        let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
        let submit_infos = [vk::SubmitInfo {
            wait_semaphore_count: wait_semaphores.len() as u32,
            p_wait_semaphores: wait_semaphores.as_ptr(),
            p_wait_dst_stage_mask: wait_stages.as_ptr(),
            command_buffer_count: command_buffers.len() as u32,
            p_command_buffers: command_buffers.as_ptr(),
            signal_semaphore_count: signal_semaphores.len() as u32,
            p_signal_semaphores: signal_semaphores.as_ptr(),
            ..Default::default()
        }];
        let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
        unsafe {
            // Fence must be unsignaled before it can be signaled by this submit.
            self.gpu
                .device
                .reset_fences(&wait_fences)
                .expect("Failed to reset fence.");
            self.gpu
                .device
                .queue_submit(
                    self.gpu.graphics_queue,
                    &submit_infos,
                    self.facade.command_buffer_complete_fences[self.sync_idx],
                )
                .expect("Failed to execute queue submit.");
        }
        // Advance to the next frame-in-flight slot.
        self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
        let swapchains = [self.facade.swapchain];
        let image_indices = [self.swapchain_idx as u32];
        let present_info = vk::PresentInfoKHR::builder()
            .wait_semaphores(&signal_semaphores)
            .swapchains(&swapchains)
            .image_indices(&image_indices);
        /* Present the queue */
        // According to Vulkan spec, queue_present() can fail if a resize occurs.
        // We handle this in begin_frame(), so we should be able to ignore failure here,
        // if it does happen. This works fine, when tested on Windows and on Linux on an
        // integrated GPU. If this fails on some other platform, consider calling
        // recreate_resolution_dependent_state() on error.
        let _ = unsafe {
            self.facade
                .ext_swapchain
                .queue_present(self.gpu.present_queue, &present_info)
        };
        // Shader hot-reload: any write/remove/rename under ./assets triggers a
        // device-idle wait followed by a shader (and cached-graph) rebuild.
        for event in self.watch_rx.try_iter() {
            use notify::DebouncedEvent::*;
            match event {
                Write(_) | Remove(_) | Rename(_, _) => {
                    unsafe {
                        self.gpu
                            .device
                            .device_wait_idle()
                            .expect("Failed to wait device idle!");
                    }
                    self.shader_list.hot_reload(&mut self.graph_cache);
                }
                _ => (),
            }
        }
    }
    /// Begin recording `pass_handle` of the given cached graph into the
    /// current frame's command buffer.
    ///
    /// Panics if the graph is not in the cache (i.e. `build_graph()` was not
    /// called for this frame's pass layout).
    pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
        let (graph, _) = self
            .graph_cache
            .iter()
            .find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
            .expect("Graph not found in cache. Have you called build_graph()?");
        graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
    }
    /// End the currently recording pass of the given cached graph on the
    /// current frame's command buffer. Panics if the graph is not cached.
    pub fn end_pass(&self, graph_handle: GraphHandle) {
        let (graph, _) = self
            .graph_cache
            .iter()
            .find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
            .expect("Graph not found in cache. Have you called build_graph()?")
        graph.end_pass(self.command_buffers[self.swapchain_idx]);
    }
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
| GraphHandle | identifier_name |
|
context.rs | pub swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window, | &mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event,.. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input,.. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width!= physical_size.width
|| swapchain_height!= physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// This mechanism is need on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work on creating it, so we
name it on every begin frame instead.*/
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine, when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
| random_line_split |
|
context.rs | &internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event,.. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input,.. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width!= physical_size.width
|| swapchain_height!= physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => (),
}
});
// This mechanism is need on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work on creating it, so we
name it on every begin frame instead.*/
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine, when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
uniform_buffer,
};
let pass_handle = {
let mut hasher = DefaultHasher::new();
pass.hash(&mut hasher);
PassHandle(hasher.finish())
};
self.builder_passes.push((pass_handle, pass));
Ok(pass_handle)
}
/* Shaders */
pub fn new_shader(
&mut self,
name: &str,
shader_stage: ShaderStage,
path: &str,
) -> Result<ShaderHandle, String> {
self.shader_list.new_shader(name, shader_stage, path)
}
/* Buffers */
pub fn new_buffer(
&mut self,
name: &str,
size: usize,
usage: vk::BufferUsageFlags,
) -> Result<BufferHandle, String> {
self.buffer_list
.new_buffer(name, size, usage, &self.gpu, &self.debug_utils)
}
pub fn upload_data<T>(&self, buffer_handle: BufferHandle, data: &[T]) {
self.buffer_list.upload_data(buffer_handle, data);
}
/* Images */
pub fn new_image_relative_size(
&mut self,
name: &str,
scale: f32,
format: vk::Format,
usage: vk::ImageUsageFlags,
aspect_flags: vk::ImageAspectFlags,
) -> Result<ImageHandle, String> {
self.image_list.new_image_relative_size(
name,
scale,
format,
usage,
aspect_flags,
&self.facade,
&self.gpu,
&self.debug_utils,
)
}
pub fn new_image_from_file(&mut self, name: &str, path: &str) -> Result<ImageHandle, String> | {
self.image_list.new_image_from_file(
name,
path,
&self.gpu,
self.command_pool,
&self.debug_utils,
)
} | identifier_body |
|
context.rs | swapchain_idx: usize, // Index of the swapchain frame
_watcher: notify::RecommendedWatcher, // Need to keep this alive to keep the receiver alive
watch_rx: std::sync::mpsc::Receiver<notify::DebouncedEvent>,
pub command_buffers: Vec<vk::CommandBuffer>,
pub facade: Facade, // Resolution-dependent apparatus
pub debug_utils: DebugUtils,
pub gpu: Gpu,
pub basis: Basis,
}
impl Drop for Context {
fn drop(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
self.gpu
.device
.free_command_buffers(self.command_pool, &self.command_buffers);
self.gpu
.device
.destroy_command_pool(self.command_pool, None);
self.facade.destroy(&mut self.image_list);
}
}
}
impl Context {
pub fn recreate_resolution_dependent_state(&mut self) {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle.")
};
// Recreate swapchain
self.facade.destroy(&mut self.image_list);
self.facade = Facade::new(
&self.basis,
&self.gpu,
&self.window,
&mut self.image_list,
&self.debug_utils,
);
// Recreate the images which depend on the resolution of the swapchain
for i in 0..self.image_list.list.len() {
let (_, internal_image) = &mut self.image_list.list[i];
if let ImageKind::RelativeSized { scale } = internal_image.kind {
let w = (self.facade.swapchain_width as f32 * scale) as u32;
let h = (self.facade.swapchain_height as f32 * scale) as u32;
internal_image.image = Image::new(
&internal_image.image.name,
w,
h,
internal_image.image.format,
internal_image.image.usage,
internal_image.image.aspect_flags,
&self.gpu,
&self.debug_utils,
);
}
}
}
pub fn new() -> Context {
const APP_NAME: &str = "";
// # Init window
let event_loop = EventLoop::new();
let window = {
winit::window::WindowBuilder::new()
.with_title(APP_NAME)
.with_inner_size(winit::dpi::LogicalSize::new(800, 600))
.with_maximized(true)
.build(&event_loop)
.expect("Failed to create window.")
};
let basis = Basis::new(APP_NAME, &window);
let gpu = Gpu::new(&basis);
let debug_utils = DebugUtils::new(&basis, &gpu, ENABLE_DEBUG_MESSENGER_CALLBACK);
// # Create command pool
let command_pool = {
let info = vk::CommandPoolCreateInfo::builder()
.flags(vk::CommandPoolCreateFlags::RESET_COMMAND_BUFFER)
.queue_family_index(gpu.graphics_queue_idx);
unsafe {
gpu.device
.create_command_pool(&info, None)
.expect("Failed to create command pool")
}
};
let shader_list = ShaderList::new(gpu.device.clone());
// TODO: Move this up?
let mut image_list = ImageList::new();
let facade = Facade::new(&basis, &gpu, &window, &mut image_list, &debug_utils);
let buffer_list = BufferList::new();
// # Allocate command buffers
let command_buffers = {
let info = vk::CommandBufferAllocateInfo::builder()
.command_pool(command_pool)
.level(vk::CommandBufferLevel::PRIMARY)
.command_buffer_count(facade.num_frames as u32);
unsafe {
gpu.device
.allocate_command_buffers(&info)
.expect("Failed to allocate command buffer.")
}
};
// Add expect messages to all these unwraps
let (watcher, watch_rx) = {
use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use std::sync::mpsc::channel;
use std::time::Duration;
let (tx, rx) = channel();
let mut watcher: RecommendedWatcher = Watcher::new(tx, Duration::from_secs(2)).unwrap();
watcher.watch("./assets", RecursiveMode::Recursive).unwrap();
(watcher, rx)
};
Context {
window,
event_loop,
builder_passes: Vec::new(),
shader_list,
image_list,
buffer_list,
graph_cache: Vec::new(),
command_pool,
sync_idx: 0,
swapchain_idx: 0,
_watcher: watcher,
watch_rx,
command_buffers,
facade,
debug_utils,
gpu,
basis,
}
}
pub fn build_graph(&mut self) -> GraphHandle {
// Get the hash of the graph builder
let req_hash: u64 = {
let mut hasher = DefaultHasher::new();
self.builder_passes.hash(&mut hasher);
hasher.finish()
};
// Try finding the requested graph in the cache
let opt_idx = self
.graph_cache
.iter()
.position(|(_, cached_hash)| cached_hash.0 == req_hash);
if opt_idx.is_none() {
// The requested graph doesn't exist. Build it and add it to the cache.
println!("Adding graph to cache");
self.graph_cache.push((
Graph::new(
&self.gpu,
&self.builder_passes,
&self.shader_list,
&self.buffer_list,
&self.image_list,
),
GraphHandle(req_hash),
));
}
GraphHandle(req_hash)
}
pub fn begin_frame(&mut self) -> bool {
// Clear the passes of the current graph
self.builder_passes.clear();
// Execute the event loop
let mut is_running = true;
let mut resize_needed = false;
let swapchain_width = self.facade.swapchain_width;
let swapchain_height = self.facade.swapchain_height;
self.event_loop.run_return(|event, _, control_flow| {
*control_flow = ControlFlow::Wait;
match event {
Event::WindowEvent { event,.. } => match event {
WindowEvent::CloseRequested => is_running = false,
#[allow(clippy::match_single_binding)] // TODO: Simplify this
WindowEvent::KeyboardInput { input,.. } => match input {
KeyboardInput {
virtual_keycode,
state,
..
} => match (virtual_keycode, state) {
(Some(VirtualKeyCode::Escape), ElementState::Pressed)
| (Some(VirtualKeyCode::Return), ElementState::Pressed) => {
is_running = false;
}
_ => {}
},
},
WindowEvent::Resized(physical_size) => {
if swapchain_width!= physical_size.width
|| swapchain_height!= physical_size.height
{
resize_needed = true;
}
}
_ => {}
},
Event::MainEventsCleared => |
_ => (),
}
});
// This mechanism is need on Windows:
if resize_needed {
self.recreate_resolution_dependent_state();
}
// This mechanism suffices on Linux:
// Acquiring the swapchain image fails if the window has been resized. If this happens, we need
// to loop over and recreate the resolution-dependent state, and then try again.
let mut opt_frame_idx = None;
loop {
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.wait_for_fences(&wait_fences, true, std::u64::MAX)
.expect("Failed to wait for Fence.");
let result = self.facade.ext_swapchain.acquire_next_image(
self.facade.swapchain,
std::u64::MAX,
self.facade.image_available_semaphores[self.sync_idx],
vk::Fence::null(),
);
match result {
Ok((idx, _is_suboptimal)) => {
opt_frame_idx = Some(idx as usize);
}
Err(error_code) => {
match error_code {
vk::Result::ERROR_OUT_OF_DATE_KHR => {
// Window is resized. Recreate the swapchain
// and exit early without drawing this frame.
self.recreate_resolution_dependent_state();
}
_ => panic!("Failed to acquire swapchain image."),
}
}
}
}
if opt_frame_idx.is_some() {
break;
}
}
self.swapchain_idx = opt_frame_idx.unwrap();
let cmd_buf = self.command_buffers[self.swapchain_idx];
// Reset command buffer
unsafe {
self.gpu
.device
.reset_command_buffer(cmd_buf, vk::CommandBufferResetFlags::empty())
.unwrap();
}
// Begin command buffer. TODO: Is this in the right place?
let command_buffer_begin_info = vk::CommandBufferBeginInfo::builder()
.flags(vk::CommandBufferUsageFlags::SIMULTANEOUS_USE);
unsafe {
self.gpu
.device
.begin_command_buffer(cmd_buf, &command_buffer_begin_info)
.expect("Failed to begin recording command buffer.");
}
/* Naming the command buffer doesn't seem to work on creating it, so we
name it on every begin frame instead.*/
self.debug_utils
.set_command_buffer_name(cmd_buf, &format!("command_buffer_{}", self.swapchain_idx));
is_running
}
pub fn end_frame(&mut self) {
// End command buffer. TODO: Is this in the right place?
unsafe {
self.gpu
.device
.end_command_buffer(self.command_buffers[self.swapchain_idx])
.expect("Failed to end recording command buffer.");
}
let wait_stages = [vk::PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT];
let wait_semaphores = [self.facade.image_available_semaphores[self.sync_idx]];
let signal_semaphores = [self.facade.render_finished_semaphores[self.sync_idx]];
let command_buffers = [self.command_buffers[self.swapchain_idx as usize]];
let submit_infos = [vk::SubmitInfo {
wait_semaphore_count: wait_semaphores.len() as u32,
p_wait_semaphores: wait_semaphores.as_ptr(),
p_wait_dst_stage_mask: wait_stages.as_ptr(),
command_buffer_count: command_buffers.len() as u32,
p_command_buffers: command_buffers.as_ptr(),
signal_semaphore_count: signal_semaphores.len() as u32,
p_signal_semaphores: signal_semaphores.as_ptr(),
..Default::default()
}];
let wait_fences = [self.facade.command_buffer_complete_fences[self.sync_idx]];
unsafe {
self.gpu
.device
.reset_fences(&wait_fences)
.expect("Failed to reset fence.");
self.gpu
.device
.queue_submit(
self.gpu.graphics_queue,
&submit_infos,
self.facade.command_buffer_complete_fences[self.sync_idx],
)
.expect("Failed to execute queue submit.");
}
self.sync_idx = (self.sync_idx + 1) % self.facade.num_frames;
let swapchains = [self.facade.swapchain];
let image_indices = [self.swapchain_idx as u32];
let present_info = vk::PresentInfoKHR::builder()
.wait_semaphores(&signal_semaphores)
.swapchains(&swapchains)
.image_indices(&image_indices);
/* Present the queue */
// According to Vulkan spec, queue_present() can fail if a resize occurs.
// We handle this in begin_frame(), so we should be able to ignore failure here,
// if it does happen. This works fine, when tested on Windows and on Linux on an
// integrated GPU. If this fails on some other platform, consider calling
// recreate_resolution_dependent_state() on error.
let _ = unsafe {
self.facade
.ext_swapchain
.queue_present(self.gpu.present_queue, &present_info)
};
for event in self.watch_rx.try_iter() {
use notify::DebouncedEvent::*;
match event {
Write(_) | Remove(_) | Rename(_, _) => {
unsafe {
self.gpu
.device
.device_wait_idle()
.expect("Failed to wait device idle!");
}
self.shader_list.hot_reload(&mut self.graph_cache);
}
_ => (),
}
}
}
pub fn begin_pass(&self, graph_handle: GraphHandle, pass_handle: PassHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.begin_pass(pass_handle, self.command_buffers[self.swapchain_idx])
}
pub fn end_pass(&self, graph_handle: GraphHandle) {
let (graph, _) = self
.graph_cache
.iter()
.find(|(_, cached_hash)| cached_hash.0 == graph_handle.0)
.expect("Graph not found in cache. Have you called build_graph()?");
graph.end_pass(self.command_buffers[self.swapchain_idx]);
}
#[allow(clippy::too_many_arguments)]
pub fn add_pass(
&mut self,
name: &str,
vertex_shader: ShaderHandle,
fragment_shader: ShaderHandle,
output_images: &[ImageHandle],
opt_depth_image: Option<ImageHandle>,
uniform_buffer: BufferHandle,
image_handle: ImageHandle,
environment_sampler: &Sampler,
) -> Result<PassHandle, String> {
// TODO: Assert that color and depth images have the same resolution
let img = self
.image_list
.get_image_from_handle(image_handle)
.unwrap_or_else(|| {
panic!(
"Image with handle `{:?}` not found in the context.",
image_handle
)
});
let pass = BuilderPass {
name: String::from(name),
vertex_shader,
fragment_shader,
output_images: output_images.to_owned(),
input_image: (img.image.image_view, environment_sampler.vk_sampler),
opt_depth_image,
viewport_width: self.facade.swapchain_width,
viewport_height: self.facade.swapchain_height,
| {
*control_flow = ControlFlow::Exit;
} | conditional_block |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrate how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** |**Model name**|
//! :-----:|:----:
//! English| XLM_ROBERTA_NER_EN |
//! German| XLM_ROBERTA_NER_DE |
//! Spanish| XLM_ROBERTA_NER_ES |
//! Dutch| XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label!= "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text performing entity chunking. Follows the algorithm for entities
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Entity>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// Output: \
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{ |
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label!= label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
|
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
| identifier_body |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrate how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** |**Model name**|
//! :-----:|:----:
//! English| XLM_ROBERTA_NER_EN |
//! German| XLM_ROBERTA_NER_DE |
//! Spanish| XLM_ROBERTA_NER_ES |
//! Dutch| XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label!= "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text performing entity chunking. Follows the algorithm for entities
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Entity>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// Output: \
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => { | }
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
|
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label != label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
| conditional_block |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrate how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )),
//! lower_case: false,
//! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** |**Model name**|
//! :-----:|:----:
//! English| XLM_ROBERTA_NER_EN |
//! German| XLM_ROBERTA_NER_DE |
//! Spanish| XLM_ROBERTA_NER_ES |
//! Dutch| XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label!= "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text performing entity chunking. Follows the algorithm for entities
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Entity>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// Output: \
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn h |
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label!= label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
}
| andle_current_tag( | identifier_name |
ner.rs | // Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
// Copyright (c) 2018 chakki (https://github.com/chakki-works/seqeval/blob/master/seqeval/metrics/sequence_labeling.py)
// Copyright 2019 Guillaume Becquin
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! # Named Entity Recognition pipeline
//! Extracts entities (Person, Location, Organization, Miscellaneous) from text.
//! Pretrained models are available for the following languages:
//! - English
//! - German
//! - Spanish
//! - Dutch
//!
//! The default NER mode is an English BERT cased large model finetuned on CoNNL03, contributed by the [MDZ Digital Library team at the Bavarian State Library](https://github.com/dbmdz)
//! All resources for this model can be downloaded using the Python utility script included in this repository.
//! 1. Set-up a Python virtual environment and install dependencies (in./requirements.txt)
//! 2. Run the conversion script python /utils/download-dependencies_bert_ner.py.
//! The dependencies will be downloaded to the user's home directory, under ~/rustbert/bert-ner
//!
//! The example below illustrate how to run the model for the default English NER model
//! ```no_run
//! use rust_bert::pipelines::ner::NERModel;
//! # fn main() -> anyhow::Result<()> {
//! let ner_model = NERModel::new(Default::default())?;
//!
//! let input = [
//! "My name is Amy. I live in Paris.",
//! "Paris is a city in France.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! Output: \
//! ```no_run
//! # use rust_bert::pipelines::ner::Entity;
//! # use rust_tokenizers::Offset;
//! # let output =
//! [
//! [
//! Entity {
//! word: String::from("Amy"),
//! score: 0.9986,
//! label: String::from("I-PER"),
//! offset: Offset { begin: 11, end: 14 },
//! },
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9985,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 26, end: 31 },
//! },
//! ],
//! [
//! Entity {
//! word: String::from("Paris"),
//! score: 0.9988,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 0, end: 5 },
//! },
//! Entity {
//! word: String::from("France"),
//! score: 0.9993,
//! label: String::from("I-LOC"),
//! offset: Offset { begin: 19, end: 25 },
//! },
//! ],
//! ]
//! # ;
//! ```
//!
//! To run the pipeline for another language, change the NERModel configuration from its default:
//!
//! ```no_run
//! use rust_bert::pipelines::common::ModelType;
//! use rust_bert::pipelines::ner::NERModel;
//! use rust_bert::pipelines::token_classification::TokenClassificationConfig;
//! use rust_bert::resources::RemoteResource;
//! use rust_bert::roberta::{
//! RobertaConfigResources, RobertaModelResources, RobertaVocabResources,
//! };
//! use tch::Device;
//!
//! # fn main() -> anyhow::Result<()> {
//! use rust_bert::pipelines::common::ModelResource;
//! let ner_config = TokenClassificationConfig {
//! model_type: ModelType::XLMRoberta,
//! model_resource: ModelResource::Torch(Box::new(RemoteResource::from_pretrained(
//! RobertaModelResources::XLM_ROBERTA_NER_DE,
//! ))),
//! config_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaConfigResources::XLM_ROBERTA_NER_DE,
//! )),
//! vocab_resource: Box::new(RemoteResource::from_pretrained(
//! RobertaVocabResources::XLM_ROBERTA_NER_DE,
//! )), | //! device: Device::cuda_if_available(),
//! ..Default::default()
//! };
//!
//! let ner_model = NERModel::new(ner_config)?;
//!
//! // Define input
//! let input = [
//! "Mein Name ist Amélie. Ich lebe in Paris.",
//! "Paris ist eine Stadt in Frankreich.",
//! ];
//! let output = ner_model.predict(&input);
//! # Ok(())
//! # }
//! ```
//! The XLMRoberta models for the languages are defined as follows:
//!
//! | **Language** |**Model name**|
//! :-----:|:----:
//! English| XLM_ROBERTA_NER_EN |
//! German| XLM_ROBERTA_NER_DE |
//! Spanish| XLM_ROBERTA_NER_ES |
//! Dutch| XLM_ROBERTA_NER_NL |
use crate::common::error::RustBertError;
use crate::pipelines::common::TokenizerOption;
use crate::pipelines::token_classification::{
Token, TokenClassificationConfig, TokenClassificationModel,
};
use rust_tokenizers::Offset;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// # Entity generated by a `NERModel`
pub struct Entity {
/// String representation of the Entity
pub word: String,
/// Confidence score
pub score: f64,
/// Entity label (e.g. ORG, LOC...)
pub label: String,
/// Token offsets
pub offset: Offset,
}
//type alias for some backward compatibility
type NERConfig = TokenClassificationConfig;
/// # NERModel to extract named entities
pub struct NERModel {
token_classification_model: TokenClassificationModel,
}
impl NERModel {
/// Build a new `NERModel`
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// # Ok(())
/// # }
/// ```
pub fn new(ner_config: NERConfig) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new(ner_config)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Build a new `NERModel` with a provided tokenizer.
///
/// # Arguments
///
/// * `ner_config` - `NERConfig` object containing the resource references (model, vocabulary, configuration) and device placement (CPU/GPU)
/// * `tokenizer` - `TokenizerOption` tokenizer to use for token classification
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// use rust_bert::pipelines::common::{ModelType, TokenizerOption};
/// use rust_bert::pipelines::ner::NERModel;
/// let tokenizer = TokenizerOption::from_file(
/// ModelType::Bert,
/// "path/to/vocab.txt",
/// None,
/// false,
/// None,
/// None,
/// )?;
/// let ner_model = NERModel::new_with_tokenizer(Default::default(), tokenizer)?;
/// # Ok(())
/// # }
/// ```
pub fn new_with_tokenizer(
ner_config: NERConfig,
tokenizer: TokenizerOption,
) -> Result<NERModel, RustBertError> {
let model = TokenClassificationModel::new_with_tokenizer(ner_config, tokenizer)?;
Ok(NERModel {
token_classification_model: model,
})
}
/// Get a reference to the model tokenizer.
pub fn get_tokenizer(&self) -> &TokenizerOption {
self.token_classification_model.get_tokenizer()
}
/// Get a mutable reference to the model tokenizer.
pub fn get_tokenizer_mut(&mut self) -> &mut TokenizerOption {
self.token_classification_model.get_tokenizer_mut()
}
/// Extract entities from a text
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Vec<Entity>>` containing extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = [
/// "My name is Amy. I live in Paris.",
/// "Paris is a city in France.",
/// ];
/// let output = ner_model.predict(&input);
/// # Ok(())
/// # }
/// ```
pub fn predict<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
self.token_classification_model
.predict(input, true, false)
.into_iter()
.map(|sequence_tokens| {
sequence_tokens
.into_iter()
.filter(|token| token.label!= "O")
.map(|token| Entity {
offset: token.offset.unwrap(),
word: token.text,
score: token.score,
label: token.label,
})
.collect::<Vec<Entity>>()
})
.collect::<Vec<Vec<Entity>>>()
}
/// Extract full entities from a text performing entity chunking. Follows the algorithm for entities
/// chunking described in [Erik F. Tjong Kim Sang, Jorn Veenstra, Representing Text Chunks](https://www.aclweb.org/anthology/E99-1023/)
/// The proposed implementation is inspired by the [Python seqeval library](https://github.com/chakki-works/seqeval) (shared under MIT license).
///
/// # Arguments
///
/// * `input` - `&[&str]` Array of texts to extract entities from.
///
/// # Returns
///
/// * `Vec<Entity>` containing consolidated extracted entities
///
/// # Example
///
/// ```no_run
/// # fn main() -> anyhow::Result<()> {
/// # use rust_bert::pipelines::ner::NERModel;
///
/// let ner_model = NERModel::new(Default::default())?;
/// let input = ["Asked John Smith about Acme Corp"];
/// let output = ner_model.predict_full_entities(&input);
/// # Ok(())
/// # }
/// ```
///
/// Outputs:
///
/// Output: \
/// ```no_run
/// # use rust_bert::pipelines::question_answering::Answer;
/// # use rust_bert::pipelines::ner::Entity;
/// # use rust_tokenizers::Offset;
/// # let output =
/// [[
/// Entity {
/// word: String::from("John Smith"),
/// score: 0.9747,
/// label: String::from("PER"),
/// offset: Offset { begin: 6, end: 16 },
/// },
/// Entity {
/// word: String::from("Acme Corp"),
/// score: 0.8847,
/// label: String::from("I-LOC"),
/// offset: Offset { begin: 23, end: 32 },
/// },
/// ]]
/// # ;
/// ```
pub fn predict_full_entities<S>(&self, input: &[S]) -> Vec<Vec<Entity>>
where
S: AsRef<str>,
{
let tokens = self.token_classification_model.predict(input, true, false);
let mut entities: Vec<Vec<Entity>> = Vec::new();
for sequence_tokens in tokens {
entities.push(Self::consolidate_entities(&sequence_tokens));
}
entities
}
fn consolidate_entities(tokens: &[Token]) -> Vec<Entity> {
let mut entities: Vec<Entity> = Vec::new();
let mut entity_builder = EntityBuilder::new();
for (position, token) in tokens.iter().enumerate() {
let tag = token.get_tag();
let label = token.get_label();
if let Some(entity) = entity_builder.handle_current_tag(tag, label, position, tokens) {
entities.push(entity)
}
}
if let Some(entity) = entity_builder.flush_and_reset(tokens.len(), tokens) {
entities.push(entity);
}
entities
}
}
struct EntityBuilder<'a> {
previous_node: Option<(usize, Tag, &'a str)>,
}
impl<'a> EntityBuilder<'a> {
fn new() -> Self {
EntityBuilder {
previous_node: None,
}
}
fn handle_current_tag(
&mut self,
tag: Tag,
label: &'a str,
position: usize,
tokens: &[Token],
) -> Option<Entity> {
match tag {
Tag::Outside => self.flush_and_reset(position, tokens),
Tag::Begin | Tag::Single => {
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
}
Tag::Inside | Tag::End => {
if let Some((_, previous_tag, previous_label)) = self.previous_node {
if (previous_tag == Tag::End)
| (previous_tag == Tag::Single)
| (previous_label!= label)
{
let entity = self.flush_and_reset(position, tokens);
self.start_new(position, tag, label);
entity
} else {
None
}
} else {
self.start_new(position, tag, label);
None
}
}
}
}
fn flush_and_reset(&mut self, position: usize, tokens: &[Token]) -> Option<Entity> {
let entity = if let Some((start, _, label)) = self.previous_node {
let entity_tokens = &tokens[start..position];
Some(Entity {
word: entity_tokens
.iter()
.map(|token| token.text.as_str())
.collect::<Vec<&str>>()
.join(" "),
score: entity_tokens.iter().map(|token| token.score).product(),
label: label.to_string(),
offset: Offset {
begin: entity_tokens.first()?.offset?.begin,
end: entity_tokens.last()?.offset?.end,
},
})
} else {
None
};
self.previous_node = None;
entity
}
fn start_new(&mut self, position: usize, tag: Tag, label: &'a str) {
self.previous_node = Some((position, tag, label))
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
enum Tag {
Begin,
Inside,
Outside,
End,
Single,
}
impl Token {
fn get_tag(&self) -> Tag {
match self.label.split('-').collect::<Vec<&str>>()[0] {
"B" => Tag::Begin,
"I" => Tag::Inside,
"O" => Tag::Outside,
"E" => Tag::End,
"S" => Tag::Single,
_ => panic!("Invalid tag encountered for token {:?}", self),
}
}
fn get_label(&self) -> &str {
let split_label = self.label.split('-').collect::<Vec<&str>>();
if split_label.len() > 1 {
split_label[1]
} else {
""
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
#[ignore] // no need to run, compilation is enough to verify it is Send
fn test() {
let config = NERConfig::default();
let _: Box<dyn Send> = Box::new(NERModel::new(config));
}
} | //! lower_case: false, | random_line_split |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) }!= 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr!= 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct | {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic }!= IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature }!= IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine }!= IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG)!= 0 {
continue;
}
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | Module | identifier_name |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) }!= 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr!= 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> | // TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG)!= 0 {
continue;
}
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic } != IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature } != IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine } != IMAGE_FILE_MACHINE_I386 { | identifier_body |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) }!= 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr!= 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic }!= IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature }!= IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine }!= IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 |
if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG)!= 0 {
continue;
}
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
}
#[test]
fn test_intercept() {
let target = Module::self_target();
let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} | {
break;
} | conditional_block |
detours.rs | #![allow(non_camel_case_types)]
extern crate winapi;
extern crate kernel32;
extern crate field_offset;
use winapi::*;
#[allow(unused_imports)]
use self::field_offset::*;
use std::mem;
use std::ffi::CStr;
use std::ptr::null_mut;
use std::iter::once;
use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
// Copied from winapi-rs since we are having issues with macro-use
macro_rules! DEF_STRUCT {
{$(#[$attrs:meta])* nodebug struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
{$(#[$attrs:meta])* struct $name:ident { $($field:ident: $ftype:ty,)+ }} => {
#[repr(C)] #[derive(Debug)] $(#[$attrs])*
pub struct $name {
$(pub $field: $ftype,)+
}
impl Copy for $name {}
impl Clone for $name { fn clone(&self) -> $name { *self } }
};
}
DEF_STRUCT!{struct IMAGE_DOS_HEADER {
e_magic: WORD,
e_cblp: WORD,
e_cp: WORD,
e_crlc: WORD,
e_cparhdr: WORD,
e_minalloc: WORD,
e_maxalloc: WORD,
e_ss: WORD,
e_sp: WORD,
e_csum: WORD,
e_ip: WORD,
e_cs: WORD,
e_lfarlc: WORD,
e_ovno: WORD,
e_res: [WORD; 4],
e_oemid: WORD,
e_oeminfo: WORD,
e_res2: [WORD; 10],
e_lfanew: LONG,
}}
pub type PIMAGE_DOS_HEADER = *mut IMAGE_DOS_HEADER;
DEF_STRUCT!{struct IMAGE_IMPORT_DESCRIPTOR {
OriginalFirstThunk: DWORD,
TimeDateStamp: DWORD,
ForwarderChain: DWORD,
Name: DWORD,
FirstThunk: DWORD,
}}
pub type PIMAGE_IMPORT_DESCRIPTOR = *mut IMAGE_IMPORT_DESCRIPTOR;
DEF_STRUCT!{struct IMAGE_THUNK_DATA32 {
u1: DWORD,
}}
pub type PIMAGE_THUNK_DATA32 = *mut IMAGE_THUNK_DATA32;
DEF_STRUCT!{struct IMAGE_IMPORT_BY_NAME {
Hint: WORD,
Name: BYTE,
}}
pub type PIMAGE_IMPORT_BY_NAME = *mut IMAGE_IMPORT_BY_NAME;
const IMAGE_DOS_SIGNATURE: WORD = 0x5a4d;
const IMAGE_NT_SIGNATURE: DWORD = 0x4550;
const IMAGE_ORDINAL_FLAG: DWORD = 0x80000000;
struct MemoryWriteLock {
addr: LPVOID,
size: SIZE_T,
old_protect: DWORD,
}
impl MemoryWriteLock {
pub fn new(addr: LPVOID, size: SIZE_T) -> Option<MemoryWriteLock> {
let mut lock = MemoryWriteLock {
addr: addr,
size: size,
old_protect: 0 as DWORD,
};
if unsafe {
kernel32::VirtualProtect(addr, size, PAGE_READWRITE, &mut lock.old_protect)
} == 0 {
return None;
}
Some(lock)
}
}
impl Drop for MemoryWriteLock {
fn drop(&mut self) {
let mut old_protect: DWORD = 0 as DWORD;
unsafe {
kernel32::VirtualProtect(self.addr, self.size, self.old_protect, &mut old_protect)
};
}
}
#[cfg(test)]
fn assert_mem_protect(addr: LPVOID, size: SIZE_T, protect: DWORD) {
let mut mbi: MEMORY_BASIC_INFORMATION = unsafe { mem::zeroed() };
assert!(unsafe { kernel32::VirtualQuery(addr, &mut mbi, size) }!= 0);
assert_eq!(mbi.Protect, protect);
}
#[test]
fn test_memorywritelock() {
let size = 0x1000;
let addr = unsafe { kernel32::VirtualAlloc(null_mut(), size, MEM_COMMIT, PAGE_READONLY) };
assert!(addr!= 0 as LPVOID);
assert_mem_protect(addr, size, PAGE_READONLY);
{
let lock = MemoryWriteLock::new(addr, size);
assert!(lock.is_some());
assert_mem_protect(addr, size, PAGE_READWRITE);
}
assert_mem_protect(addr, size, PAGE_READONLY);
}
pub struct Module {
module: HMODULE,
}
impl Module {
#[allow(dead_code)]
pub fn target(moduleName: &str) -> Option<Module> {
let mut library = Module { module: 0 as HMODULE };
let wModuleName: Vec<u16> = OsStr::new(moduleName)
.encode_wide()
.chain(once(0))
.collect();
library.module = unsafe { kernel32::GetModuleHandleW(wModuleName.as_ptr()) };
if library.module == 0 as HMODULE {
return None;
}
Some(library)
}
#[allow(dead_code)]
pub fn self_target() -> Module {
Module { module: unsafe { kernel32::GetModuleHandleW(null_mut()) } }
}
pub fn intercept(&self,
targetModule: &str,
funcName: &str,
replaceFuncPtr: LPVOID)
-> Option<LPVOID> {
let base_addr: PBYTE = unsafe { mem::transmute::<HMODULE, PBYTE>(self.module) };
let dos_hdr: PIMAGE_DOS_HEADER =
unsafe { mem::transmute::<HMODULE, PIMAGE_DOS_HEADER>(self.module) };
if unsafe { (*dos_hdr).e_magic }!= IMAGE_DOS_SIGNATURE {
return None;
}
let nt_hdr: PIMAGE_NT_HEADERS32 =
unsafe {
mem::transmute::<PBYTE, PIMAGE_NT_HEADERS32>(base_addr.offset((*dos_hdr).e_lfanew as
isize))
};
if unsafe { (*nt_hdr).Signature }!= IMAGE_NT_SIGNATURE {
return None;
}
if unsafe { (*nt_hdr).FileHeader.Machine }!= IMAGE_FILE_MACHINE_I386 {
// TODO: Think about adding support for IMAGE_FILE_MACHINE_AMD64 later
return None;
}
let import_desc_array: PIMAGE_IMPORT_DESCRIPTOR = unsafe {
mem::transmute::<PBYTE, PIMAGE_IMPORT_DESCRIPTOR>(
base_addr.offset((*nt_hdr).OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT as usize].VirtualAddress as isize)
)
};
let mut i = 0;
loop {
let import_desc = unsafe { (*import_desc_array.offset(i)) };
if import_desc.OriginalFirstThunk == 0 {
break;
}
let dll_name =
unsafe { CStr::from_ptr(base_addr.offset(import_desc.Name as isize) as *const i8) }
.to_string_lossy();
if targetModule.to_string().to_lowercase() == dll_name.to_lowercase() {
if import_desc.FirstThunk == 0 || import_desc.OriginalFirstThunk == 0 {
return None;
}
let thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.FirstThunk as
isize))
};
let orig_thunk_ptr: PIMAGE_THUNK_DATA32 =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_THUNK_DATA32>(base_addr
.offset(import_desc.OriginalFirstThunk as
isize))
};
let mut j = 0;
loop {
let orig_thunk = unsafe { *orig_thunk_ptr.offset(j) };
if orig_thunk.u1 == 0 {
break;
}
if (orig_thunk.u1 & IMAGE_ORDINAL_FLAG)!= 0 {
continue;
}
let import: PIMAGE_IMPORT_BY_NAME =
unsafe {
mem::transmute::<PBYTE,
PIMAGE_IMPORT_BY_NAME>(base_addr
.offset(orig_thunk.u1 as
isize))
};
let name_field = offset_of!(IMAGE_IMPORT_BY_NAME => Name);
let func_name =
unsafe { CStr::from_ptr(name_field.apply_ptr(import) as *const i8) }
.to_string_lossy();
if funcName == func_name {
let old_func_ptr: LONG;
let iat_ptr_field = offset_of!(IMAGE_THUNK_DATA32 => u1);
{
#[allow(unused_variables)]
let lock =
MemoryWriteLock::new(iat_ptr_field.apply_ptr(unsafe { thunk_ptr.offset(j) }) as
LPVOID,
mem::size_of::<LPVOID>() as u32);
old_func_ptr = unsafe {
kernel32::InterlockedExchange(
iat_ptr_field.apply_ptr_mut(thunk_ptr.offset(j)) as *mut LONG,
replaceFuncPtr as LONG)
};
}
return Some(old_func_ptr as LPVOID);
}
j += 1;
}
}
i += 1;
}
None
}
}
#[allow(unused_variables)]
#[cfg(test)]
extern "system" fn myCreatePipe(hReadPipe: PHANDLE,
hWritePipe: PHANDLE,
lpPipeAttributes: LPVOID,
nSize: DWORD)
-> BOOL {
0x31337
} | let mut result = target.intercept("kernel32.dll", "CreatePipe", unsafe {
mem::transmute::<extern "system" fn(PHANDLE,
PHANDLE,
LPVOID,
DWORD)
-> BOOL,
LPVOID>(myCreatePipe)
});
assert!(result.is_some());
let ret = unsafe { kernel32::CreatePipe(null_mut(), null_mut(), null_mut(), 0x1337) };
assert_eq!(ret, 0x31337);
result = target.intercept("kernel32.dll", "CreatePipe", result.unwrap());
assert!(result.is_some());
} |
#[test]
fn test_intercept() {
let target = Module::self_target();
| random_line_split |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
fn | (ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
let sign: bool;
ix = x.to_bits();
sign = (ix >> 31)!= 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
return -1.0 / 0.0;
}
if (ix >> 31)!= 0 {
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansions of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 -..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 +... + pr5*s^10
* S = 1 + ps0*s^2 +... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935e+01, /* 0x42b61c2a */
4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
fn ponef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 5];
let z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &PR8;
q = &PS8;
} else if ix >= 0x409173eb {
p = &PR5;
q = &PS5;
} else if ix >= 0x4036d917 {
p = &PR3;
q = &PS3;
} else
/*ix >= 0x40000000*/
{
p = &PR2;
q = &PS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
return 1.0 + r / s;
}
/* For x >= 8, the asymptotic expansions of qone is
* 3/8 s - 105/1024 s^3 -..., where s = 1/x.
* We approximate pone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 +... + qr5*s^10
* S = 1 + qs1*s^2 +... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
}
#[test]
fn test_y1f_2002() {
//allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
}
| common | identifier_name |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
let sign: bool;
ix = x.to_bits();
sign = (ix >> 31)!= 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
const U0: [f32; 5] = [
-1.9605709612e-01, /* 0xbe48c331 */
5.0443872809e-02, /* 0x3d4e9e3c */
-1.9125689287e-03, /* 0xbafaaf2a */
2.3525259166e-05, /* 0x37c5581c */
-9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
1.9916731864e-02, /* 0x3ca3286a */
2.0255257550e-04, /* 0x3954644b */
1.3560879779e-06, /* 0x35b602d4 */
6.2274145840e-09, /* 0x31d5f8eb */
1.6655924903e-11, /* 0x2d9281cf */
];
pub fn y1f(x: f32) -> f32 {
let z: f32;
let u: f32;
let v: f32;
let ix: u32;
ix = x.to_bits();
if (ix & 0x7fffffff) == 0 {
return -1.0 / 0.0;
}
if (ix >> 31)!= 0 {
return 0.0 / 0.0;
}
if ix >= 0x7f800000 {
return 1.0 / x;
}
if ix >= 0x40000000 {
/* |x| >= 2.0 */
return common(ix, x, true, false);
}
if ix < 0x33000000 {
/* x < 2**-25 */
return -TPI / x;
}
z = x * x;
u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansions of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 -..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 +... + pr5*s^10
* S = 1 + ps0*s^2 +... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
3.0250391081e-09, /* 0x314fe10d */
1.1718686670e-01, /* 0x3defffab */
3.9329774380e+00, /* 0x407bb5e7 */
3.5119403839e+01, /* 0x420c7a45 */
9.1055007935e+01, /* 0x42b61c2a */
4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
fn ponef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 5];
let z: f32;
let r: f32;
let s: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &PR8;
q = &PS8;
} else if ix >= 0x409173eb {
p = &PR5;
q = &PS5;
} else if ix >= 0x4036d917 {
p = &PR3;
q = &PS3;
} else
/*ix >= 0x40000000*/
{
p = &PR2;
q = &PS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * q[4]))));
return 1.0 + r / s;
}
/* For x >= 8, the asymptotic expansions of qone is
* 3/8 s - 105/1024 s^3 -..., where s = 1/x.
* We approximate pone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 +... + qr5*s^10
* S = 1 + qs1*s^2 +... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
-5.0783124372e-09, /* 0xb1ae7d4f */
-1.0253783315e-01, /* 0xbdd1ff5b */
-4.6101160049e+00, /* 0xc0938612 */
-5.7847221375e+01, /* 0xc267638e */
-2.2824453735e+02, /* 0xc3643e9a */
-2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
fn qonef(x: f32) -> f32 {
let p: &[f32; 6];
let q: &[f32; 6];
let s: f32;
let r: f32;
let z: f32;
let mut ix: u32;
ix = x.to_bits();
ix &= 0x7fffffff;
if ix >= 0x41000000 {
p = &QR8;
q = &QS8;
} else if ix >= 0x409173eb {
p = &QR5;
q = &QS5;
} else if ix >= 0x4036d917 {
p = &QR3;
q = &QS3;
} else
/*ix >= 0x40000000*/
{
p = &QR2;
q = &QS2;
}
z = 1.0 / (x * x);
r = p[0] + z * (p[1] + z * (p[2] + z * (p[3] + z * (p[4] + z * p[5]))));
s = 1.0 + z * (q[0] + z * (q[1] + z * (q[2] + z * (q[3] + z * (q[4] + z * q[5])))));
return (0.375 + r / s) / x;
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() |
#[test]
fn test_y1f_2002() {
//allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
}
| {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
} | identifier_body |
j1f.rs | /* origin: FreeBSD /usr/src/lib/msun/src/e_j1f.c */
/*
* Conversion to float by Ian Lance Taylor, Cygnus Support, [email protected].
*/
/*
* ====================================================
* Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
*
* Developed at SunPro, a Sun Microsystems, Inc. business.
* Permission to use, copy, modify, and distribute this
* software is freely granted, provided that this notice
* is preserved.
* ====================================================
*/
use super::{cosf, fabsf, logf, sinf, sqrtf};
const INVSQRTPI: f32 = 5.6418961287e-01; /* 0x3f106ebb */
const TPI: f32 = 6.3661974669e-01; /* 0x3f22f983 */
fn common(ix: u32, x: f32, y1: bool, sign: bool) -> f32 {
let z: f64;
let mut s: f64;
let c: f64;
let mut ss: f64;
let mut cc: f64;
s = sinf(x) as f64;
if y1 {
s = -s;
}
c = cosf(x) as f64;
cc = s - c;
if ix < 0x7f000000 {
ss = -s - c;
z = cosf(2.0 * x) as f64;
if s * c > 0.0 {
cc = z / ss;
} else {
ss = z / cc;
}
if ix < 0x58800000 {
if y1 {
ss = -ss;
}
cc = (ponef(x) as f64) * cc - (qonef(x) as f64) * ss;
}
}
if sign {
cc = -cc;
}
return (((INVSQRTPI as f64) * cc) / (sqrtf(x) as f64)) as f32;
}
/* R0/S0 on [0,2] */
const R00: f32 = -6.2500000000e-02; /* 0xbd800000 */
const R01: f32 = 1.4070566976e-03; /* 0x3ab86cfd */
const R02: f32 = -1.5995563444e-05; /* 0xb7862e36 */
const R03: f32 = 4.9672799207e-08; /* 0x335557d2 */
const S01: f32 = 1.9153760746e-02; /* 0x3c9ce859 */
const S02: f32 = 1.8594678841e-04; /* 0x3942fab6 */
const S03: f32 = 1.1771846857e-06; /* 0x359dffc2 */
const S04: f32 = 5.0463624390e-09; /* 0x31ad6446 */
const S05: f32 = 1.2354227016e-11; /* 0x2d59567e */
pub fn j1f(x: f32) -> f32 {
let mut z: f32;
let r: f32;
let s: f32;
let mut ix: u32; | ix = x.to_bits();
sign = (ix >> 31)!= 0;
ix &= 0x7fffffff;
if ix >= 0x7f800000 {
return 1.0 / (x * x);
}
if ix >= 0x40000000 {
/* |x| >= 2 */
return common(ix, fabsf(x), false, sign);
}
if ix >= 0x39000000 {
/* |x| >= 2**-13 */
z = x * x;
r = z * (R00 + z * (R01 + z * (R02 + z * R03)));
s = 1.0 + z * (S01 + z * (S02 + z * (S03 + z * (S04 + z * S05))));
z = 0.5 + r / s;
} else {
z = 0.5;
}
return z * x;
}
/* U0/V0: rational part of y1f on [2**-25, 2):
   y1(x) ~ x*U(z)/V(z) + tpi*(j1(x)*ln(x) - 1/x), z = x*x.
   V carries an implicit leading 1.0 (see y1f below). */
const U0: [f32; 5] = [
    -1.9605709612e-01, /* 0xbe48c331 */
    5.0443872809e-02, /* 0x3d4e9e3c */
    -1.9125689287e-03, /* 0xbafaaf2a */
    2.3525259166e-05, /* 0x37c5581c */
    -9.1909917899e-08, /* 0xb3c56003 */
];
const V0: [f32; 5] = [
    1.9916731864e-02, /* 0x3ca3286a */
    2.0255257550e-04, /* 0x3954644b */
    1.3560879779e-06, /* 0x35b602d4 */
    6.2274145840e-09, /* 0x31d5f8eb */
    1.6655924903e-11, /* 0x2d9281cf */
];
/// Bessel function of the second kind, order one, single precision.
pub fn y1f(x: f32) -> f32 {
    let z: f32;
    let u: f32;
    let v: f32;
    let ix: u32;
    ix = x.to_bits();
    if (ix & 0x7fffffff) == 0 {
        /* y1(+-0) = -inf (the division raises divide-by-zero) */
        return -1.0 / 0.0;
    }
    if (ix >> 31) != 0 {
        /* y1 is undefined for x < 0: return nan */
        return 0.0 / 0.0;
    }
    if ix >= 0x7f800000 {
        /* y1(inf) = 0, y1(nan) = nan */
        return 1.0 / x;
    }
    if ix >= 0x40000000 {
        /* |x| >= 2.0 */
        return common(ix, x, true, false);
    }
    if ix < 0x33000000 {
        /* x < 2**-25: the -2/(pi*x) pole term dominates everything else */
        return -TPI / x;
    }
    /* rational approximation plus the logarithmic connection term
       tpi*(j1(x)*ln(x) - 1/x) */
    z = x * x;
    u = U0[0] + z * (U0[1] + z * (U0[2] + z * (U0[3] + z * U0[4])));
    v = 1.0 + z * (V0[0] + z * (V0[1] + z * (V0[2] + z * (V0[3] + z * V0[4]))));
    return x * (u / v) + TPI * (j1f(x) * logf(x) - 1.0 / x);
}
/* For x >= 8, the asymptotic expansion of pone is
* 1 + 15/128 s^2 - 4725/2^15 s^4 -..., where s = 1/x.
* We approximate pone by
* pone(x) = 1 + (R/S)
* where R = pr0 + pr1*s^2 + pr2*s^4 +... + pr5*s^10
* S = 1 + ps0*s^2 +... + ps4*s^10
* and
* | pone(x)-1-R/S | <= 2 ** ( -60.06)
*/
const PR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
1.1718750000e-01, /* 0x3df00000 */
1.3239480972e+01, /* 0x4153d4ea */
4.1205184937e+02, /* 0x43ce06a3 */
3.8747453613e+03, /* 0x45722bed */
7.9144794922e+03, /* 0x45f753d6 */
];
const PS8: [f32; 5] = [
1.1420736694e+02, /* 0x42e46a2c */
3.6509309082e+03, /* 0x45642ee5 */
3.6956207031e+04, /* 0x47105c35 */
9.7602796875e+04, /* 0x47bea166 */
3.0804271484e+04, /* 0x46f0a88b */
];
const PR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
1.3199052094e-11, /* 0x2d68333f */
1.1718749255e-01, /* 0x3defffff */
6.8027510643e+00, /* 0x40d9b023 */
1.0830818176e+02, /* 0x42d89dca */
5.1763616943e+02, /* 0x440168b7 */
5.2871520996e+02, /* 0x44042dc6 */
];
const PS5: [f32; 5] = [
5.9280597687e+01, /* 0x426d1f55 */
9.9140142822e+02, /* 0x4477d9b1 */
5.3532670898e+03, /* 0x45a74a23 */
7.8446904297e+03, /* 0x45f52586 */
1.5040468750e+03, /* 0x44bc0180 */
];
const PR3: [f32; 6] = [
    /* for x in [4.5454,2.8571]=1/[0.22001,0.3499] — range implied by the
       ponef selection thresholds 0x409173eb and 0x4036d917 */
    3.0250391081e-09, /* 0x314fe10d */
    1.1718686670e-01, /* 0x3defffab */
    3.9329774380e+00, /* 0x407bb5e7 */
    3.5119403839e+01, /* 0x420c7a45 */
    9.1055007935e+01, /* 0x42b61c2a */
    4.8559066772e+01, /* 0x42423c7c */
];
const PS3: [f32; 5] = [
3.4791309357e+01, /* 0x420b2a4d */
3.3676245117e+02, /* 0x43a86198 */
1.0468714600e+03, /* 0x4482dbe3 */
8.9081134033e+02, /* 0x445eb3ed */
1.0378793335e+02, /* 0x42cf936c */
];
const PR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
1.0771083225e-07, /* 0x33e74ea8 */
1.1717621982e-01, /* 0x3deffa16 */
2.3685150146e+00, /* 0x401795c0 */
1.2242610931e+01, /* 0x4143e1bc */
1.7693971634e+01, /* 0x418d8d41 */
5.0735230446e+00, /* 0x40a25a4d */
];
const PS2: [f32; 5] = [
2.1436485291e+01, /* 0x41ab7dec */
1.2529022980e+02, /* 0x42fa9499 */
2.3227647400e+02, /* 0x436846c7 */
1.1767937469e+02, /* 0x42eb5bd7 */
8.3646392822e+00, /* 0x4105d590 */
];
/* Asymptotic correction factor p1 for |x| >= 2: selects the rational
   approximation R/S whose interval contains |x| and returns 1 + R(z)/S(z)
   evaluated at z = 1/x^2. */
fn ponef(x: f32) -> f32 {
    // Compare on the bit pattern of |x| to pick the coefficient tables.
    let bits = x.to_bits() & 0x7fffffff;
    let (num, den): (&[f32; 6], &[f32; 5]) = if bits >= 0x41000000 {
        (&PR8, &PS8) // |x| >= 8
    } else if bits >= 0x409173eb {
        (&PR5, &PS5) // |x| in [4.5454, 8)
    } else if bits >= 0x4036d917 {
        (&PR3, &PS3) // |x| in [2.8571, 4.5454)
    } else {
        /* bits >= 0x40000000, i.e. |x| in [2, 2.8571) */
        (&PR2, &PS2)
    };
    // Horner evaluation at z = 1/x^2 (identical operation order to the
    // reference implementation, so results are bit-for-bit the same).
    let z = 1.0 / (x * x);
    let r = num[0] + z * (num[1] + z * (num[2] + z * (num[3] + z * (num[4] + z * num[5]))));
    let s = 1.0 + z * (den[0] + z * (den[1] + z * (den[2] + z * (den[3] + z * den[4]))));
    1.0 + r / s
}
/* For x >= 8, the asymptotic expansion of qone is
 * 3/8 s - 105/1024 s^3 -..., where s = 1/x.
 * We approximate qone by
* qone(x) = s*(0.375 + (R/S))
* where R = qr1*s^2 + qr2*s^4 +... + qr5*s^10
* S = 1 + qs1*s^2 +... + qs6*s^12
* and
* | qone(x)/s -0.375-R/S | <= 2 ** ( -61.13)
*/
const QR8: [f32; 6] = [
/* for x in [inf, 8]=1/[0,0.125] */
0.0000000000e+00, /* 0x00000000 */
-1.0253906250e-01, /* 0xbdd20000 */
-1.6271753311e+01, /* 0xc1822c8d */
-7.5960174561e+02, /* 0xc43de683 */
-1.1849806641e+04, /* 0xc639273a */
-4.8438511719e+04, /* 0xc73d3683 */
];
const QS8: [f32; 6] = [
1.6139537048e+02, /* 0x43216537 */
7.8253862305e+03, /* 0x45f48b17 */
1.3387534375e+05, /* 0x4802bcd6 */
7.1965775000e+05, /* 0x492fb29c */
6.6660125000e+05, /* 0x4922be94 */
-2.9449025000e+05, /* 0xc88fcb48 */
];
const QR5: [f32; 6] = [
/* for x in [8,4.5454]=1/[0.125,0.22001] */
-2.0897993405e-11, /* 0xadb7d219 */
-1.0253904760e-01, /* 0xbdd1fffe */
-8.0564479828e+00, /* 0xc100e736 */
-1.8366960144e+02, /* 0xc337ab6b */
-1.3731937256e+03, /* 0xc4aba633 */
-2.6124443359e+03, /* 0xc523471c */
];
const QS5: [f32; 6] = [
8.1276550293e+01, /* 0x42a28d98 */
1.9917987061e+03, /* 0x44f8f98f */
1.7468484375e+04, /* 0x468878f8 */
4.9851425781e+04, /* 0x4742bb6d */
2.7948074219e+04, /* 0x46da5826 */
-4.7191835938e+03, /* 0xc5937978 */
];
const QR3: [f32; 6] = [
    /* for x in [4.5454,2.8571]=1/[0.22001,0.3499] — range implied by the
       qonef selection thresholds 0x409173eb and 0x4036d917 */
    -5.0783124372e-09, /* 0xb1ae7d4f */
    -1.0253783315e-01, /* 0xbdd1ff5b */
    -4.6101160049e+00, /* 0xc0938612 */
    -5.7847221375e+01, /* 0xc267638e */
    -2.2824453735e+02, /* 0xc3643e9a */
    -2.1921012878e+02, /* 0xc35b35cb */
];
const QS3: [f32; 6] = [
4.7665153503e+01, /* 0x423ea91e */
6.7386511230e+02, /* 0x4428775e */
3.3801528320e+03, /* 0x45534272 */
5.5477290039e+03, /* 0x45ad5dd5 */
1.9031191406e+03, /* 0x44ede3d0 */
-1.3520118713e+02, /* 0xc3073381 */
];
const QR2: [f32; 6] = [
/* for x in [2.8570,2]=1/[0.3499,0.5] */
-1.7838172539e-07, /* 0xb43f8932 */
-1.0251704603e-01, /* 0xbdd1f475 */
-2.7522056103e+00, /* 0xc0302423 */
-1.9663616180e+01, /* 0xc19d4f16 */
-4.2325313568e+01, /* 0xc2294d1f */
-2.1371921539e+01, /* 0xc1aaf9b2 */
];
const QS2: [f32; 6] = [
2.9533363342e+01, /* 0x41ec4454 */
2.5298155212e+02, /* 0x437cfb47 */
7.5750280762e+02, /* 0x443d602e */
7.3939318848e+02, /* 0x4438d92a */
1.5594900513e+02, /* 0x431bf2f2 */
-4.9594988823e+00, /* 0xc09eb437 */
];
/* Asymptotic correction factor q1 for |x| >= 2: returns (0.375 + R(z)/S(z)) / x
   with z = 1/x^2, using the coefficient tables whose interval contains |x|. */
fn qonef(x: f32) -> f32 {
    // Compare on the bit pattern of |x| to pick the coefficient tables.
    let bits = x.to_bits() & 0x7fffffff;
    let (num, den): (&[f32; 6], &[f32; 6]) = if bits >= 0x41000000 {
        (&QR8, &QS8) // |x| >= 8
    } else if bits >= 0x409173eb {
        (&QR5, &QS5) // |x| in [4.5454, 8)
    } else if bits >= 0x4036d917 {
        (&QR3, &QS3) // |x| in [2.8571, 4.5454)
    } else {
        /* bits >= 0x40000000, i.e. |x| in [2, 2.8571) */
        (&QR2, &QS2)
    };
    // Horner evaluation at z = 1/x^2, operation order unchanged so the
    // float results match the reference implementation exactly.
    let z = 1.0 / (x * x);
    let r = num[0] + z * (num[1] + z * (num[2] + z * (num[3] + z * (num[4] + z * num[5]))));
    let s = 1.0 + z * (den[0] + z * (den[1] + z * (den[2] + z * (den[3] + z * (den[4] + z * den[5])))));
    (0.375 + r / s) / x
}
// PowerPC tests are failing on LLVM 13: https://github.com/rust-lang/rust/issues/88520
#[cfg(not(target_arch = "powerpc64"))]
#[cfg(test)]
mod tests {
use super::{j1f, y1f};
#[test]
fn test_j1f_2488() {
// 0x401F3E49
assert_eq!(j1f(2.4881766_f32), 0.49999475_f32);
}
#[test]
fn test_y1f_2002() {
//allow slightly different result on x87
let res = y1f(2.0000002_f32);
if cfg!(all(target_arch = "x86", not(target_feature = "sse2"))) && (res == -0.10703231_f32)
{
return;
}
assert_eq!(res, -0.10703229_f32);
}
} | let sign: bool;
| random_line_split |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
/// View model for an answer (plus its originating question), serialized
/// into templates and the JSON API.
#[derive(Serialize, Debug)]
struct AnswerDTO {
    pub id: i32,
    pub body: String,
    pub question: QuestionDTO,
    pub created_at: DateTime<Local>,
    // human-readable rendering of `created_at` (utils::recognizable_datetime)
    pub created_at_recognizable: String,
}
impl AnswerDTO {
    /// Converts a `model::Answer` into the DTO, formatting the timestamp
    /// and converting the embedded question.
    fn from(a: model::Answer) -> Self {
        Self {
            id: a.id, body: a.body, created_at: a.created_at,
            created_at_recognizable: utils::recognizable_datetime(a.created_at),
            question: QuestionDTO::from(a.question)
        }
    }
}
/// View model for a question, serialized into templates and the JSON API.
#[derive(Serialize, Debug)]
struct QuestionDTO {
    pub id: i32,
    pub body: String,
    pub created_at: DateTime<Local>,
    // human-readable rendering of `created_at` (utils::recognizable_datetime)
    pub created_at_recognizable: String,
}
impl QuestionDTO {
    /// Converts a `model::Question` into the DTO, formatting the timestamp.
    fn from(q: model::Question) -> Self {
        Self {
            id: q.id, body: q.body, created_at: q.created_at,
            created_at_recognizable: utils::recognizable_datetime(q.created_at)
        }
    }
}
/* Force ssl: bounce any request matched by the ForceSSL guard to the
   HTTPS origin, preserving the request path. */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
    let domain = env::var("APPLICATION_DOMAIN").unwrap();
    let redirect_to = format!("https://{}/{}", domain, path.as_path().display());
    println!("Redirect to:{}", redirect_to);
    response::Redirect::to(redirect_to)
}
/* GET /static/: serve files below static/ wrapped in web::CachedFile
   (which adds caching headers); unknown paths yield 404. */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
    let path = Path::new("static/").join(file);
    match response::NamedFile::open(&path) {
        Ok(named) => Ok(web::CachedFile(named)),
        Err(_) => Err(status::NotFound(format!("Bad path: {:?}", path))),
    }
}
/* GET / */
/// Template context for the paginated index page.
#[derive(Serialize, Debug)]
struct IndexDTO {
    pub profile: ProfileDTO,
    pub answers: Vec<AnswerDTO>,
    pub site_url: String,
    // pagination links; None when that neighbor page does not exist
    pub next_page: Option<i64>,
    pub prev_page: Option<i64>,
}
/// Profile card rendered in page headers.
#[derive(Serialize, Debug)]
struct ProfileDTO {
    pub username: String,
    pub image_url: String,
}
#[test]
fn next_prev_page_test() {
    // page 0 is the newest page, so it has no "next" (newer) neighbor
    assert!((None, Some(1)) == next_prev_page(0));
    assert!((Some(0), Some(2)) == next_prev_page(1));
    assert!((Some(1), Some(3)) == next_prev_page(2));
}
// Pagination neighbors of `current_page`.
// next = newer (page number decreases), prev = older (page number increases);
// page 0 is the newest, so it has no next page.
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
    let next_page = if current_page > 0 { Some(current_page - 1) } else { None };
    (next_page, Some(current_page + 1))
}
// Number of answers rendered per index page.
const ANSWER_COUNT_PER_PAGE : i64 = 30;
/* GET /: newest page of answers. */
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
    let page = 0;
    index_with_page(repo, profile, page)
}
/* GET /page/<page>: paginated answer list; page 0 is the newest. */
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
    // NOTE(review): a negative `page` produces a negative offset here —
    // presumably repo.answers tolerates it; confirm.
    let offset = page * ANSWER_COUNT_PER_PAGE;
    let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
        .into_iter()
        .map(|a| AnswerDTO::from(a))
        .collect::<Vec<_>>();
    let (next_page, prev_page) = next_prev_page(page);
    let context = IndexDTO {
        profile: ProfileDTO {
            username: profile.clone().name,
            image_url: String::from("/static/image/profile.jpg")
        },
        answers: answer_dtos,
        site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
        prev_page: prev_page,
        next_page: next_page,
    };
    Template::render("index", &context)
}
/// Template context for the search results page.
#[derive(Serialize, Debug)]
struct SearchDTO {
    pub profile: ProfileDTO,
    pub search_results: Vec<AnswerDTO>,
    pub site_url: String,
    pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template |
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
    body: String
}
/// Context for the "question rejected" error page.
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
    reason: String
}
/// Stores a new question, sends a notification email, and redirects to
/// the confirmation page; a blank body renders an error page instead.
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
    -> Result<response::Redirect, Template> {
    match repo.store_question(params.body.clone(), client_ip.address()) {
        Ok(question) => {
            // `question` is moved into send_email below, so copy the id first.
            let question_id = question.id;
            notify::send_email(question);
            Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
        },
        Err(err) => {
            match err {
                model::StoreQuestionError::BlankBody => {
                    // user-facing Japanese message: "the question body is empty"
                    let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
                    Err(Template::render("question/post_failed", &context))
                }
            }
        }
    }
}
/* GET /question/<id>/after_post: confirmation page shown right after a
   question is submitted; unknown ids bounce back to the index. */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
    pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
    match repo.find_question(question_id) {
        Some(question) => {
            let context = AfterPostQuestionDTO {
                question: QuestionDTO::from(question),
            };
            Ok(Template::render("question/after_post", &context))
        }
        None => Err(response::Redirect::to("/")),
    }
}
/* GET /answer/<question_id> */
/// Context for the answer page / JSON API: the answer plus its
/// chronological neighbors.
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
    pub answer: AnswerDTO,
    pub next_answer: Option<AnswerDTO>,
    pub prev_answer: Option<AnswerDTO>,
}
/* GET /question/<id>: redirect to the answer page when the question has
   been answered, 404 otherwise. */
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
    repo.find_answer_by_question_id(question_id)
        .map(|answer| response::Redirect::to(format!("/answer/{}", answer.id)))
        .ok_or(status::NotFound("not found"))
}
/* GET /answer/<id>: client-rendered page — the template only needs the
   environment flag; the answer itself is fetched via the JSON API. */
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
    let mut context: HashMap<String, bool> = HashMap::new();
    context.insert("is_production".to_string(), app_env.is_production);
    Template::render("answer/show", &context)
}
/* GET /api/answer/<id>: JSON payload for the answer page, including the
   chronologically adjacent answers for prev/next navigation. */
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
    match repo.find_answer(answer_id) {
        None => Err(status::NotFound("not found")),
        Some(answer) => {
            // Look up neighbors before `answer` is moved into the DTO.
            let next = repo.find_next_answer(answer.created_at);
            let prev = repo.find_prev_answer(answer.created_at);
            Ok(Json(ShowAnswerDTO {
                answer: AnswerDTO::from(answer),
                next_answer: next.map(AnswerDTO::from),
                prev_answer: prev.map(AnswerDTO::from),
            }))
        }
    }
}
/* GET /admin */
/// Context for the admin dashboard: questions awaiting an answer.
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
    pub questions: Vec<QuestionDTO>
}
/// Admin dashboard (Basic-auth protected): lists unanswered,
/// non-hidden questions.
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
    let question_dtos = repo.not_answered_questions()
        .into_iter()
        .filter(|q| !q.hidden)
        .map(|q| QuestionDTO::from(q))
        .collect::<Vec<_>>();
    let context = AdminIndexDTO { questions: question_dtos };
    Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
/// Admin detail view for a single question.
/// NOTE(review): unwrap panics (500) on an unknown id — confirm acceptable
/// for the auth-protected admin area.
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
    let question = repo.find_question(question_id).unwrap();
    let context = QuestionDTO::from(question);
    Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
    body: String
}
/// Stores an answer and queues it on the channel consumed by the
/// tweet worker thread spawned in main().
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
    question_id: i32, repo: web::guard::Repository,
    params: request::Form<PostAnswerForm>,
    tweet_sender: State<SyncSender<model::Answer>>,
    _auth: web::guard::BasicAuth
) -> response::Redirect {
    let answer_body = params.body.clone();
    if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
        tweet_sender.send(answer).unwrap();
    }
    response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
/// Marks a question hidden so it disappears from the admin dashboard.
/// NOTE(review): unwrap panics on an unknown id, as above.
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
    let mut question = repo.find_question(question_id).unwrap();
    question.hidden = true;
    repo.update_question(question);
    response::Redirect::to("/admin")
}
/* Force login */
/// Responder for the 401 catcher: replies with a WWW-Authenticate header
/// so the browser prompts for HTTP Basic credentials.
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
    fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
        response::Response::build()
            .status(Status::Unauthorized)
            .header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
            .ok()
    }
}
/// Catcher for 401 responses raised by the BasicAuth guard.
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
    RequireLogin()
}
/// Site owner's display name, managed as Rocket state.
#[derive(Clone)]
struct UserProfile {
    pub name: String
}
/// Deployment flags, managed as Rocket state.
#[derive(Clone)]
struct AppEnvironment {
    pub is_production: bool
}
fn main() {
    // Load .env if present; ignore a missing file.
    dotenv::dotenv().ok();
    let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
        env::var("DATABASE_URL").unwrap()
    );
    let pool = r2d2::Pool::builder()
        .max_size(15)
        .build(manager)
        .unwrap();
    // Worker thread: tweets each stored answer, throttled to one per
    // 5 minutes; handlers enqueue via the bounded (1000) sender.
    let (tweet_sender, tweet_receiver) = sync_channel(1000);
    thread::spawn(move || {
        loop {
            let answer = tweet_receiver.recv().unwrap();
            tweet::tweet_answer(answer);
            thread::sleep(time::Duration::from_secs(5 * 60));
        }
    });
    let user_profile = UserProfile {
        name: tweet::get_twitter_username()
    };
    let app_env = AppEnvironment {
        is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
    };
    // NOTE(review): redirect_ssl is defined above but not mounted here —
    // confirm whether that is intentional.
    rocket::ignite()
        .manage(pool)
        .manage(tweet_sender)
        .manage(user_profile)
        .manage(app_env)
        .mount("/", routes![
            index, index_with_page, files, post_question, after_post_question, show_answer,
            admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
            show_question, show_answer_json
        ])
        .register(catchers![unauthorized])
        .attach(Template::fairing())
        .launch();
}
| {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
} | identifier_body |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q|!q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
i | onse::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
}
| mpl<'r> resp | identifier_name |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => | ,
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */
#[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q|!q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
}
| {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
} | conditional_block |
main.rs | #![feature(proc_macro_hygiene, decl_macro)]
extern crate dotenv;
extern crate chrono;
extern crate uuid;
#[macro_use] extern crate rocket;
extern crate rocket_contrib;
extern crate base64;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate r2d2;
extern crate r2d2_diesel;
#[macro_use]
extern crate diesel;
extern crate egg_mode;
extern crate tokio;
extern crate lettre;
extern crate lettre_email;
extern crate htmlescape;
extern crate reing_text2image;
extern crate log;
use std::sync::mpsc::{SyncSender, sync_channel};
use std::collections::HashMap;
use std::env;
use std::path::{Path, PathBuf};
use rocket::http::{Header, Status};
use rocket::request;
use rocket::response;
use rocket::response::status;
use rocket::Request;
use rocket_contrib::templates::Template;
use rocket_contrib::json::Json;
use chrono::prelude::*;
use std::{thread, time};
use rocket::State;
mod web;
mod model;
mod db;
mod tweet;
mod utils;
mod notify;
#[derive(Serialize, Debug)]
struct AnswerDTO {
pub id: i32,
pub body: String,
pub question: QuestionDTO,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl AnswerDTO {
fn from(a: model::Answer) -> Self {
Self {
id: a.id, body: a.body, created_at: a.created_at,
created_at_recognizable: utils::recognizable_datetime(a.created_at),
question: QuestionDTO::from(a.question)
}
}
}
#[derive(Serialize, Debug)]
struct QuestionDTO {
pub id: i32,
pub body: String,
pub created_at: DateTime<Local>,
pub created_at_recognizable: String,
}
impl QuestionDTO {
fn from(q: model::Question) -> Self {
Self {
id: q.id, body: q.body, created_at: q.created_at,
created_at_recognizable: utils::recognizable_datetime(q.created_at)
}
}
}
/* Force ssl */
#[get("/<path..>")]
fn redirect_ssl(path: PathBuf, _ssl: web::guard::ForceSSL) -> response::Redirect {
let redirect_to = format!("https://{}/{}", env::var("APPLICATION_DOMAIN").unwrap(), path.as_path().display());
println!("Redirect to:{}", redirect_to);
response::Redirect::to(redirect_to)
}
/* GET /static/ */
#[get("/static/<file..>")]
fn files(file: PathBuf) -> Result<web::CachedFile, status::NotFound<String>> {
let path = Path::new("static/").join(file);
response::NamedFile::open(&path)
.map_err(|_| status::NotFound(format!("Bad path: {:?}", path)))
.map(|nf| web::CachedFile(nf))
}
/* GET / */
#[derive(Serialize, Debug)]
struct IndexDTO {
pub profile: ProfileDTO,
pub answers: Vec<AnswerDTO>,
pub site_url: String,
pub next_page: Option<i64>,
pub prev_page: Option<i64>,
}
#[derive(Serialize, Debug)]
struct ProfileDTO {
pub username: String,
pub image_url: String,
}
#[test]
fn next_prev_page_test() {
assert!((None, Some(1)) == next_prev_page(0));
assert!((Some(0), Some(2)) == next_prev_page(1));
assert!((Some(1), Some(3)) == next_prev_page(2));
}
// next: newer, prev: older
// older -> page number increases
fn next_prev_page(current_page: i64) -> (Option<i64>, Option<i64>) {
let prev_page = Some(current_page + 1);
let next_page = if current_page <= 0 {
None
} else {
Some(current_page - 1)
};
return (next_page, prev_page);
}
const ANSWER_COUNT_PER_PAGE : i64 = 30;
#[get("/")]
fn index(repo: web::guard::Repository, profile: State<UserProfile>) -> Template {
let page = 0;
index_with_page(repo, profile, page)
}
#[get("/page/<page>")]
fn index_with_page(repo: web::guard::Repository, profile: State<UserProfile>, page: i64) -> Template {
let offset = page * ANSWER_COUNT_PER_PAGE;
let answer_dtos = repo.answers(offset, ANSWER_COUNT_PER_PAGE)
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let (next_page, prev_page) = next_prev_page(page);
let context = IndexDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
answers: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
prev_page: prev_page,
next_page: next_page,
};
Template::render("index", &context)
}
#[derive(Serialize, Debug)]
struct SearchDTO {
pub profile: ProfileDTO,
pub search_results: Vec<AnswerDTO>,
pub site_url: String,
pub query: String,
}
#[get("/search?<query>")]
fn search(repo: web::guard::Repository, profile: State<UserProfile>, query: String) -> Template {
let answer_dtos = repo.search_answers(query.clone())
.into_iter()
.map(|a| AnswerDTO::from(a))
.collect::<Vec<_>>();
let context = SearchDTO {
profile: ProfileDTO {
username: profile.clone().name,
image_url: String::from("/static/image/profile.jpg")
},
search_results: answer_dtos,
site_url: format!("https://{}/", env::var("APPLICATION_DOMAIN").unwrap()),
query: query,
};
Template::render("search", &context)
}
/* POST /questions */
#[derive(FromForm)]
struct PostQuestionForm {
body: String
}
#[derive(Serialize, Debug)]
struct PostQuestionFailedDTO {
reason: String
}
#[post("/questions", data = "<params>")]
fn post_question(repo: web::guard::Repository, client_ip: web::guard::ClientIP, params: request::Form<PostQuestionForm>)
-> Result<response::Redirect, Template> {
match repo.store_question(params.body.clone(), client_ip.address()) {
Ok(question) => {
let question_id = question.id;
notify::send_email(question);
Ok(response::Redirect::to(format!("/question/{}/after_post", question_id)))
},
Err(err) => {
match err {
model::StoreQuestionError::BlankBody => {
let context = PostQuestionFailedDTO { reason: String::from("質問の内容が空です") };
Err(Template::render("question/post_failed", &context))
}
}
}
}
}
/* GET /question/after_post */
#[derive(Serialize, Debug)]
struct AfterPostQuestionDTO{
pub question: QuestionDTO
}
#[get("/question/<question_id>/after_post")]
fn after_post_question(question_id: i32, repo: web::guard::Repository) -> Result<Template, response::Redirect> {
if let Some(question) = repo.find_question(question_id) {
let context = AfterPostQuestionDTO{
question: QuestionDTO::from(question)
};
Ok(Template::render("question/after_post", &context))
} else {
Err(response::Redirect::to("/"))
}
}
/* GET /answer/<question_id> */ | #[derive(Serialize, Debug)]
struct ShowAnswerDTO {
pub answer: AnswerDTO,
pub next_answer: Option<AnswerDTO>,
pub prev_answer: Option<AnswerDTO>,
}
#[get("/question/<question_id>")]
fn show_question(question_id: i32, repo: web::guard::Repository) -> Result<response::Redirect, status::NotFound<&'static str>> {
match repo.find_answer_by_question_id(question_id) {
Some(answer) => Ok(response::Redirect::to(format!("/answer/{}", answer.id))),
None => Err(status::NotFound("not found"))
}
}
#[get("/answer/<_answer_id>")]
fn show_answer(_answer_id: i32, app_env: State<AppEnvironment>) -> Template {
let mut context: HashMap<String, bool> = HashMap::new();
context.insert(String::from("is_production"), app_env.is_production);
return Template::render("answer/show", &context);
}
#[get("/api/answer/<answer_id>")]
fn show_answer_json(answer_id: i32, repo: web::guard::Repository) -> Result<Json<ShowAnswerDTO>, status::NotFound<&'static str>> {
if let Some(answer) = repo.find_answer(answer_id) {
let next_answer_opt = repo.find_next_answer(answer.created_at);
let prev_answer_opt = repo.find_prev_answer(answer.created_at);
let context = ShowAnswerDTO {
answer: AnswerDTO::from(answer),
next_answer: next_answer_opt.map(|a| AnswerDTO::from(a)),
prev_answer: prev_answer_opt.map(|a| AnswerDTO::from(a))
};
return Ok(Json(context));
}
return Err(status::NotFound("not found"));
}
/* GET /admin */
#[derive(Serialize, Debug)]
struct AdminIndexDTO {
pub questions: Vec<QuestionDTO>
}
#[get("/admin")]
fn admin_index(repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question_dtos = repo.not_answered_questions()
.into_iter()
.filter(|q|!q.hidden )
.map(|q| QuestionDTO::from(q))
.collect::<Vec<_>>();
let context = AdminIndexDTO { questions: question_dtos };
Template::render("admin/index", &context)
}
/* GET /admin/question/<question_id> */
#[get("/admin/question/<question_id>")]
fn admin_show_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth) -> Template {
let question = repo.find_question(question_id).unwrap();
let context = QuestionDTO::from(question);
Template::render("admin/questions/show", &context)
}
/* POST /question/<question_id>/answer */
#[derive(FromForm)]
struct PostAnswerForm {
body: String
}
#[post("/admin/question/<question_id>/answer", data = "<params>")]
fn admin_post_answer(
question_id: i32, repo: web::guard::Repository,
params: request::Form<PostAnswerForm>,
tweet_sender: State<SyncSender<model::Answer>>,
_auth: web::guard::BasicAuth
) -> response::Redirect {
let answer_body = params.body.clone();
if let Some(answer) = repo.store_answer(question_id, answer_body.clone()) {
tweet_sender.send(answer).unwrap();
}
response::Redirect::to("/admin")
}
/* POST /admin/question/<question_id>/hide */
#[post("/admin/question/<question_id>/hide")]
fn admin_hide_question(question_id: i32, repo: web::guard::Repository, _auth: web::guard::BasicAuth ) -> response::Redirect {
let mut question = repo.find_question(question_id).unwrap();
question.hidden = true;
repo.update_question(question);
response::Redirect::to("/admin")
}
/* Force login */
struct RequireLogin();
impl<'r> response::Responder<'r> for RequireLogin {
fn respond_to(self, _req: &Request) -> Result<response::Response<'r>, Status> {
response::Response::build()
.status(Status::Unauthorized)
.header(Header::new("WWW-Authenticate", "Basic realm=\"SECRET AREA\""))
.ok()
}
}
#[catch(401)]
fn unauthorized(_req: &Request) -> RequireLogin {
RequireLogin()
}
#[derive(Clone)]
struct UserProfile {
pub name: String
}
#[derive(Clone)]
struct AppEnvironment {
pub is_production: bool
}
fn main() {
dotenv::dotenv().ok();
let manager = r2d2_diesel::ConnectionManager::<diesel::PgConnection>::new(
env::var("DATABASE_URL").unwrap()
);
let pool = r2d2::Pool::builder()
.max_size(15)
.build(manager)
.unwrap();
let (tweet_sender, tweet_receiver) = sync_channel(1000);
thread::spawn(move || {
loop {
let answer = tweet_receiver.recv().unwrap();
tweet::tweet_answer(answer);
thread::sleep(time::Duration::from_secs(5 * 60));
}
});
let user_profile = UserProfile {
name: tweet::get_twitter_username()
};
let app_env = AppEnvironment {
is_production: env::var("MODE").map(|mode| mode == "production").unwrap_or(false)
};
rocket::ignite()
.manage(pool)
.manage(tweet_sender)
.manage(user_profile)
.manage(app_env)
.mount("/", routes![
index, index_with_page, files, post_question, after_post_question, show_answer,
admin_index, admin_post_answer, admin_show_question, admin_hide_question, search,
show_question, show_answer_json
])
.register(catchers![unauthorized])
.attach(Template::fairing())
.launch();
} | random_line_split |
|
lib.rs | // Copyright (c) 2019, Bayu Aldi Yansyah <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Crabsformer is an easy-to-use fundamental library for scientific computing
//! with Rust, highly inspired by [NumPy].
//!
//! **Notice!** This project is in early phase. Expect bugs and missing
//! features.
//!
//! [NumPy]: http://www.numpy.org/
//!
//! # Usage
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! crabsformer = "2019.3.17"
//! ``` | //! ```
//!
//! To get started using Crabsformer, read the quickstart tutorial below.
//!
//! # Quickstart Tutorial
//!
//! ## Prerequisites
//! Before reading this quick tutorial you should know a bit of Rust. If you
//! would like to refresh your memory, take a look at the [Rust book].
//!
//! [Rust book]: https://doc.rust-lang.org/book/
//!
//! ## The Basics
//! There are two main data structures in Crabsformer:
//!
//! 1. [`Vector<T>`] is a fixed-length list of elements of the same
//! [numeric type]. It has one atribute called [`len`] to represent the
//! total number of elements.
//! 2. [`Matrix<T>`] is a table of elements of the same [numeric type]. It has
//! one atribute called [`shape`] that represent the number of rows and
//! the number of columns.
//!
//! `Vector<T>` is pronounced as 'numeric vector' to avoid confussion with
//! Rust's vector [`Vec<T>`] data structure.
//!
//! [`Vector<T>`]: vector/struct.Vector.html
//! [`Matrix<T>`]: matrix/struct.Matrix.html
//! [`len`]: vector/struct.Vector.html#method.len
//! [`shape`]: matrix/struct.Matrix.html#method.shape
//! [`Vec<T>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html
//!
//! ### Numeric Vector Builders
//! There are several ways to create numeric vector.
//!
//! For example, you can create a numeric vector from a Rust vector using
//! `Vector::from` static method. The type of the resulting numeric vector is
//! deduced from the type of the elements in the sequences.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vec![3, 1, 4, 1, 5];
//! let y = Vector::from(x);
//! ```
//!
//! The [`vector!`] macro is provided to make initialization of the numeric
//! vector more convenient.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![1, 10, 11, 314];
//! ```
//!
//! It can also initialize each element of a numeric vector with a given value.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let v = vector![0; 5]; // vector![0, 0, 0, 0, 0]
//! ```
//!
//! To create a numeric vector of evenly spaced values, Crabformer provide
//! [`Vector::range`] function.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = Vector::range(0, 10, 1).unwrap();
//! assert_eq!(x, vector![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
//! ```
//!
//! To create random numeric vectors, Crabsformer provide
//! [`RandomVectorBuilder`]. It can be explicitly seeded to make the results
//! are reproducible.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut rvb = RandomVectorBuilder::new();
//! ```
//!
//! The method [`rvb.uniform`] creates a numeric vector of the given length
//! and populate it with random samples from a uniform distribution over the
//! half-open interval.
//!
//! ```
//! # use crabsformer::prelude::*;
//! # let mut rvb = RandomVectorBuilder::new();
//! let v = rvb.uniform(5, 0.0, 1.0).unwrap();
//! // Random
//! // [0.054709196, 0.86043775, 0.21187294, 0.6413728, 0.14186311]
//! ```
//!
//! See also: [Numeric Vector Builders].
//!
//! [`vector!`]: macro.vector.html
//! [`RandomVectorBuilder`]: vector/builders/struct.RandomVectorBuilder.html
//! [`rvb.uniform`]: vector/builders/struct.RandomVectorBuilder.html#method.uniform
//! [Numeric Vector Builders]: vector/builders/index.html
//! [`Vector::range`]: vector/struct.Vector.html#method.range
//!
//! ### Numeric Vector Basic Operations
//! You can perform arithmetic operations on a numeric vector. Arithmetic
//! operators on numeric vectors apply elementwise. A new numeric vector is
//! created and filled with the result.
//!
//! For example, if you add the numeric vector, the arithmetic operator
//! will work element-wise. The output will be a numeric vector of the same
//! length.
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![2, 4, 6] + vector![1, 3, 5];
//! assert_eq!(x, vector![3, 7, 11]);
//! ```
//!
//! Numeric vector substraction and multiplication also works the same:
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 5] - vector![1, 3, 5];
//! assert_eq!(x, vector![2, -2, 0]);
//!
//! let y = vector![5, 4, 1] * vector![2, 1, 4];
//! assert_eq!(y, vector![10, 4, 4]);
//! ```
//!
//! You can run an arithmetic operation on the numeric vector with a scalar
//! value too. For example, this code multiplies each element of the numeric
//! vector by 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4] * 2;
//! assert_eq!(x, vector![6, 2, 8]);
//! ```
//!
//! Some operations, such as `+=` and `*=`, act in place to modify an
//! existing numeric vector rather than create a new one.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut x = vector![3, 1, 4];
//!
//! x += 3;
//! assert_eq!(x, vector![6, 4, 7]);
//!
//! x -= 1;
//! assert_eq!(x, vector![5, 3, 6]);
//!
//! x *= 2;
//! assert_eq!(x, vector![10, 6, 12]);
//! ```
//!
//! If you try to add, substract or multiply numeric vector with a different
//! number of elements, you will get an error. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1, 5] + vector![2, 10, 9];
//! // thread'main' panicked at 'Vector addition with invalid length: 5!= 3' src/main.rs:12:13
//! ```
//!
//! *TODO: add alternative x.add() to return Result instead of panics*
//!
//! If you would like to square of the individual elements of the numeric
//! vector, or even higher up, use the [`power`] method. Here, each element of the
//! numeric vector is raised to the power 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//! let y = x.power(2);
//! assert_eq!(y, vector![9, 1, 16, 1]);
//! ```
//!
//! [`power`]: struct.Vector.html#method.power
//!
//! When operating with numeric vectors of different types,
//! the Rust compiler will raise error like the following:
//!
//! ```text
//! cannot add `vector::Vector<{integer}>` to `vector::Vector<{float}>`
//! ```
//!
//! Many unary operations, such as computing the sum of all the elements in the
//! numeric vector, are implemented as methods.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4];
//! let sum = x.sum();
//! assert_eq!(sum, 8);
//! assert_eq!(*x.max(), 4);
//! assert_eq!(*x.min(), 1);
//! ```
//!
//! See also: [`power`], [`filter`], [`sum`], [`max`], [`min`].
//!
//! [`power`]: struct.Vector.html#method.power
//! [`filter`]: struct.Vector.html#method.filter
//! [`sum`]: struct.Vector.html#method.sum
//! [`max`]: struct.Vector.html#method.max
//! [`min`]: struct.Vector.html#method.min
//!
//! ### Indexing, Slicing and Iterating Numeric Vector
//! Numeric vectors can be indexed, sliced and iterated over, much like
//! Rust's vector.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vector![3, 1, 4, 1];
//!
//! // Indexing numeric vector
//! assert_eq!(x[0], 3);
//! assert_eq!(x[2], 4);
//!
//! // Slicing numeric vector
//! x.slice(0..2); // [3, 1]
//! x.slice(2..); // [4, 1]
//! x.slice(..2); // [3, 1]
//!
//! // Iterating over element of numeric vector
//! for element in x.elements() {
//! println!("element = {:?}", element);
//! }
//! ```
//!
//! ### Matrix Builders
//! There are several ways to create matrix too.
//!
//! For example, you can create a matrix from a Rust's vector using
//! `Matrix::from` static method. The type of the resulting matrix is
//! deduced from the type of the elements in the sequences.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let x = vec![
//! vec![3, 1, 4],
//! vec![1, 5, 9],
//! vec![0, 1, 2],
//! ];
//! let w = Matrix::from(x);
//! ```
//!
//! The number of the columns should be consistent
//! otherwise it will panic. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = vec![
//! vec![3, 1, 4],
//! vec![1, 5],
//! ];
//! let w = Matrix::from(x);
//! // thread'main' panicked at 'Invalid matrix: the number of columns is inconsistent',
//! ```
//!
//!
//! The [`matrix!`] macro is provided to make initialization of the
//! matrix more convenient.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![
//! 3.0, 1.0, 4.0;
//! 1.0, 5.0, 9.0;
//! ];
//! ```
//!
//! It can also initialize each element of a matrix with a given shape
//! and value.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![[3, 3] => 0]; // matrix![0, 0, 0; 0, 0, 0; 0, 0, 0]
//! ```
//!
//! To create random matrix, Crabsformer provide
//! [`RandomMatrixBuilder`]. It can be explicitly seeded to make the results
//! are reproducible.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut rmb = RandomMatrixBuilder::new();
//! ```
//!
//! The method [`rmb.uniform`] creates a matrix of the given shape and
//! populate it with random samples from a uniform distribution over the
//! half-open interval.
//!
//! ```
//! # use crabsformer::prelude::*;
//! # let mut rmb = RandomMatrixBuilder::new();
//! let v = rmb.uniform([5, 5], 0.0, 1.0).unwrap();
//! ```
//!
//! See also: [Matrix Builders].
//!
//! [`matrix!`]: macro.matrix.html
//! [`RandomMatrixBuilder`]: matrix/builders/struct.RandomMatrixBuilder.html
//! [`rmb.uniform`]: matrix/builders/struct.RandomMatrixBuilder.html#method.uniform
//! [Matrix Builders]: matrix/builders/index.html
//!
//! ### Matrix Basic Operations
//! You can perform arithmetic operations on a matrix.
//! Arithmetic operators on matrices apply elementwise.
//! A new matrix is created and filled with the result.
//! For example, if you add the matrix, the arithmetic operator
//! will work element-wise. The output will be a matrix of the same
//! shape.
//!
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let w1 = matrix![
//! 2, 4, 6;
//! 3, 1, 1;
//! 4, 5, 6;
//! ];
//!
//! let w2 = matrix![
//! 1, 3, 5;
//! 3, 1, 3;
//! 1, 1, 1;
//! ];
//!
//! let w3 = w1 + w2;
//!
//! assert_eq!(w3, matrix![
//! 3, 7, 11;
//! 6, 2, 4;
//! 5, 6, 7;
//! ]);
//! ```
//!
//! Matrix substraction and multiplication also works the same:
//!
//! ```rust
//! # use crabsformer::prelude::*;
//! let w1 = matrix![2, 4; 3, 1] - matrix![1, 3; 3, 1];
//! assert_eq!(w1, matrix![
//! 1, 1;
//! 0, 0;
//! ]);
//!
//! let w2 = matrix![0, 1; 2, 0] - matrix![1, 1; 0, 1];
//! assert_eq!(w2, matrix![
//! -1, 0;
//! 2, -1;
//! ]);
//!
//! let w3 = matrix![0, 1; 1, 0] * matrix![1, 1; 1, 1];
//! assert_eq!(w3, matrix![
//! 0, 1;
//! 1, 0;
//! ]);
//! ```
//!
//! You can run an arithmetic operation on the matrix with
//! a scalar value too. For example, this code multiplies each element
//! of the matrix by 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w = matrix![3, 1; 4, 1] * 2;
//! assert_eq!(w, matrix![6, 2; 8, 2]);
//! ```
//!
//! Some operations, such as `+=` and `*=`, act in place to modify an
//! existing matrix rather than create a new one.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let mut w = matrix![3, 1; 4, 1];
//!
//! w += 3;
//! assert_eq!(w, matrix![6, 4; 7, 4]);
//!
//! w -= 1;
//! assert_eq!(w, matrix![5, 3; 6, 3]);
//!
//! w *= 2;
//! assert_eq!(w, matrix![10, 6; 12, 6]);
//! ```
//!
//! If you try to add, substract or multiply matrix with a
//! different shape, you will get an error. For example:
//!
//! ```should_panic
//! # use crabsformer::prelude::*;
//! let x = matrix![3, 1; 4, 1] + matrix![2, 10, 9; 1, 4, 7];
//! // thread'main' panicked at 'Matrix addition with invalid shape: [2, 2]!= [3, 3]' src/main.rs:12:13
//! ```
//!
//! If you would like to square of the individual elements of the matrix
//!, or even higher up, use the [`power`][m.power] method. Here, each element
//! of the matrix is raised to the power 2.
//!
//! ```
//! # use crabsformer::prelude::*;
//! let w1 = matrix![3, 1; 4, 1];
//! let w2 = w1.power(2);
//! assert_eq!(w2, matrix![9, 1; 16, 1]);
//! ```
//!
//! [m.power]: struct.Matrix.html#method.power
//!
//! When operating with matrices of different types,
//! the Rust compiler will raise error like the following:
//!
//! ```text
//! cannot add `matrix::Matrix<{integer}>` to `matrix::Matrix<{float}>`
//! ```
//!
//! ---
//! TODO(pyk): Continue quick tutorial here
//!
//! ---
//!
//! [numeric type]: https://doc.rust-lang.org/reference/types/numeric.html
//! [pyk]: https://github.com/pyk
//!
//! ## Getting help
//! Feel free to start discussion at [GitHub issues].
//!
//! [Github issues]: https://github.com/pyk/crabsformer/issues/new/choose
//!
//! ## License
//! Crabsformer is licensed under the [Apache-2.0] license.
//!
//! Unless you explicitly state otherwise, any contribution intentionally
//! submitted for inclusion in Crabsformer by you, as defined in the Apache-2.0
//! license, shall be licensed as above, without
//! any additional terms or conditions.
//!
//! [Apache-2.0]: https://github.com/pyk/crabsformer/blob/master/LICENSE
//!
pub mod matrix;
pub mod prelude;
pub mod utils;
pub mod vector; | //!
//! and this to your crate root:
//!
//! ```
//! use crabsformer::prelude::*; | random_line_split |
widget.rs | //! Widget controller.
//!
//! The Widget Controller is responsible for querying the language server for information about
//! the node's widget configuration or resolving it from local cache.
mod configuration;
mod response;
use crate::prelude::*;
use crate::controller::visualization::manager::Manager;
use crate::controller::visualization::manager::Notification;
use crate::controller::ExecutedGraph;
use crate::executor::global::spawn_stream_handler;
use crate::model::execution_context::VisualizationUpdateData;
use engine_protocol::language_server::SuggestionId;
use ensogl::define_endpoints_2;
use ide_view::graph_editor::component::visualization;
use ide_view::graph_editor::component::visualization::Metadata;
use ide_view::graph_editor::data::enso::Code;
use ide_view::graph_editor::ArgumentWidgetConfig;
use ide_view::graph_editor::CallWidgetsConfig;
// =================
// === Constants ===
// =================
/// A module containing the widget visualization method.
const WIDGET_VISUALIZATION_MODULE: &str = "Standard.Visualization.Widgets";
/// A name of the widget visualization method.
const WIDGET_VISUALIZATION_METHOD: &str = "get_widget_json";
// ===============
// === Aliases ===
// ===============
/// An ID of a node in the graph. Always refers to the root expression.
type NodeId = ast::Id;
// An ID of any sub expression in the node, which can have a widget attached to it.
type ExpressionId = ast::Id;
// ==================
// === Controller ===
// ==================
define_endpoints_2! {
Input {
/// Create or update widget query with given definition.
request_widgets(Request),
/// Remove all widget queries of given node that are not on this list.
retain_node_expressions(NodeId, HashSet<ast::Id>),
/// Remove all widget data associated with given node.
remove_all_node_widgets(NodeId),
}
Output {
/// Emitted when the node's visualization has been set.
widget_data(NodeId, CallWidgetsConfig),
}
}
/// Graph widgets controller. Handles requests for widget configuration using visualizations. Maps
/// response data to the relevant node Id updates, and dispatches them over the FRP output.
/// Guarantees that each individual query eventually receives an update. It internally caches the
/// results of the last queries, so that the configuration can be delivered to the presenter even
/// when no visualization change is necessary.
#[derive(Debug, Deref)]
pub struct Controller {
#[deref]
frp: Frp,
#[allow(dead_code)]
model: Rc<RefCell<Model>>,
}
impl Controller {
/// Constructor
pub fn new(executed_graph: ExecutedGraph) -> Self {
let (manager, manager_notifications) = Manager::new(executed_graph.clone_ref());
let frp = Frp::new();
let model = Rc::new(RefCell::new(Model {
manager,
graph: executed_graph.clone_ref(),
widgets_of_node: default(),
widget_queries: default(),
}));
let network = &frp.network;
let input = &frp.input;
let output = &frp.private.output;
frp::extend! { network
updates_from_cache <- input.request_widgets.filter_map(
f!((definition) model.borrow_mut().request_widget(definition))
);
output.widget_data <+ updates_from_cache;
eval input.retain_node_expressions(((node_id, expr_ids)) {
model.borrow_mut().retain_node_expressions(*node_id, expr_ids)
});
eval input.remove_all_node_widgets((node_id) {
model.borrow_mut().remove_all_node_widgets(*node_id)
});
};
let out_widget_data = output.widget_data.clone_ref();
let weak = Rc::downgrade(&model);
spawn_stream_handler(weak, manager_notifications, move |notification, model| {
let data = model.borrow_mut().handle_notification(notification);
if let Some(data) = data {
out_widget_data.emit(data);
}
std::future::ready(())
});
Self { frp, model }
}
}
// =============
// === Model ===
// =============
/// Model of the Widget controller. Manages the widget queries, stores responses in cache. See
/// [`Controller`] for more information.
#[derive(Debug)]
pub struct Model {
manager: Rc<Manager>,
graph: ExecutedGraph,
widgets_of_node: NodeToWidgetsMapping,
/// Map of queries by the target expression ID. Required to be able to map visualization update
/// responses to the corresponding widgets.
widget_queries: HashMap<ExpressionId, QueryData>,
}
impl Model {
/// Visualization update notification handler. Updates the cache and returns the widget updates
/// when the notification provides new data.
fn handle_notification(
&mut self,
notification: Notification,
) -> Option<(NodeId, CallWidgetsConfig)> {
let report_error = |message, error| {
error!("{message}: {error}");
None
};
match notification {
Notification::ValueUpdate { target, data,.. } =>
self.handle_visualization_value_update(target, data),
Notification::FailedToAttach { error,.. } =>
report_error("Failed to attach widget visualization", error),
Notification::FailedToDetach { error,.. } =>
report_error("Failed to detach widget visualization", error),
Notification::FailedToModify { error,.. } =>
report_error("Failed to modify widget visualization", error),
}
}
/// Handle visualization data update. Return widget update data.
fn handle_visualization_value_update(
&mut self,
target: ast::Id,
data: VisualizationUpdateData,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id!= request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if!retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on new request. Returns true if the visualization query needs to
/// be updated.
fn update(&mut self, suggestion: &enso_suggestion_database::Entry, req: &Request) -> bool {
let mut visualization_modified = false;
if self.method_name!= suggestion.name {
self.method_name = suggestion.name.clone();
visualization_modified = true;
}
let mut zipped_arguments = self.arguments.iter().zip(&suggestion.arguments);
if self.arguments.len()!= suggestion.arguments.len()
||!zipped_arguments.all(|(a, b)| a == &b.name)
{
self.arguments =
suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
visualization_modified = true;
}
self.node_id = req.node_id;
self.call_expression = req.call_expression;
visualization_modified
}
fn last_definitions(&self) -> Option<(NodeId, CallWidgetsConfig)> {
self.last_definitions.as_ref().map(|definitions| {
let call_id = self.call_expression;
let config = CallWidgetsConfig { call_id, definitions: definitions.clone() };
(self.node_id, config)
})
}
fn request_visualization(&mut self, manager: &Rc<Manager>, target_expression: ast::Id) {
// When visualization is requested, remove stale queried value to prevent updates while
// language server request is pending.
self.last_definitions.take();
let vis_metadata = self.visualization_metadata();
manager.request_visualization(target_expression, vis_metadata);
}
/// Generate visualization metadata for this query.
fn visualization_metadata(&self) -> Metadata {
let arguments: Vec<Code> = vec![
Self::as_unresolved_symbol(&self.method_name).into(),
Self::arg_sequence(&self.arguments).into(),
];
let preprocessor = visualization::instance::PreprocessorConfiguration {
module: WIDGET_VISUALIZATION_MODULE.into(),
method: WIDGET_VISUALIZATION_METHOD.into(),
arguments: Rc::new(arguments),
};
Metadata { preprocessor }
}
/// Escape a string to be used as a visualization argument. Transforms the string into an enso
/// expression with string literal.
fn escape_visualization_argument(arg: &str) -> String {
Ast::raw_text_literal(arg).repr()
}
/// Creates unresolved symbol via ".name" syntax. Unresolved symbol contains name and also
/// module scope to resolve it properly.
fn as_unresolved_symbol(arg: &str) -> String {
format!(".{arg}")
}
/// Escape a list of strings to be used as a visualization argument. Transforms the strings into
/// an enso expression with a list of string literals.
fn arg_sequence(args: &[ImString]) -> String {
let mut buffer = String::from("[");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buffer.push_str(", ");
}
buffer.push_str(&Self::escape_visualization_argument(arg)); | buffer
}
} | }
buffer.push(']'); | random_line_split |
widget.rs | //! Widget controller.
//!
//! The Widget Controller is responsible for querying the language server for information about
//! the node's widget configuration or resolving it from local cache.
mod configuration;
mod response;
use crate::prelude::*;
use crate::controller::visualization::manager::Manager;
use crate::controller::visualization::manager::Notification;
use crate::controller::ExecutedGraph;
use crate::executor::global::spawn_stream_handler;
use crate::model::execution_context::VisualizationUpdateData;
use engine_protocol::language_server::SuggestionId;
use ensogl::define_endpoints_2;
use ide_view::graph_editor::component::visualization;
use ide_view::graph_editor::component::visualization::Metadata;
use ide_view::graph_editor::data::enso::Code;
use ide_view::graph_editor::ArgumentWidgetConfig;
use ide_view::graph_editor::CallWidgetsConfig;
// =================
// === Constants ===
// =================
/// A module containing the widget visualization method.
const WIDGET_VISUALIZATION_MODULE: &str = "Standard.Visualization.Widgets";
/// A name of the widget visualization method.
const WIDGET_VISUALIZATION_METHOD: &str = "get_widget_json";
// ===============
// === Aliases ===
// ===============
/// An ID of a node in the graph. Always refers to the root expression.
type NodeId = ast::Id;
// An ID of any sub expression in the node, which can have a widget attached to it.
type ExpressionId = ast::Id;
// ==================
// === Controller ===
// ==================
define_endpoints_2! {
Input {
/// Create or update widget query with given definition.
request_widgets(Request),
/// Remove all widget queries of given node that are not on this list.
retain_node_expressions(NodeId, HashSet<ast::Id>),
/// Remove all widget data associated with given node.
remove_all_node_widgets(NodeId),
}
Output {
/// Emitted when the node's visualization has been set.
widget_data(NodeId, CallWidgetsConfig),
}
}
/// Graph widgets controller. Handles requests for widget configuration using visualizations. Maps
/// response data to the relevant node Id updates, and dispatches them over the FRP output.
/// Guarantees that each individual query eventually receives an update. It internally caches the
/// results of the last queries, so that the configuration can be delivered to the presenter even
/// when no visualization change is necessary.
#[derive(Debug, Deref)]
pub struct Controller {
#[deref]
frp: Frp,
#[allow(dead_code)]
model: Rc<RefCell<Model>>,
}
impl Controller {
/// Constructor
pub fn new(executed_graph: ExecutedGraph) -> Self {
let (manager, manager_notifications) = Manager::new(executed_graph.clone_ref());
let frp = Frp::new();
let model = Rc::new(RefCell::new(Model {
manager,
graph: executed_graph.clone_ref(),
widgets_of_node: default(),
widget_queries: default(),
}));
let network = &frp.network;
let input = &frp.input;
let output = &frp.private.output;
frp::extend! { network
updates_from_cache <- input.request_widgets.filter_map(
f!((definition) model.borrow_mut().request_widget(definition))
);
output.widget_data <+ updates_from_cache;
eval input.retain_node_expressions(((node_id, expr_ids)) {
model.borrow_mut().retain_node_expressions(*node_id, expr_ids)
});
eval input.remove_all_node_widgets((node_id) {
model.borrow_mut().remove_all_node_widgets(*node_id)
});
};
let out_widget_data = output.widget_data.clone_ref();
let weak = Rc::downgrade(&model);
spawn_stream_handler(weak, manager_notifications, move |notification, model| {
let data = model.borrow_mut().handle_notification(notification);
if let Some(data) = data {
out_widget_data.emit(data);
}
std::future::ready(())
});
Self { frp, model }
}
}
// =============
// === Model ===
// =============
/// Model of the Widget controller. Manages the widget queries, stores responses in cache. See
/// [`Controller`] for more information.
#[derive(Debug)]
pub struct Model {
manager: Rc<Manager>,
graph: ExecutedGraph,
widgets_of_node: NodeToWidgetsMapping,
/// Map of queries by the target expression ID. Required to be able to map visualization update
/// responses to the corresponding widgets.
widget_queries: HashMap<ExpressionId, QueryData>,
}
impl Model {
/// Visualization update notification handler. Updates the cache and returns the widget updates
/// when the notification provides new data.
fn handle_notification(
&mut self,
notification: Notification,
) -> Option<(NodeId, CallWidgetsConfig)> {
let report_error = |message, error| {
error!("{message}: {error}");
None
};
match notification {
Notification::ValueUpdate { target, data,.. } =>
self.handle_visualization_value_update(target, data),
Notification::FailedToAttach { error,.. } =>
report_error("Failed to attach widget visualization", error),
Notification::FailedToDetach { error,.. } =>
report_error("Failed to detach widget visualization", error),
Notification::FailedToModify { error,.. } =>
report_error("Failed to modify widget visualization", error),
}
}
/// Handle visualization data update. Return widget update data.
fn handle_visualization_value_update(
&mut self,
target: ast::Id,
data: VisualizationUpdateData,
) -> Option<(NodeId, CallWidgetsConfig)> {
let query_data = self.widget_queries.get_mut(&target)?;
let (definitions, errors) = configuration::deserialize_widget_definitions(
&data,
&self.graph.suggestion_db(),
&self.graph.parser(),
);
for error in errors {
error!("{:?}", error);
}
trace!("Widget definitions: {definitions:?}");
let definitions = Rc::new(definitions);
query_data.last_definitions = Some(definitions.clone());
let call_id = query_data.call_expression;
Some((query_data.node_id, CallWidgetsConfig { call_id, definitions }))
}
/// Handle a widget request from presenter. Returns the widget updates if the request can be
/// immediately fulfilled from the cache.
fn request_widget(&mut self, request: &Request) -> Option<(NodeId, CallWidgetsConfig)> {
let suggestion_db = self.graph.suggestion_db();
let suggestion = suggestion_db.lookup(request.call_suggestion).ok()?;
use std::collections::hash_map::Entry;
match self.widget_queries.entry(request.target_expression) {
Entry::Occupied(mut occupied) => {
let query = occupied.get_mut();
if query.node_id!= request.node_id {
self.widgets_of_node.remove_widget(query.node_id, request.target_expression);
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
}
let visualization_modified = query.update(&suggestion, request);
if visualization_modified {
trace!("Updating widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update
// will happen in the response handler.
None
} else {
// In the event that the visualization was not modified, we want to respond with
// the last known visualization data. Each widget request needs to be responded
// to, otherwise the widget might not be displayed after the widget view has
// been temporarily removed and created again.
query.last_definitions()
}
}
Entry::Vacant(vacant) => {
self.widgets_of_node.insert_widget(request.node_id, request.target_expression);
let query = vacant.insert(QueryData::new(&suggestion, request));
trace!("Registering widget visualization for {}", request.target_expression);
query.request_visualization(&self.manager, request.target_expression);
// The request is now pending. Once the request completes, the widget update will
// happen in the response handler.
None
}
}
}
/// Remove all widget queries of given node that are attached to expressions outside of provided
/// list. No widget update is emitted after a query is cleaned up.
fn retain_node_expressions(&mut self, node_id: NodeId, expressions: &HashSet<ast::Id>) {
self.widgets_of_node.retain_node_widgets(node_id, expressions, |expr_id| {
self.manager.remove_visualization(expr_id);
});
}
/// Remove all widget queries of given node. No widget update is emitted after a query is
/// cleaned up.
fn remove_all_node_widgets(&mut self, node_id: NodeId) {
for expr_id in self.widgets_of_node.remove_node_widgets(node_id) {
self.manager.remove_visualization(expr_id);
}
}
}
// ============================
// === NodeToWidgetsMapping ===
// ============================
/// A map of widgets attached to nodes. Used to perform cleanup of node widget queries when node is
/// removed.
#[derive(Debug, Default)]
struct NodeToWidgetsMapping {
attached_widgets: HashMap<NodeId, Vec<ExpressionId>>,
}
impl NodeToWidgetsMapping {
fn remove_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).and_modify(|exprs| {
let Some(index) = exprs.iter().position(|e| *e == target) else { return };
exprs.swap_remove(index);
});
}
fn insert_widget(&mut self, node_id: NodeId, target: ast::Id) {
self.attached_widgets.entry(node_id).or_default().push(target);
}
fn retain_node_widgets(
&mut self,
node_id: NodeId,
remaining_expressions: &HashSet<ast::Id>,
mut on_remove: impl FnMut(ExpressionId),
) {
if let Some(registered) = self.attached_widgets.get_mut(&node_id) {
registered.retain(|expr_id| {
let retained = remaining_expressions.contains(expr_id);
if!retained {
on_remove(*expr_id);
}
retained
});
}
}
fn remove_node_widgets(&mut self, node_id: NodeId) -> Vec<ExpressionId> {
self.attached_widgets.remove(&node_id).unwrap_or_default()
}
}
// ===============
// === Request ===
// ===============
/// Definition of a widget request. Defines the node subexpression that the widgets will be attached
/// to, and the method call that corresponds to that expression.
#[derive(Debug, Default, Clone, Copy)]
pub struct Request {
/// The node ID of a node that contains the widget.
pub node_id: NodeId,
/// Expression of the whole method call. Only used to correlate the visualization response with
/// the widget view.
pub call_expression: ExpressionId,
/// Target (`self`) argument in the call expression. Used as a visualization target.
pub target_expression: ExpressionId,
/// The suggestion ID of the method that this call refers to.
pub call_suggestion: SuggestionId,
}
// =================
// === QueryData ===
// =================
/// Data of ongoing widget query. Defines which expressions a visualization query is attached to,
/// and maintains enough data to correlate the response with respective widget view.
#[derive(Debug)]
struct QueryData {
node_id: NodeId,
call_expression: ExpressionId,
method_name: ImString,
arguments: Vec<ImString>,
last_definitions: Option<Rc<Vec<ArgumentWidgetConfig>>>,
}
impl QueryData {
fn new(suggestion: &enso_suggestion_database::Entry, req: &Request) -> Self {
let node_id = req.node_id;
let arguments = suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
let method_name = suggestion.name.clone();
let call_expression = req.call_expression;
let last_definitions = None;
QueryData { node_id, arguments, method_name, call_expression, last_definitions }
}
/// Update existing query data on new request. Returns true if the visualization query needs to
/// be updated.
fn | (&mut self, suggestion: &enso_suggestion_database::Entry, req: &Request) -> bool {
let mut visualization_modified = false;
if self.method_name!= suggestion.name {
self.method_name = suggestion.name.clone();
visualization_modified = true;
}
let mut zipped_arguments = self.arguments.iter().zip(&suggestion.arguments);
if self.arguments.len()!= suggestion.arguments.len()
||!zipped_arguments.all(|(a, b)| a == &b.name)
{
self.arguments =
suggestion.arguments.iter().map(|arg| arg.name.clone().into()).collect();
visualization_modified = true;
}
self.node_id = req.node_id;
self.call_expression = req.call_expression;
visualization_modified
}
fn last_definitions(&self) -> Option<(NodeId, CallWidgetsConfig)> {
self.last_definitions.as_ref().map(|definitions| {
let call_id = self.call_expression;
let config = CallWidgetsConfig { call_id, definitions: definitions.clone() };
(self.node_id, config)
})
}
fn request_visualization(&mut self, manager: &Rc<Manager>, target_expression: ast::Id) {
// When visualization is requested, remove stale queried value to prevent updates while
// language server request is pending.
self.last_definitions.take();
let vis_metadata = self.visualization_metadata();
manager.request_visualization(target_expression, vis_metadata);
}
/// Generate visualization metadata for this query.
fn visualization_metadata(&self) -> Metadata {
let arguments: Vec<Code> = vec![
Self::as_unresolved_symbol(&self.method_name).into(),
Self::arg_sequence(&self.arguments).into(),
];
let preprocessor = visualization::instance::PreprocessorConfiguration {
module: WIDGET_VISUALIZATION_MODULE.into(),
method: WIDGET_VISUALIZATION_METHOD.into(),
arguments: Rc::new(arguments),
};
Metadata { preprocessor }
}
/// Escape a string to be used as a visualization argument. Transforms the string into an enso
/// expression with string literal.
fn escape_visualization_argument(arg: &str) -> String {
Ast::raw_text_literal(arg).repr()
}
/// Creates unresolved symbol via ".name" syntax. Unresolved symbol contains name and also
/// module scope to resolve it properly.
fn as_unresolved_symbol(arg: &str) -> String {
format!(".{arg}")
}
/// Escape a list of strings to be used as a visualization argument. Transforms the strings into
/// an enso expression with a list of string literals.
fn arg_sequence(args: &[ImString]) -> String {
let mut buffer = String::from("[");
for (i, arg) in args.iter().enumerate() {
if i > 0 {
buffer.push_str(", ");
}
buffer.push_str(&Self::escape_visualization_argument(arg));
}
buffer.push(']');
buffer
}
}
| update | identifier_name |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// A new data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notification and errors.
///
/// On a repeated call, previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else |
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> +'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport +'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R:'static,
F:'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| {
Err(RpcError::MismatchedResponseType.into())
} | conditional_block |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// A new data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notification and errors.
///
/// On a repeated call, previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn | (
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> +'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport +'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R:'static,
F:'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| processor | identifier_name |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// A new data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notification and errors.
///
/// On a repeated call, previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> +'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport +'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R:'static,
F:'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> |
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
// ===============
// === Fixture ===
// ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
}
| {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
} | identifier_body |
client.rs | //! Module defines LS binary protocol client `API` and its two implementation: `Client` and
//! `MockClient`.
use crate::prelude::*;
use crate::binary::message::ErrorPayload;
use crate::binary::message::FromServerPayloadOwned;
use crate::binary::message::MessageFromServerOwned;
use crate::binary::message::MessageToServerRef;
use crate::binary::message::ToServerPayload;
use crate::binary::message::VisualizationContext;
use crate::common::error::UnexpectedMessage;
use crate::handler::Disposition;
use crate::handler::Handler;
use crate::language_server::types::Path;
use crate::types::Sha3_224;
use json_rpc::Transport;
use json_rpc::TransportEvent;
use mockall::automock;
// ==============
// === Errors ===
// ==============
#[allow(missing_docs)]
#[derive(Debug, Fail, Clone, Copy)]
#[fail(display = "Received a text message when expecting only the binary ones.")]
pub struct UnexpectedTextMessage;
/// Errors that can cause a remote call to fail.
pub type RpcError = json_rpc::error::RpcError<ErrorPayload>;
// ====================
// === Notification ===
// ====================
/// The notifications that binary protocol client may receive.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Notification {
/// A new data has been sent for a visualization.
VisualizationUpdate {
/// Identifies the specific visualization.
context: VisualizationContext,
/// Data to be passed to the visualization.
data: Vec<u8>,
},
}
/// Events emitted by the LS binary protocol client.
pub type Event = crate::common::event::Event<Notification>;
// ===========
// === API ===
// ===========
/// The Engine Services Language Server Binary Protocol Client API.
#[automock]
pub trait API {
/// Initializes the protocol. Must be called exactly once before making any other calls.
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult>;
/// Writes binary data to the file.
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult>;
/// Retrieves the file contents as a binary data.
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>>;
/// Writes a set of bytes to the specified file at the specified offset.
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>>;
/// Asynchronous event stream with notification and errors.
///
/// On a repeated call, previous stream is closed.
fn event_stream(&self) -> StaticBoxStream<Event>;
}
// ==============
// === Client ===
// ==============
/// The client for Engine Services Language Server Binary Protocol.
#[derive(Clone, Derivative)]
#[derivative(Debug)]
pub struct Client {
handler: Handler<Uuid, FromServerPayloadOwned, Notification>,
}
impl Client {
/// Helper function that fails if the received message represents a remote error.
fn expect_success(result: FromServerPayloadOwned) -> FallibleResult {
if let FromServerPayloadOwned::Success {} = result {
Ok(())
} else {
Err(RpcError::MismatchedResponseType.into())
}
}
/// Function that does early processing of the peer's message and decides how it shall be
/// handled. Returns a function so that it may be passed to the `Handler`.
fn processor(
) -> impl FnMut(TransportEvent) -> Disposition<Uuid, FromServerPayloadOwned, Notification> +'static
{
move |event: TransportEvent| {
let binary_data = match event {
TransportEvent::BinaryMessage(data) => data,
_ => return Disposition::error(UnexpectedTextMessage),
};
let message = match MessageFromServerOwned::deserialize(&binary_data) {
Ok(message) => message,
Err(e) => return Disposition::error(e),
};
debug!("Deserialized incoming binary message: {message:?}");
let correlation_id = message.correlation_id;
match message.0.payload {
FromServerPayloadOwned::VisualizationUpdate { context, data } =>
Disposition::notify(Notification::VisualizationUpdate { data, context }),
payload => {
if let Some(id) = correlation_id {
Disposition::HandleReply { id, reply: payload }
} else {
// Not a known notification and yet not a response to our request.
Disposition::error(UnexpectedMessage)
}
}
}
}
}
/// Creates a new client from the given transport to the Language Server Data Endpoint.
///
/// Before client is functional:
/// * `runner` must be scheduled for execution;
/// * `init` must be called or it needs to be wrapped into `Connection`.
pub fn new(transport: impl Transport +'static) -> Client {
let processor = Self::processor();
Client { handler: Handler::new(transport, processor) }
}
/// Starts a new request, described by the given payload.
/// Function `f` serves to retrieve the request's result from the more general `Reply` type.
pub fn make_request<F, R>(
&self,
payload: ToServerPayload,
f: F,
) -> StaticBoxFuture<FallibleResult<R>>
where
F: FnOnce(FromServerPayloadOwned) -> FallibleResult<R>,
R:'static,
F:'static,
{
let message = MessageToServerRef::new(payload);
let id = message.message_id;
let completer = move |reply| {
info!("Completing request {id} with a reply: {reply:?}");
if let FromServerPayloadOwned::Error { code, message, data } = reply {
let code = code as i64;
let error = json_rpc::messages::Error { code, message, data };
Err(RpcError::RemoteError(error).into())
} else {
f(reply)
}
};
let fut = self.handler.make_request(&message, completer);
Box::pin(fut)
}
/// A `runner`. Its execution must be scheduled for `Client` to be able to complete requests and
/// emit events.
pub fn runner(&self) -> impl Future<Output = ()> {
self.handler.runner()
}
}
impl API for Client {
fn init(&self, client_id: Uuid) -> StaticBoxFuture<FallibleResult> {
info!("Initializing binary connection as client with id {client_id}.");
let payload = ToServerPayload::InitSession { client_id };
self.make_request(payload, Self::expect_success)
}
fn write_file(&self, path: &Path, contents: &[u8]) -> StaticBoxFuture<FallibleResult> {
info!("Writing file {} with {} bytes.", path, contents.len());
let payload = ToServerPayload::WriteFile { path, contents };
self.make_request(payload, Self::expect_success)
}
fn read_file(&self, path: &Path) -> StaticBoxFuture<FallibleResult<Vec<u8>>> {
info!("Reading file {path}.");
let payload = ToServerPayload::ReadFile { path };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::FileContentsReply { contents } = result {
Ok(contents)
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn write_bytes(
&self,
path: &Path,
byte_offset: u64,
overwrite: bool,
bytes: &[u8],
) -> StaticBoxFuture<FallibleResult<Sha3_224>> {
info!("Writing {} bytes to {path} at offset {byte_offset}", bytes.len());
let payload = ToServerPayload::WriteBytes { path, byte_offset, overwrite, bytes };
self.make_request(payload, move |result| {
if let FromServerPayloadOwned::WriteBytesReply { checksum } = result {
Ok(checksum.into())
} else {
Err(RpcError::MismatchedResponseType.into())
}
})
}
fn event_stream(&self) -> StaticBoxStream<Event> {
self.handler.event_stream().boxed_local()
}
}
// =============
// === Tests ===
// =============
#[cfg(test)]
mod tests {
use super::*;
use crate::binary::message::MessageFromServer;
use crate::binary::message::MessageToServerOwned;
use crate::binary::message::ToServerPayloadOwned;
use futures::task::LocalSpawnExt;
use json_rpc::test_util::transport::mock::MockTransport;
| // ===============
struct ClientFixture {
transport: MockTransport,
client: Client,
executor: futures::executor::LocalPool,
}
impl ClientFixture {
fn new() -> ClientFixture {
let transport = MockTransport::new();
let client = Client::new(transport.clone());
let executor = futures::executor::LocalPool::new();
executor.spawner().spawn_local(client.runner()).unwrap();
ClientFixture { transport, client, executor }
}
}
// ========================
// === Testing Requests ===
// ========================
fn test_request<R>(
make_request: impl Fn(&Client) -> StaticBoxFuture<FallibleResult<R>>,
expected_result: R,
expected_request: ToServerPayloadOwned,
mock_reply: FromServerPayloadOwned,
) where
R: Debug + PartialEq + Sized,
{
let mut fixture = ClientFixture::new();
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
assert_eq!(generated_message.payload, expected_request);
fut.expect_pending();
let mut mock_reply = MessageFromServer::new(mock_reply);
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
assert_eq!(fut.expect_ok(), expected_result);
// Repeat request but now answer with error.
let mut fut = make_request(&fixture.client);
let generated_message = fixture.transport.expect_binary_message();
let generated_message = MessageToServerOwned::deserialize(&generated_message).unwrap();
let mock_error_code = 444;
let mock_error_message = "This is error".to_string();
let mut mock_reply = MessageFromServer::new(FromServerPayloadOwned::Error {
code: mock_error_code,
message: mock_error_message,
data: None,
});
mock_reply.correlation_id = Some(generated_message.message_id);
mock_reply.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
fut.expect_err();
}
#[test]
fn test_init() {
let client_id = Uuid::new_v4();
test_request(
|client| client.init(client_id),
(),
ToServerPayloadOwned::InitSession { client_id },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_write_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.write_file(&path, &data),
(),
ToServerPayloadOwned::WriteFile { contents: data.clone(), path: path.clone() },
FromServerPayloadOwned::Success {},
);
}
#[test]
fn test_read_file() {
let root_id = Uuid::new_v4();
let path = Path::new(root_id, &["Main.enso"]);
let data = Vec::from("hello".as_bytes());
test_request(
|client| client.read_file(&path),
data.clone(),
ToServerPayloadOwned::ReadFile { path: path.clone() },
FromServerPayloadOwned::FileContentsReply { contents: data },
);
}
// =============================
// === Testing Notifications ===
// =============================
#[test]
fn test_visualization_update() {
let mut fixture = ClientFixture::new();
let mut event_fut = fixture.client.event_stream().into_future().boxed_local();
fixture.executor.run_until_stalled();
event_fut.expect_pending();
let context = VisualizationContext {
visualization_id: Uuid::new_v4(),
expression_id: Uuid::new_v4(),
context_id: Uuid::new_v4(),
};
let data = Vec::from("Hello".as_bytes());
let message = MessageFromServer::new(FromServerPayloadOwned::VisualizationUpdate {
data: data.clone(),
context,
});
message.with_serialized(|data| fixture.transport.mock_peer_binary_message(data));
fixture.executor.run_until_stalled();
let expected_notification = Notification::VisualizationUpdate { context, data };
let (event, tail) = event_fut.expect_ready();
match event.expect("Expected some notification.") {
Event::Notification(notification) => assert_eq!(notification, expected_notification),
event => panic!("Expected notification event, got: {event:?}"),
}
tail.boxed_local().expect_pending();
}
} |
// ===============
// === Fixture === | random_line_split |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specificaly, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs are bindings; tasks that
//! bind then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers!= 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address!= new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() |
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
pub fn load_config_and_run(config_path: String) {
let cfg = Config::from_file(&config_path).unwrap_or_else(|err| {
log::error!("Failed to read {:?}: {}", config_path, err);
process::exit(1);
});
let runtime = create_runtime(cfg.workers);
runtime.block_on(run(config_path, cfg));
}
pub async fn run(config_path: String, cfg: Config) {
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
}
| {
// Failure to send the command means either the binding task have dropped the
// command channel, or the binding task doesn't exist anymore. Both possibilities
// shouldn't happen (see doc for `Control.bindings`); but in the opposite case
// let's remove the binding from the array that keeps track of them, and spawn the
// new one on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
} | conditional_block |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specificaly, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs are bindings; tasks that
//! bind then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime,
//! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that).
//!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers!= 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading.
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
config_path: String,
shared: &State,
stop: mpsc::Sender<SocketAddr>,
bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
log::info!("Reloading configuration from {:?}", config_path);
let shared_clone = shared.clone();
let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
let (cfg, new_bindings) = match reloaded {
Ok(Some(reloaded)) => reloaded,
_ => return,
};
let mut i = 0;
while i < bindings.len() {
let old_address = bindings[i].0;
if new_bindings
.iter()
.all(|new_b| old_address!= new_b.address)
{
bindings.swap_remove(i);
} else {
i += 1;
}
}
for new_b in new_bindings {
if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
let res = bindings[i]
.1
.send(match new_b.acceptor {
Some(acceptor) => Command::UseTls(acceptor),
None => Command::UsePlain,
})
.await;
if res.is_err() {
// Failure to send the command means either the binding task have dropped the
// command channel, or the binding task doesn't exist anymore. Both possibilities
// shouldn't happen (see doc for `Control.bindings`); but in the opposite case
// let's remove the binding from the array that keeps track of them, and spawn the
// new one on the runtime.
bindings.swap_remove(i);
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
} else {
tokio::spawn(new_b.future);
bindings.push((new_b.address, new_b.handle));
}
}
shared.rehash(cfg.state).await;
log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn | (
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
pub fn load_config_and_run(config_path: String) {
let cfg = Config::from_file(&config_path).unwrap_or_else(|err| {
log::error!("Failed to read {:?}: {}", config_path, err);
process::exit(1);
});
let runtime = create_runtime(cfg.workers);
runtime.block_on(run(config_path, cfg));
}
pub async fn run(config_path: String, cfg: Config) {
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
}
| reload_config | identifier_name |
control.rs | //! Runtime control utils.
//!
//! ellidri is built on tokio and the future ecosystem. Therefore the main thing it does is manage
//! tasks. Tasks are useful because they can be created, polled, and stopped. This module, and
//! `Control` more specificaly, is responsible for loading and reloading the configuration file,
//! starting and stopping the necessary tasks.
//!
//! # Top-level tasks
//!
//! At the moment, the only kind of "top-level" task that ellidri runs are bindings; tasks that
//! bind then listen on a port. They are defined in `net::listen`. Bindings run with two data
//! "channels":
//!
//! - A "stop button": the binding task will send its listening address when it fails unexpectedly
//! (when it is not closed by `Control`),
//! - A command channel: bindings accept commands that change their configuration. All commands
//! are described in the `Command` enum.
//!
//! # The configuration file
//!
//! ellidri reads a configuration file at startup. This configuration file is meant to specify its
//! running state. It can be reloaded at runtime, to change the whole state of the server.
//!
//! The first time the configuration file is read, ellidri uses it to create the tokio runtime.
//! This is because the number of workers is yet unknown, and cannot be changed afterwards.
//!
//! Configuration can then be reloaded upon receiving a SIGUSR1 signal (on UNIX systems only,
//! windows is not yet supported), or a REHASH command. When it happens, `Control` reread the
//! configuration file and performs a diff algorithm to know which task needs to be stopped. This
//! is really simple:
//!
//! - If an old binding is not present in the new configuration, `Control` drops the binding,
//! - If a new binding was not present in the old configuration, `Control` spawns the binding on
//! the runtime, | //!
//! Bindings are identified by their socket address (IP address + TCP port). TLS identities are
//! not kept track of, thus ellidri might reload the same TLS identity for a binding (it is fine to
//! let it do we are not reading thousands for TLS identities here).
use crate::{Config, net, State, tls};
use crate::config::{Binding, Tls};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Arc;
use std::{fs, process};
use tokio::runtime as rt;
use tokio::sync::{mpsc, Notify};
use tokio::task;
/// A command from `Control` to binding tasks.
pub enum Command {
/// Ask the binding task to listen for raw TCP connections and not use TLS.
UsePlain,
/// Ask the binding task to listen for TLS connections with the given acceptor.
UseTls(tls::Acceptor),
}
/// A binding task that is ready to be spawned on the runtime.
struct LoadedBinding<F> {
/// The address to be bound.
address: SocketAddr,
/// Either `None` when the binding listens for raw TCP connections, or `Some(acceptor)` when the
/// bindings listens for TLS connections with `acceptor`.
acceptor: Option<tls::Acceptor>,
/// The sending end of the channel that brings commands to the task.
handle: mpsc::Sender<Command>,
/// The actual task, ready to be polled.
future: F,
}
/// Creates a tokio runtime with the given number of worker threads.
fn create_runtime(workers: usize) -> rt::Runtime {
let mut builder = rt::Builder::new_multi_thread();
if workers!= 0 {
builder.worker_threads(workers);
}
builder
.enable_io()
.enable_time()
.build()
.unwrap_or_else(|err| {
log::error!("Failed to start the tokio runtime: {}", err);
process::exit(1);
})
}
/// Creates the bindings tasks and spawns them on the given runtime.
///
/// This function is what `Control` calls on startup to generate the bindings. Because it exits
/// the program on failure, it is not to be called for reloading (use `reload_bindings` there).
///
/// It spawns all the generated bindings on the runtime, and returns their listening address and
/// command channel.
fn load_bindings(
bindings: Vec<Binding>,
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<(SocketAddr, mpsc::Sender<Command>)> {
let mut res = Vec::with_capacity(bindings.len());
// TLS identities are loaded through a single store, shared across all bindings.
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
// One command channel per binding; the receiving end lives inside the listener task.
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
// Startup path: failing to load a TLS identity is fatal.
Err(_) => process::exit(1),
};
let server = net::listen(
address,
shared.clone(),
Some(acceptor),
stop.clone(),
commands,
);
res.push((address, handle));
tokio::spawn(server);
} else {
// Plain TCP binding: no acceptor.
let server = net::listen(address, shared.clone(), None, stop.clone(), commands);
res.push((address, handle));
tokio::spawn(server);
}
}
res
}
/// Reloads the configuration at `config_path`.
///
/// In four steps:
///
/// - Read the configuration and load the authentication provider,
/// - Remove old bindings that are not used anymore,
/// - Add new bindings, or send them a command to listen for raw TCP or TLS connections,
/// - Update the shared state.
async fn do_rehash(
    config_path: String,
    shared: &State,
    stop: mpsc::Sender<SocketAddr>,
    bindings: &mut Vec<(SocketAddr, mpsc::Sender<Command>)>,
) {
    log::info!("Reloading configuration from {:?}", config_path);
    // Configuration parsing does blocking file I/O, so run it off the async workers.
    let shared_clone = shared.clone();
    let reloaded = task::spawn_blocking(|| reload_config(config_path, shared_clone, stop)).await;
    let (cfg, new_bindings) = match reloaded {
        Ok(Some(reloaded)) => reloaded,
        // The blocking task panicked, or the new configuration failed to load; in
        // both cases keep the current configuration untouched.
        _ => return,
    };
    // Drop the bindings whose address is absent from the new configuration (their
    // command channels are dropped with them).
    bindings.retain(|&(old_address, _)| {
        new_bindings.iter().any(|new_b| new_b.address == old_address)
    });
    for new_b in new_bindings {
        if let Some(i) = bindings.iter().position(|old_b| old_b.0 == new_b.address) {
            // The binding already exists: tell the live task whether it should now
            // accept plain TCP or TLS connections.
            let command = match new_b.acceptor {
                Some(acceptor) => Command::UseTls(acceptor),
                None => Command::UsePlain,
            };
            if bindings[i].1.send(command).await.is_err() {
                // Failure to send the command means either the binding task has dropped the
                // command channel, or the binding task doesn't exist anymore. Both possibilities
                // shouldn't happen (see doc for `Control.bindings`); but in the opposite case
                // let's remove the binding from the array that keeps track of them, and spawn the
                // new one on the runtime.
                bindings.swap_remove(i);
                tokio::spawn(new_b.future);
                bindings.push((new_b.address, new_b.handle));
            }
        } else {
            // Brand new binding: spawn its listener task and start tracking it.
            tokio::spawn(new_b.future);
            bindings.push((new_b.address, new_b.handle));
        }
    }
    shared.rehash(cfg.state).await;
    log::info!("Configuration reloaded");
}
/// Re-read the configuration file and re-generate the bindings.
///
/// See documentation of `reload_bindings` for how bindings are re-generated.
///
/// This function will put the contents of the MOTD file into `Config.motd_file`, so that the
/// shared state can use the field as-is, since it must not use blocking operations such as reading
/// a file.
fn reload_config(
config_path: String,
shared: State,
stop: mpsc::Sender<SocketAddr>,
) -> Option<(Config, Vec<LoadedBinding<impl Future<Output = ()>>>)> {
let mut cfg = match Config::from_file(&config_path) {
Ok(cfg) => cfg,
Err(err) => {
log::error!("Failed to read {:?}: {}", config_path, err);
return None;
}
};
cfg.state.motd_file = match fs::read_to_string(&cfg.state.motd_file) {
Ok(motd) => motd,
Err(err) => {
log::warn!("Failed to read {:?}: {}", cfg.state.motd_file, err);
String::new()
}
};
let new_bindings = reload_bindings(&cfg.bindings, &shared, &stop);
Some((cfg, new_bindings))
}
/// Equivalent of `load_bindings` for when exiting the program is not acceptable.
///
/// Instead of spawning the binding tasks on the runtime, this function returns them in an array.
/// Also instead of exiting on failure, it continues its process. Binding tasks that could not
/// be generated are not returned.
///
/// Otherwise both functions have the same behavior.
fn reload_bindings(
bindings: &[Binding],
shared: &State,
stop: &mpsc::Sender<SocketAddr>,
) -> Vec<LoadedBinding<impl Future<Output = ()>>> {
let mut res = Vec::with_capacity(bindings.len());
let mut store = tls::IdentityStore::default();
for Binding { address, tls } in bindings {
// One command channel per binding; the receiver ends up in the listener task.
let (handle, commands) = mpsc::channel(8);
if let Some(Tls { certificate, key,.. }) = tls {
let acceptor = match store.acceptor(certificate, key) {
Ok(acceptor) => acceptor,
// Unlike `load_bindings`, a bad TLS identity skips just this binding.
Err(_) => continue,
};
let future = net::listen(
*address,
shared.clone(),
Some(acceptor.clone()),
stop.clone(),
commands,
);
res.push(LoadedBinding {
address: *address,
acceptor: Some(acceptor),
handle,
future,
});
} else {
let future = net::listen(*address, shared.clone(), None, stop.clone(), commands);
res.push(LoadedBinding {
address: *address,
acceptor: None,
handle,
future,
});
}
}
res
}
/// Reads the configuration at `config_path`, builds the runtime and runs the server.
///
/// Exits the process when the configuration cannot be read, since there is nothing
/// to run without it.
pub fn load_config_and_run(config_path: String) {
    let cfg = match Config::from_file(&config_path) {
        Ok(cfg) => cfg,
        Err(err) => {
            log::error!("Failed to read {:?}: {}", config_path, err);
            process::exit(1);
        }
    };
    let runtime = create_runtime(cfg.workers);
    runtime.block_on(run(config_path, cfg));
}
/// Runs the server's control loop until no binding is left.
///
/// Reacts to three kinds of events: a binding task reporting failure through `stop`,
/// a rehash notification from the shared state, and the platform reload signal
/// (SIGUSR1 on unix via `user_defined1`, Ctrl-Break on windows).
pub async fn run(config_path: String, cfg: Config) {
// Shared failure handler: without signal listening, reloads cannot work.
let signal_fail = |err| {
log::error!("Cannot listen for signals to reload the configuration: {}", err);
process::exit(1);
};
#[cfg(unix)]
let mut signals = {
use tokio::signal::unix;
unix::signal(unix::SignalKind::user_defined1()).unwrap_or_else(signal_fail)
};
#[cfg(windows)]
let mut signals = {
use tokio::signal::windows;
windows::ctrl_break().unwrap_or_else(signal_fail)
};
// `stop` is cloned into every binding task; a task sends its address on failure.
let (stop, mut failures) = mpsc::channel(8);
let rehash = Arc::new(Notify::new());
let shared = State::new(cfg.state, rehash.clone()).await;
let mut bindings = load_bindings(cfg.bindings, &shared, &stop);
loop {
tokio::select! {
addr = failures.recv() => match addr {
// A binding task failed: forget its entry.
Some(addr) => for i in 0..bindings.len() {
if bindings[i].0 == addr {
bindings.swap_remove(i);
break;
}
}
None => {
// `failures.recv()` returns `None` when all senders have been dropped, so
// when all bindings tasks have stopped.
log::error!("No binding left, exiting.");
return;
}
},
// Rehash requested from inside the server (shared state notification).
_ = rehash.notified() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
// Rehash requested from outside (platform signal).
_ = signals.recv() => {
do_rehash(config_path.clone(), &shared, stop.clone(), &mut bindings).await;
},
}
}
} | //! - If a binding is present in both configurations, `Control` will keep the binding and send a
//! command to it, either to make it listen for raw TCP connections, or to listen for TLS
//! connections with a given `TlsAcceptor` (see `tokio-tls` doc for that). | random_line_split |
config.rs | use clap::{CommandFactory, Parser};
use pathfinder_common::AllowedOrigins;
use pathfinder_storage::JournalMode;
use reqwest::Url;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use pathfinder_common::consts::VERGEN_GIT_DESCRIBE;
#[derive(Parser)]
#[command(name = "Pathfinder")]
#[command(author = "Equilibrium Labs")]
#[command(version = VERGEN_GIT_DESCRIBE)]
#[command(
about = "A Starknet node implemented by Equilibrium Labs. Submit bug reports and issues at https://github.com/eqlabs/pathfinder."
)]
struct Cli {
#[arg(
long,
value_name = "DIR",
value_hint = clap::ValueHint::DirPath,
long_help = "Directory where the node should store its data",
env = "PATHFINDER_DATA_DIRECTORY",
default_value_os_t = (&std::path::Component::CurDir).into()
)]
data_directory: PathBuf, | env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
#[clap(flatten)]
network: NetworkCli,
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
Auto,
Never,
Always,
}
impl Color {
/// Returns true if color should be enabled, either because the setting is [Color::Always],
/// or because it is [Color::Auto] and stdout is targetting a terminal.
pub fn is_color_enabled(&self) -> bool {
use std::io::IsTerminal;
match self {
Color::Auto => std::io::stdout().is_terminal(),
Color::Never => false,
Color::Always => true,
}
}
}
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
fn from(value: Network) -> Self {
match value {
Network::Mainnet => "mainnet",
Network::Testnet => "testnet",
Network::Testnet2 => "testnet2",
Network::Integration => "integration",
Network::Custom => "custom",
}
.into()
}
}
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
if inputs.is_empty() {
return Ok(None);
}
if inputs.len() == 1 && inputs[0] == "*" {
return Ok(Some(AllowedOrigins::Any));
}
if inputs.iter().any(|s| s == "*") {
return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
}
let valid_origins = inputs
.into_iter()
.map(|input| match url::Url::parse(&input) {
// Valid URL but has to be limited to origin form, i.e. no path, query, trailing slash for default path etc.
Ok(url) => {
let origin = url.origin();
if!origin.is_tuple() {
return Err(RpcCorsDomainsParseError::InvalidDomain(input));
}
if origin.ascii_serialization() == input {
Ok(input)
} else {
// Valid URL but not a valid origin
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
}
// Not an URL hence invalid origin
Err(_e) => {
eprintln!("Url_parse_error: {_e}");
Err(RpcCorsDomainsParseError::InvalidDomain(input))
}
})
.collect::<Result<HashSet<_>, RpcCorsDomainsParseError>>()?;
Ok(Some(AllowedOrigins::List(
valid_origins.into_iter().collect(),
)))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
#[derive(Debug, thiserror::Error, PartialEq)]
enum RpcCorsDomainsParseError {
#[error("Invalid allowed domain for CORS: {0}.")]
InvalidDomain(String),
#[error(
"Specify either wildcard '*' or a comma separated list of allowed domains for CORS, not both."
)]
WildcardAmongOtherValues,
}
pub struct Config {
pub data_directory: PathBuf,
pub ethereum: Ethereum,
pub rpc_address: SocketAddr,
pub rpc_cors_domains: Option<AllowedOrigins>,
pub ws: Option<WebSocket>,
pub monitor_address: Option<SocketAddr>,
pub network: Option<NetworkConfig>,
pub poll_pending: bool,
pub python_subprocesses: std::num::NonZeroUsize,
pub sqlite_wal: JournalMode,
pub max_rpc_connections: std::num::NonZeroU32,
pub poll_interval: std::time::Duration,
pub color: Color,
}
pub struct WebSocket {
pub capacity: NonZeroUsize,
}
pub struct Ethereum {
pub url: Url,
pub password: Option<String>,
}
pub enum NetworkConfig {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom {
gateway: Url,
feeder_gateway: Url,
chain_id: String,
},
}
impl NetworkConfig {
fn from_components(args: NetworkCli) -> Option<Self> {
use Network::*;
let cfg = match (
args.network,
args.gateway,
args.feeder_gateway,
args.chain_id,
) {
(None, None, None, None) => return None,
(Some(Custom), Some(gateway), Some(feeder_gateway), Some(chain_id)) => {
NetworkConfig::Custom {
gateway,
feeder_gateway,
chain_id,
}
}
(Some(Custom), _, _, _) => {
unreachable!("`--network custom` requirements are handled by clap derive")
}
// Handle non-custom variants in an inner match so that the compiler will force
// us to handle a new network variants explicitly. Otherwise we end up with a
// catch-all arm that would swallow new variants silently.
(Some(non_custom), None, None, None) => match non_custom {
Mainnet => NetworkConfig::Mainnet,
Testnet => NetworkConfig::Testnet,
Testnet2 => NetworkConfig::Testnet2,
Integration => NetworkConfig::Integration,
Custom => unreachable!("Network::Custom handled in outer arm already"),
},
// clap does not support disallowing args based on an enum value, so we have check for
// `--network non-custom` + custom required args manually.
_ => {
use clap::error::ErrorKind;
Cli::command().error(ErrorKind::ArgumentConflict, "--gateway-url, --feeder-gateway-url and --chain-id may only be used with --network custom").exit()
}
};
Some(cfg)
}
}
impl Config {
pub fn parse() -> Self {
let cli = Cli::parse();
let network = NetworkConfig::from_components(cli.network);
Config {
data_directory: cli.data_directory,
ethereum: Ethereum {
password: cli.ethereum_password,
url: cli.ethereum_url,
},
rpc_address: cli.rpc_address,
rpc_cors_domains: parse_cors_or_exit(cli.rpc_cors_domains),
ws: cli.ws.then_some(WebSocket {
capacity: cli.ws_capacity,
}),
monitor_address: cli.monitor_address,
network,
poll_pending: cli.poll_pending,
python_subprocesses: cli.python_subprocesses,
sqlite_wal: match cli.sqlite_wal {
true => JournalMode::WAL,
false => JournalMode::Rollback,
},
max_rpc_connections: cli.max_rpc_connections,
poll_interval: std::time::Duration::from_secs(cli.poll_interval.get()),
color: cli.color,
}
}
}
#[cfg(test)]
mod tests {
use super::{AllowedOrigins, RpcCorsDomainsParseError};
use crate::config::parse_cors;
#[test]
fn parse_cors_domains() {
let empty = String::new();
let wildcard = "*".to_owned();
let valid = "http://valid.com:1234".to_owned();
let not_url = "not_url".to_string();
let with_path = "http://a.com/path".to_string();
let with_query = "http://a.com/?query=x".to_string();
let with_trailing_slash = format!("{valid}/");
[
(
vec![empty.clone()],
RpcCorsDomainsParseError::InvalidDomain(empty.clone()),
),
(
vec![empty, wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![wildcard.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), with_trailing_slash.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_trailing_slash),
),
(
vec![valid.clone(), not_url.clone()],
RpcCorsDomainsParseError::InvalidDomain(not_url),
),
(
vec![valid.clone(), with_path.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_path),
),
(
vec![valid.clone(), with_query.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_query),
),
]
.into_iter()
.for_each(|(input, expected_error)| {
assert_eq!(
parse_cors(input.clone()).unwrap_err(),
expected_error,
"input: {input:?}"
);
});
[
(vec![], None),
(vec![wildcard], Some(AllowedOrigins::Any)),
(
vec![valid.clone()],
Some(AllowedOrigins::List(vec![valid.clone()])),
),
(
vec![valid.clone(), valid.clone()],
Some(AllowedOrigins::List(vec![valid])),
),
]
.into_iter()
.for_each(|(input, expected_ok)| {
assert_eq!(
parse_cors(input.clone()).unwrap(),
expected_ok,
"input: {input:?}"
)
});
}
} |
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None, | random_line_split |
config.rs | use clap::{CommandFactory, Parser};
use pathfinder_common::AllowedOrigins;
use pathfinder_storage::JournalMode;
use reqwest::Url;
use std::collections::HashSet;
use std::net::SocketAddr;
use std::num::NonZeroUsize;
use std::path::PathBuf;
use pathfinder_common::consts::VERGEN_GIT_DESCRIBE;
// Command-line / environment configuration, parsed by clap's derive API.
// NOTE: plain `//` comments are used on purpose — `///` doc comments on clap-derive
// items would be picked up as help text and could change the generated CLI output.
#[derive(Parser)]
#[command(name = "Pathfinder")]
#[command(author = "Equilibrium Labs")]
#[command(version = VERGEN_GIT_DESCRIBE)]
#[command(
about = "A Starknet node implemented by Equilibrium Labs. Submit bug reports and issues at https://github.com/eqlabs/pathfinder."
)]
struct Cli {
// Where the node stores its database and related files; defaults to the CWD.
#[arg(
long,
value_name = "DIR",
value_hint = clap::ValueHint::DirPath,
long_help = "Directory where the node should store its data",
env = "PATHFINDER_DATA_DIRECTORY",
default_value_os_t = (&std::path::Component::CurDir).into()
)]
data_directory: PathBuf,
// Ethereum entry-point credentials and endpoint.
#[arg(
long = "ethereum.password",
long_help = "The optional password to use for the Ethereum API",
value_name = None,
env = "PATHFINDER_ETHEREUM_API_PASSWORD",
)]
ethereum_password: Option<String>,
#[arg(
long = "ethereum.url",
long_help = r"This should point to the HTTP RPC endpoint of your Ethereum entry-point, typically a local Ethereum client or a hosted gateway service such as Infura or Cloudflare.
Examples:
infura: https://goerli.infura.io/v3/<PROJECT_ID>
geth: https://localhost:8545",
value_name = "HTTP(s) URL",
value_hint = clap::ValueHint::Url,
env = "PATHFINDER_ETHEREUM_API_URL",
)]
ethereum_url: Url,
// RPC server: listening address, optional WebSocket transport, CORS policy.
#[arg(
long = "http-rpc",
long_help = "HTTP-RPC listening address",
value_name = "IP:PORT",
default_value = "127.0.0.1:9545",
env = "PATHFINDER_HTTP_RPC_ADDRESS"
)]
rpc_address: SocketAddr,
#[arg(
long = "rpc.websocket",
long_help = "Enable RPC WebSocket transport",
default_value = "false",
env = "PATHFINDER_RPC_WEBSOCKET"
)]
ws: bool,
#[arg(
long = "rpc.websocket.capacity",
long_help = "Maximum number of websocket subscriptions per subscription type",
default_value = "100",
env = "PATHFINDER_RPC_WEBSOCKET_CAPACITY"
)]
ws_capacity: NonZeroUsize,
#[arg(
long = "rpc.cors-domains",
long_help = r"Comma separated list of domains from which Cross-Origin requests will be accepted by the RPC server.
Use '*' to indicate any domain and an empty list to disable CORS.
Examples:
single: http://one.io
a list: http://first.com,http://second.com:1234
any: *",
value_name = "DOMAIN-LIST",
value_delimiter = ',',
env = "PATHFINDER_RPC_CORS_DOMAINS"
)]
rpc_cors_domains: Vec<String>,
#[arg(
long = "monitor-address",
long_help = "The address at which pathfinder will serve monitoring related information",
value_name = "IP:PORT",
env = "PATHFINDER_MONITOR_ADDRESS"
)]
monitor_address: Option<SocketAddr>,
// Network selection lives in its own group; see `NetworkCli`.
#[clap(flatten)]
network: NetworkCli,
// Sync/runtime tuning knobs.
#[arg(
long = "poll-pending",
long_help = "Enable polling pending block",
action = clap::ArgAction::Set,
default_value = "false",
env = "PATHFINDER_POLL_PENDING",
)]
poll_pending: bool,
#[arg(
long = "python-subprocesses",
long_help = "Number of Python starknet VMs subprocesses to start",
default_value = "2",
env = "PATHFINDER_PYTHON_SUBPROCESSES"
)]
python_subprocesses: std::num::NonZeroUsize,
#[arg(
long = "sqlite-wal",
long_help = "Enable SQLite write-ahead logging",
action = clap::ArgAction::Set,
default_value = "true",
env = "PATHFINDER_SQLITE_WAL",
)]
sqlite_wal: bool,
#[arg(
long = "max-rpc-connections",
long_help = "Set the maximum number of connections allowed",
env = "PATHFINDER_MAX_RPC_CONNECTIONS",
default_value = "1024"
)]
max_rpc_connections: std::num::NonZeroU32,
#[arg(
long = "sync.poll-interval",
long_help = "New block poll interval in seconds",
default_value = "5",
env = "PATHFINDER_HEAD_POLL_INTERVAL_SECONDS"
)]
poll_interval: std::num::NonZeroU64,
#[arg(
long = "color",
long_help = "This flag controls when to use colors in the output logs.",
default_value = "auto",
env = "PATHFINDER_COLOR",
value_name = "WHEN"
)]
color: Color,
}
// When to colorize log output. `//` comments on purpose: `///` on a ValueEnum
// becomes CLI help text and would change the generated output.
#[derive(clap::ValueEnum, Debug, Clone, Copy, PartialEq)]
pub enum Color {
// Decide based on whether stdout is a terminal (see `is_color_enabled`).
Auto,
Never,
Always,
}
impl Color {
    /// Returns true if color output should be used: always for [Color::Always],
    /// never for [Color::Never], and only when stdout is targeting a terminal
    /// for [Color::Auto].
    pub fn is_color_enabled(&self) -> bool {
        use std::io::IsTerminal;
        // Short-circuits so the terminal check only runs in the `Auto` case.
        matches!(self, Color::Always)
            || (matches!(self, Color::Auto) && std::io::stdout().is_terminal())
    }
}
// Network-selection arguments, flattened into `Cli`. The custom network requires
// `chain_id`, `feeder_gateway` and `gateway` (enforced via `required_if_eq`); the
// reverse direction — forbidding them for non-custom networks — is checked manually
// in `NetworkConfig::from_components`.
#[derive(clap::Args)]
struct NetworkCli {
#[arg(
long = "network",
long_help = r"Specify the Starknet network for pathfinder to operate on.
Note that 'custom' requires also setting the --gateway-url and --feeder-gateway-url options.",
value_enum,
env = "PATHFINDER_NETWORK"
)]
network: Option<Network>,
#[arg(
long,
long_help = "Set a custom Starknet chain ID (e.g. SN_GOERLI)",
value_name = "CHAIN ID",
env = "PATHFINDER_CHAIN_ID",
required_if_eq("network", Network::Custom)
)]
chain_id: Option<String>,
#[arg(
long = "feeder-gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet feeder gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_FEEDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
feeder_gateway: Option<Url>,
#[arg(
long = "gateway-url",
value_name = "URL",
value_hint = clap::ValueHint::Url,
long_help = "Specify a custom Starknet gateway url. Can be used to run pathfinder on a custom Starknet network, or to use a gateway proxy. Requires '--network custom'.",
env = "PATHFINDER_GATEWAY_URL",
required_if_eq("network", Network::Custom),
)]
gateway: Option<Url>,
}
// CLI-level network choice; converted into `NetworkConfig` by `from_components`.
// `//` comments on purpose: `///` on a ValueEnum becomes CLI help text.
#[derive(clap::ValueEnum, Clone)]
enum Network {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom,
}
impl From<Network> for clap::builder::OsStr {
    /// Maps each network variant to the string clap uses for it on the command line
    /// (needed by `required_if_eq("network", Network::…)` in `NetworkCli`).
    fn from(value: Network) -> Self {
        let name = match value {
            Network::Mainnet => "mainnet",
            Network::Testnet => "testnet",
            Network::Testnet2 => "testnet2",
            Network::Integration => "integration",
            Network::Custom => "custom",
        };
        name.into()
    }
}
/// Parses the `--rpc.cors-domains` values into an [`AllowedOrigins`] policy.
///
/// An empty list disables CORS (`None`); a single `"*"` allows any origin; otherwise
/// every entry must be a valid URL in exact origin form (no path, query, or trailing
/// slash). Mixing `"*"` with other values is rejected. Duplicates are collapsed.
fn parse_cors(inputs: Vec<String>) -> Result<Option<AllowedOrigins>, RpcCorsDomainsParseError> {
    match inputs.as_slice() {
        [] => return Ok(None),
        [only] if only == "*" => return Ok(Some(AllowedOrigins::Any)),
        _ => {}
    }
    if inputs.iter().any(|s| s == "*") {
        return Err(RpcCorsDomainsParseError::WildcardAmongOtherValues);
    }
    // A set collapses duplicate origins; bail out on the first invalid entry.
    let mut valid_origins = HashSet::new();
    for input in inputs {
        let url = match url::Url::parse(&input) {
            Ok(url) => url,
            // Not an URL hence invalid origin
            Err(_e) => {
                eprintln!("Url_parse_error: {_e}");
                return Err(RpcCorsDomainsParseError::InvalidDomain(input));
            }
        };
        // Valid URL, but it must also be in exact origin form: a tuple origin whose
        // serialization round-trips to the input (rules out paths, queries, trailing
        // slashes for the default path, etc.).
        let origin = url.origin();
        if !origin.is_tuple() || origin.ascii_serialization() != input {
            return Err(RpcCorsDomainsParseError::InvalidDomain(input));
        }
        valid_origins.insert(input);
    }
    Ok(Some(AllowedOrigins::List(
        valid_origins.into_iter().collect(),
    )))
}
pub fn parse_cors_or_exit(input: Vec<String>) -> Option<AllowedOrigins> {
use clap::error::ErrorKind;
match parse_cors(input) {
Ok(parsed) => parsed,
Err(error) => Cli::command()
.error(ErrorKind::ValueValidation, error)
.exit(),
}
}
/// Error for a single invalid CORS domain.
// NOTE(review): this type appears unused within this file — possibly dead code left
// over from an earlier version of `parse_cors`; confirm against the rest of the crate
// before removing.
#[derive(Debug, thiserror::Error, PartialEq)]
#[error("Invalid domain for CORS: {0}")]
struct InvalidCorsDomainError(String);
/// Errors produced by [`parse_cors`] when validating `--rpc.cors-domains` values.
#[derive(Debug, thiserror::Error, PartialEq)]
enum RpcCorsDomainsParseError {
/// An entry was not a URL in exact origin form.
#[error("Invalid allowed domain for CORS: {0}.")]
InvalidDomain(String),
/// `"*"` was mixed with explicit domain entries.
#[error(
"Specify either wildcard '*' or a comma separated list of allowed domains for CORS, not both."
)]
WildcardAmongOtherValues,
}
/// Fully-resolved node configuration, produced from [`Cli`] by [`Config::parse`].
pub struct Config {
pub data_directory: PathBuf,
pub ethereum: Ethereum,
pub rpc_address: SocketAddr,
/// `None` disables CORS entirely.
pub rpc_cors_domains: Option<AllowedOrigins>,
/// `Some` only when the WebSocket transport was enabled on the command line.
pub ws: Option<WebSocket>,
pub monitor_address: Option<SocketAddr>,
pub network: Option<NetworkConfig>,
pub poll_pending: bool,
pub python_subprocesses: std::num::NonZeroUsize,
/// WAL when `--sqlite-wal true` (the default), Rollback otherwise.
pub sqlite_wal: JournalMode,
pub max_rpc_connections: std::num::NonZeroU32,
pub poll_interval: std::time::Duration,
pub color: Color,
}
/// WebSocket transport settings (present only when enabled via `--rpc.websocket`).
pub struct WebSocket {
/// Maximum number of websocket subscriptions per subscription type.
pub capacity: NonZeroUsize,
}
/// Ethereum entry-point endpoint and optional API password.
pub struct Ethereum {
pub url: Url,
pub password: Option<String>,
}
/// Resolved network selection; `Custom` carries the gateway endpoints and chain ID
/// that clap requires alongside `--network custom`.
pub enum NetworkConfig {
Mainnet,
Testnet,
Testnet2,
Integration,
Custom {
gateway: Url,
feeder_gateway: Url,
chain_id: String,
},
}
impl NetworkConfig {
/// Resolves the raw CLI network arguments into a [`NetworkConfig`].
///
/// Returns `None` when no network argument was given at all. Exits the process with
/// a clap `ArgumentConflict` error when custom-only arguments are combined with a
/// non-custom network (clap itself cannot express that constraint).
fn from_components(args: NetworkCli) -> Option<Self> {
use Network::*;
let cfg = match (
args.network,
args.gateway,
args.feeder_gateway,
args.chain_id,
) {
(None, None, None, None) => return None,
(Some(Custom), Some(gateway), Some(feeder_gateway), Some(chain_id)) => {
NetworkConfig::Custom {
gateway,
feeder_gateway,
chain_id,
}
}
// `required_if_eq` guarantees all three custom args are present here.
(Some(Custom), _, _, _) => {
unreachable!("`--network custom` requirements are handled by clap derive")
}
// Handle non-custom variants in an inner match so that the compiler will force
// us to handle new network variants explicitly. Otherwise we end up with a
// catch-all arm that would swallow new variants silently.
(Some(non_custom), None, None, None) => match non_custom {
Mainnet => NetworkConfig::Mainnet,
Testnet => NetworkConfig::Testnet,
Testnet2 => NetworkConfig::Testnet2,
Integration => NetworkConfig::Integration,
Custom => unreachable!("Network::Custom handled in outer arm already"),
},
// clap does not support disallowing args based on an enum value, so we have to check
// for `--network non-custom` + custom required args manually.
_ => {
use clap::error::ErrorKind;
Cli::command().error(ErrorKind::ArgumentConflict, "--gateway-url, --feeder-gateway-url and --chain-id may only be used with --network custom").exit()
}
};
Some(cfg)
}
}
impl Config {
/// Parses the command line / environment into a [`Config`].
///
/// May exit the process: `Cli::parse` exits on invalid arguments, and
/// `parse_cors_or_exit` / `NetworkConfig::from_components` exit on their own
/// validation failures.
pub fn parse() -> Self {
let cli = Cli::parse();
let network = NetworkConfig::from_components(cli.network);
Config {
data_directory: cli.data_directory,
ethereum: Ethereum {
password: cli.ethereum_password,
url: cli.ethereum_url,
},
rpc_address: cli.rpc_address,
rpc_cors_domains: parse_cors_or_exit(cli.rpc_cors_domains),
// WebSocket settings are only kept when the transport was enabled.
ws: cli.ws.then_some(WebSocket {
capacity: cli.ws_capacity,
}),
monitor_address: cli.monitor_address,
network,
poll_pending: cli.poll_pending,
python_subprocesses: cli.python_subprocesses,
sqlite_wal: match cli.sqlite_wal {
true => JournalMode::WAL,
false => JournalMode::Rollback,
},
max_rpc_connections: cli.max_rpc_connections,
poll_interval: std::time::Duration::from_secs(cli.poll_interval.get()),
color: cli.color,
}
}
}
#[cfg(test)]
mod tests {
use super::{AllowedOrigins, RpcCorsDomainsParseError};
use crate::config::parse_cors;
/// Table-driven coverage of `parse_cors`: first the rejection cases, then the
/// accepted ones (including deduplication of repeated origins).
#[test]
fn parse_cors_domains() {
let empty = String::new();
let wildcard = "*".to_owned();
let valid = "http://valid.com:1234".to_owned();
let not_url = "not_url".to_string();
let with_path = "http://a.com/path".to_string();
let with_query = "http://a.com/?query=x".to_string();
let with_trailing_slash = format!("{valid}/");
// Each input list must fail with exactly the given error.
[
(
vec![empty.clone()],
RpcCorsDomainsParseError::InvalidDomain(empty.clone()),
),
(
vec![empty, wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![wildcard.clone(), wildcard.clone()],
RpcCorsDomainsParseError::WildcardAmongOtherValues,
),
(
vec![valid.clone(), with_trailing_slash.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_trailing_slash),
),
(
vec![valid.clone(), not_url.clone()],
RpcCorsDomainsParseError::InvalidDomain(not_url),
),
(
vec![valid.clone(), with_path.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_path),
),
(
vec![valid.clone(), with_query.clone()],
RpcCorsDomainsParseError::InvalidDomain(with_query),
),
]
.into_iter()
.for_each(|(input, expected_error)| {
assert_eq!(
parse_cors(input.clone()).unwrap_err(),
expected_error,
"input: {input:?}"
);
});
// Accepted inputs and their resulting policies.
[
(vec![], None),
(vec![wildcard], Some(AllowedOrigins::Any)),
(
vec![valid.clone()],
Some(AllowedOrigins::List(vec![valid.clone()])),
),
(
vec![valid.clone(), valid.clone()],
Some(AllowedOrigins::List(vec![valid])),
),
]
.into_iter()
.for_each(|(input, expected_ok)| {
assert_eq!(
parse_cors(input.clone()).unwrap(),
expected_ok,
"input: {input:?}"
)
});
}
}
| Cli | identifier_name |
lib.rs | `Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! //... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure its available!
//! (you get an Error::Userinfo::Nourl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
oauth: inth_oauth2::Client<Discovered>,
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if!scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded {.. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm!= sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value,.. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
/// Validate a decoded token. If you don't get an error, its valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
/// - Validation::Expired::MaxAge is the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss!= self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => { | let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if!claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
if let None = claims.azp {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual!= &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
}
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
/// - Userinfo::MismatchSubject if the returned userinfo document and tokens subject mismatch
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
if claims.sub!= info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to..Default::default() after you specify what you want.
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
pub state: Option<String>,
pub nonce: Option<String>,
pub display: Option<Display>,
pub prompt: Option<std::collections::HashSet<Prompt>>,
pub max_age: Option<Duration>,
pub ui_locales: Option<String>,
pub claims_locales: Option<String>,
pub id_token_hint: Option<String>,
pub login_hint: Option<String>,
pub acr_values: Option<String>,
}
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
// ISO 9601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
#[serde(default)]
pub updated_at: Option<i64>,
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
Page,
Popup,
Touch,
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
None,
Login,
Consent,
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
/// Address Claim struct. Can be only formatted, only the rest, or both.
#[derive(Debug, Deserialize, Serialize | if expected != actual { | random_line_split |
lib.rs | Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! //... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure its available!
//! (you get an Error::Userinfo::Nourl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
oauth: inth_oauth2::Client<Discovered>,
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
self.oauth
.request_token(client, auth_code)
.map_err(Error::from)
}
/// A reference to the config document of the provider obtained via discovery
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
let scope = match options.scope {
Some(ref scope) => {
if!scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn | (&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded {.. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm!= sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC
AlgorithmParameters::OctectKey { ref value,.. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
/// Validate a decoded token. If you don't get an error, its valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
/// - Validation::Expired::MaxAge is the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss!= self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => {
if expected!= actual {
let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if!claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
if let None = claims.azp {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual!= &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
}
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
/// - Userinfo::MismatchSubject if the returned userinfo document and tokens subject mismatch
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
if claims.sub!= info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to..Default::default() after you specify what you want.
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
pub state: Option<String>,
pub nonce: Option<String>,
pub display: Option<Display>,
pub prompt: Option<std::collections::HashSet<Prompt>>,
pub max_age: Option<Duration>,
pub ui_locales: Option<String>,
pub claims_locales: Option<String>,
pub id_token_hint: Option<String>,
pub login_hint: Option<String>,
pub acr_values: Option<String>,
}
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
// ISO 9601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
#[serde(default)]
pub updated_at: Option<i64>,
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
Page,
Popup,
Touch,
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
None,
Login,
Consent,
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
/// Address Claim struct. Can be only formatted, only the rest, or both.
#[derive(Debug, Deserialize | decode_token | identifier_name |
lib.rs | Token` that has been validated. Your user is
//! authenticated!
//!
//! You can also take a more nuanced approach that gives you more fine grained control:
//!
//! ```rust,ignore
//! use oidc;
//! use reqwest;
//! use std::default::Default;
//!
//! let id = "my client".to_string();
//! let secret = "a secret to everybody".to_string();
//! let redirect = reqwest::Url::parse("https://my-redirect.foo/dest")?;
//! let issuer = oidc::issuer::google();
//! let http = reqwest::Client::new();
//!
//! let config = oidc::discovery::discover(&http, issuer)?;
//! let jwks = oidc::discovery::jwks(&http, config.jwks_uri.clone())?;
//! let provider = oidc::discovery::Discovered(config);
//!
//! let client = oidc::new(id, secret, redirect, provider, jwks);
//! let auth_url = client.auth_url(Default::default());
//!
//! //... send your user to auth_url, get an auth_code back at your redirect url handler
//!
//! let mut token = client.request_token(&http, auth_code)?;
//! client.decode_token(&mut token)?;
//! client.validate_token(&token, None, None)?;
//! let userinfo = client.request_userinfo(&http, &token)?;
//! ```
//!
//! This more complicated version uses the discovery module directly. Important distinctions to make
//! between the two:
//!
//! - The complex pattern avoids constructing a new reqwest client every time an outbound method is
//! called. Especially for token decoding having to rebuild reqwest every time can be a large
//! performance penalty.
//! - Tokens don't come decoded or validated. You need to do both manually.
//! - This version demonstrates userinfo. It is not required by spec, so make sure it's available!
//! (you get an Error::Userinfo::NoUrl if it is not)
pub mod discovery;
pub mod error;
pub mod issuer;
pub mod token;
use std::collections::HashMap;
pub use crate::error::Error;
use biscuit::jwa::{self, SignatureAlgorithm};
use biscuit::jwk::{AlgorithmParameters, JWKSet};
use biscuit::jws::{Compact, Secret};
use biscuit::{Empty, SingleOrMultiple};
use chrono::{Duration, NaiveDate, Utc};
use inth_oauth2::token::Token as _t;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use validator::Validate;
use validator_derive::Validate;
use crate::discovery::{Config, Discovered};
use crate::error::{Decode, Expiry, Mismatch, Missing, Validation};
use crate::token::{Claims, Token};
/// An ID token: a JWS compact JWT whose payload is our `Claims` type.
type IdToken = Compact<Claims, Empty>;
/// OpenID Connect Client for a provider specified at construction.
pub struct Client {
// Underlying OAuth2 client that performs the auth-code/token exchange.
oauth: inth_oauth2::Client<Discovered>,
// The provider's published JWK set, used to verify ID token signatures.
jwks: JWKSet<Empty>,
}
// Common pattern in the Client::decode function when dealing with mismatched keys.
// Expands to an `Err(error::Jose::WrongKeyType {.. }.into())` expression; both
// sides are rendered with `Debug` for the error message.
macro_rules! wrong_key {
($expected:expr, $actual:expr) => {
Err(error::Jose::WrongKeyType {
expected: format!("{:?}", $expected),
actual: format!("{:?}", $actual),
}
.into())
};
}
impl Client {
/// Constructs a client from an issuer url and client parameters via discovery
///
/// Performs network I/O: fetches the provider's discovery document and its
/// JWK set before building the client.
pub fn discover(id: String, secret: String, redirect: Url, issuer: Url) -> Result<Self, Error> {
// Reject an unacceptable redirect up front (presumably a non-https
// check — see discovery::secure; confirm against that module).
discovery::secure(&redirect)?;
let client = reqwest::Client::new();
let config = discovery::discover(&client, issuer)?;
let jwks = discovery::jwks(&client, config.jwks_uri.clone())?;
let provider = Discovered(config);
Ok(Self::new(id, secret, redirect, provider, jwks))
}
/// Constructs a client from a given provider, key set, and parameters. Unlike ::discover(..)
/// this function does not perform any network operations.
pub fn new(
id: String,
secret: String,
redirect: Url,
provider: Discovered,
jwks: JWKSet<Empty>,
) -> Self {
Client {
// The redirect is stored (stringified) inside the oauth client; see
// redirect_url() for retrieval.
oauth: inth_oauth2::Client::new(provider, id, secret, Some(redirect.into_string())),
jwks,
}
}
/// Passthrough to the redirect_url stored in inth_oauth2 as a str.
///
/// # Panics
/// Never in practice: both constructors always supply a redirect, so the
/// underlying Option is always Some.
pub fn redirect_url(&self) -> &str {
self.oauth
.redirect_uri
.as_ref()
.expect("We always require a redirect to construct client!")
}
/// Passthrough to the inth_oauth2::client's request token.
pub fn request_token(&self, client: &reqwest::Client, auth_code: &str) -> Result<Token, Error> {
    // Delegate to the OAuth2 layer; `?` lifts its error into our Error type.
    Ok(self.oauth.request_token(client, auth_code)?)
}
/// A reference to the config document of the provider obtained via discovery.
/// (`Discovered` is a newtype wrapper around the raw `Config`.)
pub fn config(&self) -> &Config {
&self.oauth.provider.0
}
/// Constructs the auth_url to redirect a client to the provider. Options are... optional. Use
/// them as needed. Keep the Options struct around for authentication, or at least the nonce
/// and max_age parameter - we need to verify they stay the same and validate if you used them.
pub fn auth_url(&self, options: &Options) -> Url {
// Ensure the scope always contains "openid", which the spec requires.
let scope = match options.scope {
Some(ref scope) => {
if!scope.contains("openid") {
String::from("openid ") + scope
} else {
scope.clone()
}
}
// Default scope value
None => String::from("openid"),
};
let mut url = self
.oauth
.auth_uri(Some(&scope), options.state.as_ref().map(String::as_str));
// Append each optional parameter that was set. The inner block scope ends
// the mutable borrow taken by query_pairs_mut() before `url` is returned.
{
let mut query = url.query_pairs_mut();
if let Some(ref nonce) = options.nonce {
query.append_pair("nonce", nonce.as_str());
}
if let Some(ref display) = options.display {
query.append_pair("display", display.as_str());
}
if let Some(ref prompt) = options.prompt {
// prompt is a set; it is serialized as a space-separated list.
let s = prompt
.iter()
.map(|s| s.as_str())
.collect::<Vec<_>>()
.join(" ");
query.append_pair("prompt", s.as_str());
}
if let Some(max_age) = options.max_age {
query.append_pair("max_age", max_age.num_seconds().to_string().as_str());
}
if let Some(ref ui_locales) = options.ui_locales {
query.append_pair("ui_locales", ui_locales.as_str());
}
if let Some(ref claims_locales) = options.claims_locales {
query.append_pair("claims_locales", claims_locales.as_str());
}
if let Some(ref id_token_hint) = options.id_token_hint {
query.append_pair("id_token_hint", id_token_hint.as_str());
}
if let Some(ref login_hint) = options.login_hint {
query.append_pair("login_hint", login_hint.as_str());
}
if let Some(ref acr_values) = options.acr_values {
query.append_pair("acr_values", acr_values.as_str());
}
}
url
}
/// Given an auth_code and auth options, request the token, decode, and validate it.
///
/// Convenience wrapper: builds a fresh reqwest client on every call. For
/// repeated authentication, hold a client and use request_token /
/// decode_token / validate_token directly.
pub fn authenticate(
&self,
auth_code: &str,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<Token, Error> {
let client = reqwest::Client::new();
let mut token = self.request_token(&client, auth_code)?;
self.decode_token(&mut token.id_token)?;
self.validate_token(&token.id_token, nonce, max_age)?;
Ok(token)
}
/// Mutates a Compact::encoded Token to Compact::decoded. Errors are:
///
/// - Decode::MissingKid if the keyset has multiple keys but the key id on the token is missing
/// - Decode::MissingKey if the given key id is not in the key set
/// - Decode::EmptySet if the keyset is empty
/// - Jose::WrongKeyType if the alg of the key and the alg in the token header mismatch
/// - Jose::WrongKeyType if the specified key alg isn't a signature algorithm
/// - Jose error if decoding fails
pub fn decode_token(&self, token: &mut IdToken) -> Result<(), Error> {
// This is an early return if the token is already decoded
if let Compact::Decoded {.. } = *token {
return Ok(());
}
let header = token.unverified_header()?;
// If there is more than one key, the token MUST have a key id
let key = if self.jwks.keys.len() > 1 {
let token_kid = header.registered.key_id.ok_or(Decode::MissingKid)?;
self.jwks
.find(&token_kid)
.ok_or(Decode::MissingKey(token_kid))?
} else {
// TODO We would want to verify the keyset is >1 in the constructor
// rather than every decode call, but we can't return an error in new().
self.jwks.keys.first().as_ref().ok_or(Decode::EmptySet)?
};
// If the key declares an algorithm, it must agree with the token header,
// and it must be a *signature* algorithm (not encryption/key-management).
if let Some(alg) = key.common.algorithm.as_ref() {
if let &jwa::Algorithm::Signature(sig) = alg {
if header.registered.algorithm!= sig {
return wrong_key!(sig, header.registered.algorithm);
}
} else {
return wrong_key!(SignatureAlgorithm::default(), alg);
}
}
let alg = header.registered.algorithm;
match key.algorithm {
// HMAC: symmetric — the shared secret bytes are used directly
AlgorithmParameters::OctectKey { ref value,.. } => match alg {
SignatureAlgorithm::HS256
| SignatureAlgorithm::HS384
| SignatureAlgorithm::HS512 => {
*token = token.decode(&Secret::Bytes(value.clone()), alg)?;
Ok(())
}
_ => wrong_key!("HS256 | HS384 | HS512", alg),
},
// RSA: asymmetric — rebuild the public key from modulus + exponent
AlgorithmParameters::RSA(ref params) => match alg {
SignatureAlgorithm::RS256
| SignatureAlgorithm::RS384
| SignatureAlgorithm::RS512 => {
let pkcs = Secret::RSAModulusExponent {
n: params.n.clone(),
e: params.e.clone(),
};
*token = token.decode(&pkcs, alg)?;
Ok(())
}
_ => wrong_key!("RS256 | RS384 | RS512", alg),
},
AlgorithmParameters::EllipticCurve(_) => unimplemented!("No support for EC keys yet"),
}
}
/// Validate a decoded token. If you don't get an error, its valid! Nonce and max_age come from
/// your auth_uri options. Errors are:
///
/// - Jose Error if the Token isn't decoded
/// - Validation::Mismatch::Issuer if the provider issuer and token issuer mismatch
/// - Validation::Mismatch::Nonce if a given nonce and the token nonce mismatch
/// - Validation::Missing::Nonce if either the token or args has a nonce and the other does not
/// - Validation::Missing::Audience if the token aud doesn't contain the client id
/// - Validation::Missing::AuthorizedParty if there are multiple audiences and azp is missing
/// - Validation::Mismatch::AuthorizedParty if the azp is not the client_id
/// - Validation::Expired::Expires if the current time is past the expiration time
/// - Validation::Expired::MaxAge is the token is older than the provided max_age
/// - Validation::Missing::Authtime if a max_age was given and the token has no auth time
pub fn validate_token(
&self,
token: &IdToken,
nonce: Option<&str>,
max_age: Option<&Duration>,
) -> Result<(), Error> {
let claims = token.payload()?;
if claims.iss!= self.config().issuer {
let expected = self.config().issuer.as_str().to_string();
let actual = claims.iss.as_str().to_string();
return Err(Validation::Mismatch(Mismatch::Issuer { expected, actual }).into());
}
match nonce {
Some(expected) => match claims.nonce {
Some(ref actual) => {
if expected!= actual {
let expected = expected.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::Nonce { expected, actual }).into()
);
}
}
None => return Err(Validation::Missing(Missing::Nonce).into()),
},
None => {
if claims.nonce.is_some() {
return Err(Validation::Missing(Missing::Nonce).into());
}
}
}
if!claims.aud.contains(&self.oauth.client_id) {
return Err(Validation::Missing(Missing::Audience).into());
}
// By spec, if there are multiple auds, we must have an azp
if let SingleOrMultiple::Multiple(_) = claims.aud {
if let None = claims.azp {
return Err(Validation::Missing(Missing::AuthorizedParty).into());
}
}
// If there is an authorized party, it must be our client_id
if let Some(ref actual) = claims.azp {
if actual!= &self.oauth.client_id {
let expected = self.oauth.client_id.to_string();
let actual = actual.to_string();
return Err(
Validation::Mismatch(Mismatch::AuthorizedParty { expected, actual }).into(),
);
}
}
let now = Utc::now();
// Now should never be less than the time this code was written!
if now.timestamp() < 1504758600 {
panic!("chrono::Utc::now() can never be before this was written!")
}
if claims.exp <= now.timestamp() |
if let Some(max) = max_age {
match claims.auth_time {
Some(time) => {
let age = chrono::Duration::seconds(now.timestamp() - time);
if age >= *max {
return Err(error::Validation::Expired(Expiry::MaxAge(age)).into());
}
}
None => return Err(Validation::Missing(Missing::AuthTime).into()),
}
}
Ok(())
}
/// Get a userinfo json document for a given token at the provider's userinfo endpoint.
/// Errors are:
///
/// - Userinfo::NoUrl if this provider doesn't have a userinfo endpoint
/// - Error::Insecure if the userinfo url is not https
/// - Error::Jose if the token is not decoded
/// - Error::Http if something goes wrong getting the document
/// - Error::Json if the response is not a valid Userinfo document
/// - Userinfo::MismatchSubject if the returned userinfo document and tokens subject mismatch
pub fn request_userinfo(
&self,
client: &reqwest::Client,
token: &Token,
) -> Result<Userinfo, Error> {
match self.config().userinfo_endpoint {
Some(ref url) => {
discovery::secure(&url)?;
let claims = token.id_token.payload()?;
// The access token is presented as a Bearer credential.
let auth_code = token.access_token().to_string();
let mut resp = client
.get(url.clone())
// FIXME This is a transitional hack for Reqwest 0.9 that should be refactored
// when upstream restores typed header support.
.header_011(reqwest::hyper_011::header::Authorization(
reqwest::hyper_011::header::Bearer { token: auth_code },
))
.send()?;
let info: Userinfo = resp.json()?;
// The userinfo subject must match the ID token subject.
if claims.sub!= info.sub {
let expected = info.sub.clone();
let actual = claims.sub.clone();
return Err(error::Userinfo::MismatchSubject { expected, actual }.into());
}
Ok(info)
}
None => Err(error::Userinfo::NoUrl.into()),
}
}
}
/// Optional parameters that [OpenID specifies](https://openid.net/specs/openid-connect-basic-1_0.html#RequestParameters) for the auth URI.
/// Derives Default, so remember to..Default::default() after you specify what you want.
#[derive(Default)]
pub struct Options {
/// MUST contain openid. By default this is ONLY openid. Official optional scopes are
/// email, profile, address, phone, offline_access. Check the Discovery config
/// `scopes_supported` to see what is available at your provider!
pub scope: Option<String>,
/// Opaque value the provider echoes back; use it to correlate the callback.
pub state: Option<String>,
/// Replay-protection value; validate_token checks it against the token's nonce.
pub nonce: Option<String>,
/// Preferred UI presentation at the provider.
pub display: Option<Display>,
/// Set of re-prompting behaviors to request (serialized space-separated).
pub prompt: Option<std::collections::HashSet<Prompt>>,
/// Maximum acceptable age of an existing authentication session.
pub max_age: Option<Duration>,
/// Preferred languages for the provider's UI (see spec for format).
pub ui_locales: Option<String>,
/// Preferred languages for returned claims (see spec for format).
pub claims_locales: Option<String>,
/// Previously issued ID token, as a hint about the user's session.
pub id_token_hint: Option<String>,
/// Hint (e.g. an email) identifying the account to log in.
pub login_hint: Option<String>,
/// Requested Authentication Context Class References (see spec).
pub acr_values: Option<String>,
}
/// The userinfo struct contains all possible userinfo fields regardless of scope. [See spec.](https://openid.net/specs/openid-connect-basic-1_0.html#StandardClaims)
// TODO is there a way to use claims_supported in config to simplify this struct?
#[derive(Debug, Deserialize, Serialize, Validate)]
pub struct Userinfo {
// The only claim REQUIRED by spec; checked against the ID token subject.
pub sub: String,
#[serde(default)]
pub name: Option<String>,
#[serde(default)]
pub given_name: Option<String>,
#[serde(default)]
pub family_name: Option<String>,
#[serde(default)]
pub middle_name: Option<String>,
#[serde(default)]
pub nickname: Option<String>,
#[serde(default)]
pub preferred_username: Option<String>,
#[serde(default)]
#[serde(with = "url_serde")]
pub profile: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub picture: Option<Url>,
#[serde(default)]
#[serde(with = "url_serde")]
pub website: Option<Url>,
#[serde(default)]
#[validate(email)]
pub email: Option<String>,
#[serde(default)]
pub email_verified: bool,
// Isn't required to be just male or female
#[serde(default)]
pub gender: Option<String>,
// ISO 8601:2004 YYYY-MM-DD or YYYY.
#[serde(default)]
pub birthdate: Option<NaiveDate>,
// Region/City codes. Should also have a more concrete serializer form.
#[serde(default)]
pub zoneinfo: Option<String>,
// Usually RFC5646 langcode-countrycode, maybe with a _ sep, could be arbitrary
#[serde(default)]
pub locale: Option<String>,
// Usually E.164 format number
#[serde(default)]
pub phone_number: Option<String>,
#[serde(default)]
pub phone_number_verified: bool,
#[serde(default)]
pub address: Option<Address>,
// Seconds since the Unix epoch, per spec.
#[serde(default)]
pub updated_at: Option<i64>,
// Any non-standard claims the provider returned are collected here.
#[serde(flatten)]
pub extra: HashMap<String, Value>,
}
/// The four values for the preferred display parameter in the Options. See spec for details.
pub enum Display {
/// Full-page user interface (the spec's default presentation).
Page,
/// Popup window.
Popup,
/// Touch-oriented interface.
Touch,
/// "Wireless Application Protocol" / feature-phone interface.
Wap,
}
impl Display {
fn as_str(&self) -> &'static str {
use self::Display::*;
match *self {
Page => "page",
Popup => "popup",
Touch => "touch",
Wap => "wap",
}
}
}
/// The four possible values for the prompt parameter set in Options. See spec for details.
#[derive(PartialEq, Eq, Hash)]
pub enum Prompt {
/// Never re-prompt; the provider errors if the user is not already authenticated.
None,
/// Force re-authentication.
Login,
/// Force the consent screen.
Consent,
/// Force account selection.
SelectAccount,
}
impl Prompt {
fn as_str(&self) -> &'static str {
use self::Prompt::*;
match *self {
None => "none",
Login => "login",
Consent => "consent",
SelectAccount => "select_account",
}
}
}
/// Address Claim struct. Can be only formatted, only the rest, or both.
#[derive(Debug, Deserialize | {
return Err(Validation::Expired(Expiry::Expires(
chrono::naive::NaiveDateTime::from_timestamp(claims.exp, 0),
))
.into());
} | conditional_block |
machine.rs | RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
    match reg.value() {
        // Normal user register — store the value.
        RegisterRef::User(reg_id) => self.registers[*reg_id] = value,
        // Null register swallows all writes (/dev/null semantics).
        RegisterRef::Null => {}
        // Read-only registers; a write here means the compiler let
        // through an invalid program.
        RegisterRef::InputLength | RegisterRef::StackLength(_) => {
            panic!("Unwritable register {:?}", reg)
        }
    }
}
/// Pushes the given value onto the given stack. If the stack is already at
/// capacity, a StackOverflow error is returned. If the stack reference is
/// invalid, will panic (should be validated at build time).
fn push_stack(
    &mut self,
    stack_ref: &SpanNode<StackRef>,
    value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
    // Read the limit before mutably borrowing the stack itself.
    let capacity = self.hardware_spec.max_stack_length;
    let stack = &mut self.stacks[stack_ref.value().0];
    if stack.len() < capacity {
        stack.push(value);
        Ok(())
    } else {
        Err((RuntimeError::StackOverflow, *stack_ref.metadata()))
    }
}
/// Pops an element off the given stack and returns it. Returns an
/// EmptyStack error if there is nothing to pop. If the stack reference is
/// invalid, will panic (should be validated at build time).
fn pop_stack(
    &mut self,
    stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
    self.stacks[stack_ref.value().0]
        .pop()
        .ok_or((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
// Arithmetic uses two's-complement wrapping (via Wrapping), so
// overflow wraps instead of panicking in debug builds.
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor!= 0 {
// Rust's integer `/` truncates toward zero (it does NOT floor
// for negative operands)
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
// Three-way comparison: dst receives -1, 0, or 1
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src)!= 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it.
// Storing it also marks the machine terminated, so later
// calls short-circuit with Ok(false).
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while!self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
    self.source.as_str()
}
/// Get a reference to the program being executed (the compiled
/// instruction list with source spans attached).
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer (values not yet consumed by Read).
pub fn input(&self) -> &[LangValue] {
    &self.input
}
/// Get the current output buffer (values produced by Write so far).
pub fn output(&self) -> &[LangValue] {
    &self.output
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
    // Snapshot every register the hardware spec declares.
    let mut registers = HashMap::new();
    for reg_ref in self.hardware_spec.all_register_refs() {
        let value = self.get_reg(reg_ref);
        registers.insert(reg_ref, value);
    }
    registers
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
    // Snapshot every stack the hardware spec declares, borrowing each
    // stack's contents as a slice.
    let mut stacks = HashMap::new();
    for stack_ref in self.hardware_spec.all_stack_refs() {
        let values = self.stacks[stack_ref.0].as_slice();
        stacks.insert(stack_ref, values);
    }
    stacks
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
// (native builds get the same methods, just without the bindgen attributes;
// js_name exposes them to JS under camelCase getters)
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
// Check for normal complete
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if!self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if!self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output!= self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
    /// A wrapper for [Self::input], to be called from wasm.
    #[wasm_bindgen(getter, js_name = "input")]
    pub fn wasm_input(&self) -> Vec<LangValue> {
        self.input.clone()
    }
    /// A wrapper for [Self::output], to be called from wasm.
    #[wasm_bindgen(getter, js_name = "output")]
    pub fn wasm_output(&self) -> Vec<LangValue> {
        self.output.clone()
    }
    /// A wrapper for [Self::registers], to be called from wasm. We can't send
    /// maps through wasm, so this returns a [JsValue] which is an object
    /// mapping register names (strings) to their values (`LangValue`).
    #[wasm_bindgen(getter, js_name = "registers")]
    pub fn wasm_registers(&self) -> LangValueMap {
        // Convert the keys of the register map to strings
        let regs_by_name: HashMap<String, LangValue> = self
            .registers()
            .into_iter()
            .map(|(reg_ref, reg_value)| (reg_ref.to_string(), reg_value))
            .collect();
        // Convert the hashmap to a js object. Be careful here!
        // (Previously this read `®s_by_name` — a mis-encoded `&regs_by_name` —
        // which does not compile.)
        JsValue::from_serde(&regs_by_name).unwrap().unchecked_into()
    }
    /// A wrapper for [Self::stacks], to be called from wasm. We can't send
    /// maps through wasm, so this returns a [JsValue] which is an object
    /// mapping stacks names (strings) to their values (`Vec<LangValue>`).
    #[wasm_bindgen(getter, js_name = "stacks")]
    pub fn wasm_stacks(&self) -> LangValueArrayMap {
        // Convert the keys of the stacks map to strings
        let stacks_by_name: HashMap<String, &[LangValue]> = self
            .stacks()
            .into_iter()
            .map(|(stack_ref, stack_value)| {
                (stack_ref.to_string(), stack_value)
            })
            .collect();
        // Convert the hashmap to a js object. Be careful here!
        JsValue::from_serde(&stacks_by_name)
            .unwrap()
            .unchecked_into()
    }
    /// A wrapper for [Self::error], to be called from wasm. We can't send
    /// maps through wasm, so this returns a simplified error as a
    /// [SourceElement].
    #[wasm_bindgen(getter, js_name = "error")]
    pub fn wasm_error(&self) -> Option<SourceElement> {
        self.error.as_ref().map(|wrapped_error| {
            // If an error is present, there should always be exactly one
            match wrapped_error.errors() {
                [error] => error.into(),
                errors => panic!(
                    "Expected exactly 1 runtime error, but got {:?}",
                    errors
                ),
            }
        })
    }
    /// A wrapper for [Self::execute_next], to be called from wasm. We throw
    /// away the error because it simplifies the logic on the TS side. That
    /// error is accessible via [Self::wasm_error] anyway.
    #[wasm_bindgen(js_name = "executeNext")]
    pub fn wasm_execute_next(&mut self) -> bool {
        // If an error occurred, that means something executed, so return true
        self.execute_next().unwrap_or(true)
    }
    /// A wrapper for [Self::execute_all], to be called from wasm. We throw
    /// away the error because it simplifies the logic on the TS side. That
    /// error is accessible via [Self::wasm_error] anyway.
    #[wasm_bindgen(js_name = "executeAll")]
    pub fn wasm_execute_all(&mut self) -> bool {
        // If an error occurred, that means something executed, so return true
        self.execute_all().unwrap_or(true)
    }
}
/// The reason why a program failed. **These reasons are only applicable for
/// terminated, unsuccessful programs**. For a program that has yet to
/// terminate, or did so successfully, none of these cases apply.
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
#[derive(Copy, Clone, Debug)]
pub enum | FailureReason | identifier_name |
|
machine.rs | be small enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
/// The series of stacks that act as the programs RAM. The number of stacks
/// and their capacity is determined by the initializating hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
///
/// `hardware_spec` fixes the register/stack counts and capacities,
/// `program_spec` supplies the input buffer and expected output,
/// `program` is the compiled instruction list, and `source` is retained
/// for error reporting.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
// All registers start at zero.
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to prevent grows during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// Constants are returned as-is; register references are resolved through
/// [Self::get_reg]. Panics on an invalid register reference (shouldn't be
/// possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
    match src.value() {
        ValueSource::Const(Node(constant, _)) => *constant,
        ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
    }
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
// The null register always reads as zero.
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
// If the stack is capacity, make sure we're not over it
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor!= 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src)!= 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while!self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values. | .into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
// Check for normal complete
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if!self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if!self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output!= self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ref | pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs() | random_line_split |
machine.rs | enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
/// The series of stacks that act as the programs RAM. The number of stacks
/// and their capacity is determined by the initializating hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to prevent grows during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> |
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
}
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor!= 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src)!= 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Executes this machine until termination (or error). All instructions are
/// executed until [Self::terminated] returns true. Returns the value of
/// [Self::successful] upon termination.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
// We can't return the error directly from the loop because of a bug
// in the borrow checker. Instead, we have to play lifetime tetris.
while!self.terminated() {
if self.execute_next().is_err() {
break;
}
}
// Check if an error occurred, and return it if so
match &self.error {
None => Ok(self.successful()),
Some(error) => Err(error),
}
}
/// Get the source code that this machine is built for.
pub fn source_code(&self) -> &str {
&self.source
}
/// Get a reference to the program being executed.
pub fn program(&self) -> &Program<Span> {
&self.program
}
/// Get the current input buffer.
pub fn input(&self) -> &[LangValue] {
self.input.as_slice()
}
/// Get the current output buffer.
pub fn output(&self) -> &[LangValue] {
self.output.as_slice()
}
/// Get all registers and their current values.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
self.hardware_spec
.all_register_refs()
.into_iter()
.map(|reg_ref| (reg_ref, self.get_reg(reg_ref)))
.collect()
}
/// Get all stacks and their current values.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
self.hardware_spec
.all_stack_refs()
.into_iter()
.map(|stack_ref| (stack_ref, self.stacks[stack_ref.0].as_slice()))
.collect()
}
/// Get the runtime error that halted execution of this machine. If no error
/// has occurred, return `None`.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
/// Get the index of the next instruction to be executed.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "programCounter")
)]
pub fn program_counter(&self) -> usize {
self.program_counter
}
/// Get the number of cycles, i.e. the number of instructions that have
/// been run, during the current program execution.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "cycleCount")
)]
pub fn cycle_count(&self) -> usize {
self.cycle_count
}
/// Checks if this machine has finished executing. This could be by normal
/// completion or by runtime error.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "terminated")
)]
pub fn terminated(&self) -> bool {
// Check for normal complete
self.program_counter >= self.program.instructions.len()
// Check for a runtime error
|| self.error.is_some()
}
/// Checks if this machine has completed successfully. The criteria are:
/// 1. Program is terminated (all instructions have been executed)
/// 2. No failures occurred (see [FailureReason] for possible failures)
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "successful")
)]
pub fn successful(&self) -> bool {
self.terminated() && self.failure_reason().is_none()
}
/// Determine why the executed program failed. **Only returns a value if
/// the program actually failed.** Will return `None` if the program
/// is still running or it succeeded.
#[cfg_attr(
target_arch = "wasm32",
wasm_bindgen(getter, js_name = "failureReason")
)]
pub fn failure_reason(&self) -> Option<FailureReason> {
if!self.terminated() {
// Program is still running, so we haven't failed (yet)
None
} else if self.error.is_some() {
Some(FailureReason::RuntimeError)
} else if!self.input.is_empty() {
Some(FailureReason::RemainingInput)
} else if self.output!= self.expected_output {
Some(FailureReason::IncorrectOutput)
} else {
// No failure states were hit, so program was successful!
None
}
}
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ | {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
// If the stack is capacity, make sure we're not over it
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
} | identifier_body |
machine.rs | enough that it probably doesn't matter.
/// Values never get added to the input, only popped off.
input: Vec<LangValue>,
/// The current output buffer. This can be pushed into, but never popped
/// out of.
output: Vec<LangValue>,
/// The registers that the user can read and write. Indexed by Register ID.
registers: Vec<LangValue>,
/// The series of stacks that act as the programs RAM. The number of stacks
/// and their capacity is determined by the initializating hardware spec.
stacks: Vec<Vec<LangValue>>,
/// The number of instructions that have been executed so far. This is not
/// unique, so repeated instructions are counted multiple times.
cycle_count: usize,
/// Stores a runtime error, if one has occurred. Once the error occurs,
/// this should be populated and from then on, the machine has terminated
/// and can no longer execute.
error: Option<WithSource<RuntimeError>>,
}
// Functions that DON'T get exported to wasm
impl Machine {
/// Creates a new machine, ready to be executed.
pub fn new(
hardware_spec: HardwareSpec,
program_spec: &ProgramSpec,
program: Program<Span>,
source: String,
) -> Self {
let registers =
iter::repeat(0).take(hardware_spec.num_registers).collect();
// Initialize `num_stacks` new stacks. Set an initial capacity
// for each one to prevent grows during program operation
let stacks = iter::repeat_with(|| {
Vec::with_capacity(hardware_spec.max_stack_length)
})
.take(hardware_spec.num_stacks)
.collect();
Self {
// Static data
hardware_spec,
program,
source,
expected_output: program_spec.expected_output().into(),
// Runtime state
program_counter: 0,
input: program_spec.input().into(),
output: Vec::new(),
registers,
stacks,
error: None,
// Performance stats
cycle_count: 0,
}
}
/// Gets a source value, which could either be a constant or a register.
/// If the value is a constant, just return that. If it's a register,
/// return the value from that register. Panics if the register reference is
/// invalid (shouldn't be possible because of validation).
fn get_val_from_src(&self, src: &SpanNode<ValueSource<Span>>) -> LangValue {
match src.value() {
ValueSource::Const(Node(val, _)) => *val,
ValueSource::Register(reg_ref) => self.get_reg(*reg_ref.value()),
}
}
/// Gets the value from the given register. The register reference is
/// assumed to be valid (should be validated at build time). Will panic if
/// it isn't valid.
fn get_reg(&self, reg: RegisterRef) -> LangValue {
match reg {
RegisterRef::Null => 0,
// These conversion unwraps are safe because we know that input
// and stack lengths are bounded by validation rules to fit into an
// i32 (max length is 256 at the time of writing this)
RegisterRef::InputLength => self.input.len().try_into().unwrap(),
RegisterRef::StackLength(stack_id) => {
self.stacks[stack_id].len().try_into().unwrap()
}
RegisterRef::User(reg_id) => *self.registers.get(reg_id).unwrap(),
}
}
/// Sets the register to the given value. The register reference is
/// assumed to be valid and writable (should be validated at build time).
/// Will panic if it isn't valid/writable.
fn set_reg(&mut self, reg: &SpanNode<RegisterRef>, value: LangValue) {
match reg.value() {
RegisterRef::Null => {} // /dev/null behavior - trash any input
RegisterRef::InputLength | RegisterRef::StackLength(_) => {
panic!("Unwritable register {:?}", reg)
}
RegisterRef::User(reg_id) => {
self.registers[*reg_id] = value;
}
}
}
/// Pushes the given value onto the given stack. If the stack reference is
/// invalid or the stack is at capacity, an error is returned. If the stack
/// reference is invalid, will panic (should be validated at build time).
fn push_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
value: LangValue,
) -> Result<(), (RuntimeError, Span)> {
// Have to access this first cause borrow checker
let max_stack_length = self.hardware_spec.max_stack_length;
let stack = &mut self.stacks[stack_ref.value().0];
// If the stack is capacity, make sure we're not over it
if stack.len() >= max_stack_length {
return Err((RuntimeError::StackOverflow, *stack_ref.metadata()));
}
stack.push(value);
Ok(())
}
/// Pops an element off the given stack. If the pop is successful, the
/// popped value is returned. If the stack is empty, an error is returned.
/// If the stack reference is invalid, will panic (should be validated at
/// build time).
fn pop_stack(
&mut self,
stack_ref: &SpanNode<StackRef>,
) -> Result<LangValue, (RuntimeError, Span)> {
let stack = &mut self.stacks[stack_ref.value().0];
if let Some(val) = stack.pop() {
Ok(val)
} else |
}
/// Internal function to execute the next instruction. The return value
/// is the same as [Self::execute_next], except the error needs to be
/// wrapped before being handed to the user.
fn execute_next_inner(&mut self) -> Result<bool, (RuntimeError, Span)> {
// We've previously hit an error, prevent further execution
if self.error.is_some() {
return Ok(false);
}
let instr_node =
match self.program.instructions.get(self.program_counter) {
// Clone is necessary so we don't maintain a ref to self
Some(instr_node) => instr_node.clone(),
// out of instructions to execute, just give up
None => return Ok(false),
};
// Prevent infinite loops
if self.cycle_count >= MAX_CYCLE_COUNT {
// Include the instruction that triggered the error
return Err((RuntimeError::TooManyCycles, *instr_node.metadata()));
}
// If we've reached this point, we know we're going to execute the
// instruction. Increment the cycle count now so that if we exit with
// an error, it still counts.
self.cycle_count += 1;
// Execute the instruction, and get a resulting optional label that we
// should jump to. For most instructions there will be no label, only
// when the instruction wants to trigger a jump.
let instruction = instr_node.value();
let span = *instr_node.metadata();
let target_label: Option<&Label> = match instruction {
Instruction::Read(reg) => {
if self.input.is_empty() {
return Err((RuntimeError::EmptyInput, span));
} else {
// Remove the first element in the input
let val = self.input.remove(0);
self.set_reg(reg, val);
}
None
}
Instruction::Write(src) => {
self.output.push(self.get_val_from_src(src));
None
}
Instruction::Set(dst, src) => {
self.set_reg(dst, self.get_val_from_src(src));
None
}
Instruction::Add(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
+ Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Sub(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
- Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Mul(dst, src) => {
self.set_reg(
dst,
(Wrapping(self.get_reg(*dst.value()))
* Wrapping(self.get_val_from_src(src)))
.0,
);
None
}
Instruction::Div(dst, src) => {
let divisor = self.get_val_from_src(src);
let dividend = self.get_reg(*dst.value());
if divisor!= 0 {
// This does flooring division
self.set_reg(dst, dividend / divisor);
} else {
return Err((RuntimeError::DivideByZero, span));
}
None
}
Instruction::Cmp(dst, src_1, src_2) => {
let val_1 = self.get_val_from_src(src_1);
let val_2 = self.get_val_from_src(src_2);
let cmp = match val_1.cmp(&val_2) {
Ordering::Less => -1,
Ordering::Equal => 0,
Ordering::Greater => 1,
};
self.set_reg(dst, cmp);
None
}
Instruction::Push(src, stack_ref) => {
self.push_stack(stack_ref, self.get_val_from_src(src))?;
None
}
Instruction::Pop(stack_ref, dst) => {
let popped = self.pop_stack(stack_ref)?;
self.set_reg(dst, popped);
None
}
// Jumps
Instruction::Jmp(Node(label, _)) => Some(label),
Instruction::Jez(src, Node(label, _)) => {
if self.get_val_from_src(src) == 0 {
Some(label)
} else {
None
}
}
Instruction::Jnz(src, Node(label, _)) => {
if self.get_val_from_src(src)!= 0 {
Some(label)
} else {
None
}
}
Instruction::Jlz(src, Node(label, _)) => {
if self.get_val_from_src(src) < 0 {
Some(label)
} else {
None
}
}
Instruction::Jgz(src, Node(label, _)) => {
if self.get_val_from_src(src) > 0 {
Some(label)
} else {
None
}
}
};
// If the instruction wants to jump to a label, look up its
// corresponding index and go there. Otherwise, just advance the PC one
// instruction
match target_label {
Some(label) => {
let destination = self
.program
.symbol_table
.get(label)
// If this panics, that means there's a bug in the
// compiler pipeline
.unwrap_or_else(|| panic!("unknown label: {}", label));
self.program_counter = *destination;
}
None => {
self.program_counter += 1;
}
}
debug!(println!("Executed {:?}\n\tState: {:?}", instruction, self));
Ok(true)
}
/// Executes the next instruction in the program.
///
/// # Returns
/// - `Ok(true)` if the instruction executed normally
/// - `Ok(false)` if the instruction didn't execute because the program has
/// already terminated
/// - `Err(error)` if an error occurred. The error is returned, with the
/// source information of the offending instruction
pub fn execute_next(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
match self.execute_next_inner() {
Ok(b) => Ok(b),
Err((error, span)) => {
// Store the error in self, then return a ref to it
self.error = Some(WithSource::new(
iter::once(SourceErrorWrapper::new(
error,
span,
&self.source,
)),
self.source.clone(),
));
Err(self.error.as_ref().unwrap())
}
}
}
/// Runs this machine until it terminates (or errors). Instructions are
/// executed until [Self::terminated] returns true, after which the value of
/// [Self::successful] is returned.
pub fn execute_all(&mut self) -> Result<bool, &WithSource<RuntimeError>> {
    // Returning the error straight out of the loop trips a borrow-checker
    // limitation, so run to completion first and inspect self.error after.
    loop {
        if self.terminated() || self.execute_next().is_err() {
            break;
        }
    }
    match &self.error {
        Some(error) => Err(error),
        None => Ok(self.successful()),
    }
}
/// The source code this machine was built from.
pub fn source_code(&self) -> &str {
    self.source.as_str()
}
/// Get a reference to the compiled program being executed. Instructions in
/// the program are indexed by [Self::program_counter].
pub fn program(&self) -> &Program<Span> {
    &self.program
}
/// The values remaining in the input buffer. Reads consume from the front.
pub fn input(&self) -> &[LangValue] {
    &self.input
}
/// The values written to the output buffer so far.
pub fn output(&self) -> &[LangValue] {
    &self.output
}
/// A snapshot of every register and its current value.
pub fn registers(&self) -> HashMap<RegisterRef, LangValue> {
    let mut registers = HashMap::new();
    for reg_ref in self.hardware_spec.all_register_refs() {
        registers.insert(reg_ref, self.get_reg(reg_ref));
    }
    registers
}
/// A snapshot of every stack and its current contents.
pub fn stacks(&self) -> HashMap<StackRef, &[LangValue]> {
    let mut stacks = HashMap::new();
    for stack_ref in self.hardware_spec.all_stack_refs() {
        stacks.insert(stack_ref, self.stacks[stack_ref.0].as_slice());
    }
    stacks
}
/// Get the runtime error that halted execution of this machine, if any.
/// Returns `None` while the machine is still running or after a clean finish.
pub fn error(&self) -> Option<&WithSource<RuntimeError>> {
    self.error.as_ref()
}
}
// Functions that get exported to wasm
#[cfg_attr(target_arch = "wasm32", wasm_bindgen)]
impl Machine {
    /// Index of the next instruction to be executed.
    #[cfg_attr(
        target_arch = "wasm32",
        wasm_bindgen(getter, js_name = "programCounter")
    )]
    pub fn program_counter(&self) -> usize {
        self.program_counter
    }

    /// Number of instructions executed so far during the current program run.
    #[cfg_attr(
        target_arch = "wasm32",
        wasm_bindgen(getter, js_name = "cycleCount")
    )]
    pub fn cycle_count(&self) -> usize {
        self.cycle_count
    }

    /// True once execution has stopped, either by running off the end of the
    /// program (normal completion) or by hitting a runtime error.
    #[cfg_attr(
        target_arch = "wasm32",
        wasm_bindgen(getter, js_name = "terminated")
    )]
    pub fn terminated(&self) -> bool {
        // A stored runtime error also counts as termination
        self.error.is_some()
            || self.program_counter >= self.program.instructions.len()
    }

    /// True when the program has terminated *and* no failure condition
    /// applies (see [FailureReason] for the possible failures).
    #[cfg_attr(
        target_arch = "wasm32",
        wasm_bindgen(getter, js_name = "successful")
    )]
    pub fn successful(&self) -> bool {
        self.terminated() && self.failure_reason().is_none()
    }

    /// Why the executed program failed. **Only returns a value if the program
    /// actually failed.** `None` while the program is still running, and also
    /// `None` if it terminated successfully.
    #[cfg_attr(
        target_arch = "wasm32",
        wasm_bindgen(getter, js_name = "failureReason")
    )]
    pub fn failure_reason(&self) -> Option<FailureReason> {
        if !self.terminated() {
            // Still running, so no verdict yet
            return None;
        }
        if self.error.is_some() {
            return Some(FailureReason::RuntimeError);
        }
        if !self.input.is_empty() {
            return Some(FailureReason::RemainingInput);
        }
        if self.output != self.expected_output {
            return Some(FailureReason::IncorrectOutput);
        }
        // No failure state was hit, so the program was successful!
        None
    }
}
// Wasm-ONLY functions
#[cfg(target_arch = "wasm32")]
#[wasm_bindgen]
impl Machine {
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "input")]
pub fn wasm_input(&self) -> Vec<LangValue> {
self.input.clone()
}
/// A wrapper for [Self::input], to be called from wasm.
#[wasm_bindgen(getter, js_name = "output")]
pub fn wasm_output(&self) -> Vec<LangValue> {
self.output.clone()
}
/// A wrapper for [Self::registers], to be called from wasm. We can't send
/// maps through wasm, so this returns a [JsValue] which is an object
/// mapping register names (strings) to their values (`LangValue`).
#[wasm_bindgen(getter, js_name = "registers")]
pub fn wasm_registers(&self) -> LangValueMap {
// Convert the keys of the register map to strings
let regs_by_name: HashMap<String, LangValue> = self
.registers()
.into_iter()
.map(|(reg_ref, reg_value)| (reg_ | {
Err((RuntimeError::EmptyStack, *stack_ref.metadata()))
} | conditional_block |
lib.rs | //! Embed images in documentation.
//!
//! This crate enables the portable embedding of images in
//! `rustdoc`-generated documentation. Standard
//! web-compatible image formats should be supported. Please [file an issue][issue-tracker]
//! if you have problems. Read on to learn how it works.
//!
//! # Showcase
//!
//! See the [showcase documentation][showcase-docs] for an example with embedded images.
//!
//! Please also check out the [source code][showcase-source] for [the showcase crate][showcase]
//! for a fleshed out example.
//!
//! # Motivation
//!
//! A picture is worth a thousand words. This oft quoted adage is no less true for technical
//! documentation. A carefully crafted diagram lets a new user immediately
//! grasp the high-level architecture of a complex library. Illustrations of geometric conventions
//! can vastly reduce confusion among users of scientific libraries. Despite the central role
//! of images in technical documentation, embedding images in Rust documentation in a way that
//! portably works correctly across local installations and [docs.rs](https://docs.rs) has been a
//! [longstanding issue of rustdoc][rustdoc-issue].
//!
//! This crate represents a carefully crafted solution based on procedural macros that works
//! around the current limitations of `rustdoc` and enables a practically workable approach to
//! embedding images in a portable manner.
//!
//! # How to embed images in documentation
//!
//! First, you'll need to depend on this crate. In `cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! // Replace x.x with the latest version
//! embed-doc-image = "x.x"
//! ```
//!
//! What the next step is depends on whether you want to embed images into *inner attribute
//! documentation* or *outer attribute documentation*. Inner attribute documentation is usually
//! used to document crate-level or module-level documentation, and typically starts each line with
//! `//!`. Outer attribute docs are used for most other forms of documentation, such as function
//! and struct documentation. Outer attribute documentation typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! ///![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! ///![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //!![Alt text goes here][myimagelabel]![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//!![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
ItemUse,
};
/// A parsed `("label", "path/to/image.ext")` macro argument pair.
// NOTE: the struct name was lost to extraction garbling in this copy
// (`struct | {`); restored from the `identifier_name` middle recorded at the
// end of this row and from the intact second copy of this file.
#[derive(Debug)]
struct ImageDescription {
    /// Markdown reference label used in doc comments, e.g. `myimagelabel`.
    label: String,
    /// Image path relative to the crate root (`CARGO_MANIFEST_DIR`).
    path: PathBuf,
}
/// Parses the macro arguments: a string-literal label, a comma, and a
/// string-literal path (relative to the crate root).
impl Parse for ImageDescription {
    fn parse(input: ParseStream) -> parse::Result<Self> {
        let label: syn::LitStr = input.parse()?;
        input.parse::<syn::Token![,]>()?;
        let path: syn::LitStr = input.parse()?;
        Ok(Self {
            label: label.value(),
            path: PathBuf::from(path.value()),
        })
    }
}
/// Read the image file at `path` and return its bytes as a base64 string.
///
/// # Panics
/// Panics if the file cannot be read.
fn encode_base64_image_from_path(path: &Path) -> String {
    let bytes = match read(path) {
        Ok(bytes) => bytes,
        Err(_) => panic!("Failed to load image at {}", path.display()),
    };
    base64::encode(bytes)
}
/// Infer the MIME type of an image from its file extension (case-insensitive).
///
/// # Panics
/// Panics if the extension is not a recognized web image format.
// TODO: Consider using the mime_guess crate? The below list does seem kinda
// exhaustive for doc purposes though?
fn determine_mime_type(extension: &str) -> String {
    let ext = extension.to_ascii_lowercase();
    // Pairs of (extensions, MIME type); matches taken haphazardly from
    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
    let mime = [
        (&["jpg", "jpeg"][..], "image/jpeg"),
        (&["png"][..], "image/png"),
        (&["bmp"][..], "image/bmp"),
        (&["svg"][..], "image/svg+xml"),
        (&["gif"][..], "image/gif"),
        (&["tif", "tiff"][..], "image/tiff"),
        (&["webp"][..], "image/webp"),
        (&["ico"][..], "image/vnd.microsoft.icon"),
    ]
    .iter()
    .find(|(exts, _)| exts.contains(&ext.as_str()))
    .map(|(_, mime)| *mime)
    .unwrap_or_else(|| {
        panic!("Unrecognized image extension, unable to infer correct MIME type")
    });
    mime.to_string()
}
/// Build the Markdown "reference definition" line mapping `image_desc.label`
/// to the image's contents as an inline base64 data URI.
///
/// # Panics
/// Panics if `CARGO_MANIFEST_DIR` is unset, the image cannot be read, or the
/// path lacks an extension / has an unrecognized extension.
fn produce_doc_string_for_image(image_desc: &ImageDescription) -> String {
    // Image paths are documented to be relative to the crate root.
    // Fix: the expect message previously misspelled the variable as
    // "CARGO_MANOFEST_DIR".
    let root_dir = std::env::var("CARGO_MANIFEST_DIR")
        .expect("Failed to retrieve value of CARGO_MANIFEST_DIR.");
    let root_dir = Path::new(&root_dir);
    let encoded = encode_base64_image_from_path(&root_dir.join(&image_desc.path));
    let ext = image_desc.path.extension().unwrap_or_else(|| {
        panic!(
            "No extension for file {}. Unable to determine MIME type.",
            image_desc.path.display()
        )
    });
    let mime = determine_mime_type(&ext.to_string_lossy());
    // The leading space keeps this line attached to the surrounding doc block.
    let doc_string = format!(
        " [{label}]: data:{mime};base64,{encoded}",
        label = &image_desc.label,
        mime = mime,
        encoded = &encoded
    );
    doc_string
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro]
pub fn embed_image(item: TokenStream) -> TokenStream {
    let image_desc = syn::parse_macro_input!(item as ImageDescription);
    let doc_string = produce_doc_string_for_image(&image_desc);
    // The "\n \n" prefix separates the "image table" entry from the rest of
    // the documentation; without the blank line the Markdown parser will not
    // treat it as a reference ("lookup table") for the image data.
    let payload = format!("\n \n {}", doc_string);
    quote!(#payload).into()
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Attribute form of the macro: appends the base64 "image table" entry to the
/// doc attributes of the annotated item.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro_attribute]
pub fn embed_doc_image(attr: TokenStream, item: TokenStream) -> TokenStream {
    // The attribute's arguments carry the ("label", "path") pair
    let image_desc = syn::parse_macro_input!(attr as ImageDescription);
    let doc_string = produce_doc_string_for_image(&image_desc);
    // Then inject a doc string that "resolves" the image reference and supplies the
    // base64-encoded data inline
    let mut input: syn::Item = syn::parse_macro_input!(item);
    match input {
        // Every item kind that can carry outer attributes gets the same
        // treatment: push extra #[doc] attributes onto its attribute list.
        Item::Const(ItemConst { ref mut attrs, .. })
        | Item::Enum(ItemEnum { ref mut attrs, .. })
        | Item::ExternCrate(ItemExternCrate { ref mut attrs, .. })
        | Item::Fn(ItemFn { ref mut attrs, .. })
        | Item::ForeignMod(ItemForeignMod { ref mut attrs, .. })
        | Item::Impl(ItemImpl { ref mut attrs, .. })
        | Item::Macro(ItemMacro { ref mut attrs, .. })
        | Item::Macro2(ItemMacro2 { ref mut attrs, .. })
        | Item::Mod(ItemMod { ref mut attrs, .. })
        | Item::Static(ItemStatic { ref mut attrs, .. })
        | Item::Struct(ItemStruct { ref mut attrs, .. })
        | Item::Trait(ItemTrait { ref mut attrs, .. })
        | Item::TraitAlias(ItemTraitAlias { ref mut attrs, .. })
        | Item::Type(ItemType { ref mut attrs, .. })
        | Item::Union(ItemUnion { ref mut attrs, .. })
        | Item::Use(ItemUse { ref mut attrs, .. }) => {
            let str = doc_string;
            // Insert an empty doc line to ensure that we get a blank line between the
            // docs and the "bibliography" containing the actual image data.
            // Otherwise the markdown parser will mess up our output.
            attrs.push(syn::parse_quote! {
                #[doc = ""]
            });
            attrs.push(syn::parse_quote! {
                #[doc = #str]
            });
            input.into_token_stream()
        }
        // Item kinds without attribute lists (e.g. Verbatim) become a compile
        // error spanned to the annotated item.
        _ => syn::Error::new_spanned(
            input,
            "Unsupported item. Cannot apply attribute to the given item.",
        )
        .to_compile_error(),
    }
    .into()
}
| ImageDescription | identifier_name |
lib.rs | //! Embed images in documentation.
//!
//! This crate enables the portable embedding of images in
//! `rustdoc`-generated documentation. Standard
//! web-compatible image formats should be supported. Please [file an issue][issue-tracker]
//! if you have problems. Read on to learn how it works.
//!
//! # Showcase
//!
//! See the [showcase documentation][showcase-docs] for an example with embedded images.
//!
//! Please also check out the [source code][showcase-source] for [the showcase crate][showcase]
//! for a fleshed out example.
//!
//! # Motivation
//!
//! A picture is worth a thousand words. This oft quoted adage is no less true for technical
//! documentation. A carefully crafted diagram lets a new user immediately
//! grasp the high-level architecture of a complex library. Illustrations of geometric conventions
//! can vastly reduce confusion among users of scientific libraries. Despite the central role
//! of images in technical documentation, embedding images in Rust documentation in a way that
//! portably works correctly across local installations and [docs.rs](https://docs.rs) has been a
//! [longstanding issue of rustdoc][rustdoc-issue].
//!
//! This crate represents a carefully crafted solution based on procedural macros that works
//! around the current limitations of `rustdoc` and enables a practically workable approach to
//! embedding images in a portable manner.
//!
//! # How to embed images in documentation
//!
//! First, you'll need to depend on this crate. In `cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! // Replace x.x with the latest version
//! embed-doc-image = "x.x"
//! ```
//!
//! What the next step is depends on whether you want to embed images into *inner attribute
//! documentation* or *outer attribute documentation*. Inner attribute documentation is usually
//! used to document crate-level or module-level documentation, and typically starts each line with
//! `//!`. Outer attribute docs are used for most other forms of documentation, such as function
//! and struct documentation. Outer attribute documentation typically starts each line with `///`.
//!
//! In both cases all image paths are relative to the **crate root**.
//!
//! ## Embedding images in outer attribute documentation
//!
//! Outer attribute documentation is typically used for documenting functions, structs, traits,
//! macros and so on. Let's consider documenting a function and embedding an image into its
//! documentation:
//!
//! ```rust
//! // Import the attribute macro
//! use embed_doc_image::embed_doc_image;
//!
//! /// Foos the bar.
//! ///
//! /// Let's drop an image below this text.
//! ///
//! ///![Alt text goes here][myimagelabel]
//! ///
//! /// And another one.
//! ///
//! ///![A Foobaring][foobaring]
//! ///
//! /// We can include any number of images in the above fashion. The important part is that
//! /// you match the label ("myimagelabel" or "foobaring" in this case) with the label in the
//! /// below attribute macro.
//! // Paths are always relative to the **crate root**
//! #[embed_doc_image("myimagelabel", "images/foo.png")]
//! #[embed_doc_image("foobaring", "assets/foobaring.jpg")]
//! fn foobar() {}
//! ```
//!
//! And that's it! If you run `cargo doc`, you should hopefully be able to see your images
//! in the documentation for `foobar`, and it should also work on `docs.rs` without trouble.
//!
//! ## Embedding images in inner attribute documentation
//!
//! The ability for macros to do *anything* with *inner attributes* is very limited. In fact,
//! before Rust 1.54 (which at the time of writing has not yet been released),
//! it is for all intents and purposes non-existent. This also means that we can not directly
//! use our approach to embed images in documentation for Rust < 1.54. However, we can make our
//! code compile with Rust < 1.54 and instead inject a prominent message that some images are
//! missing.
//! `docs.rs`, which always uses a nightly compiler, will be able to show the images. We'll
//! also locally be able to properly embed the images as long as we're using Rust >= 1.54
//! (or nightly). Here's how you can embed images in crate-level or module-level documentation:
//!
//! ```rust
//! //! My awesome crate for fast foobaring in latent space.
//! //!
//! // Important: note the blank line of documentation on each side of the image lookup table.
//! // The "image lookup table" can be placed anywhere, but we place it here together with the
//! // warning if the `doc-images` feature is not enabled.
//! #![cfg_attr(feature = "doc-images",
//! cfg_attr(all(),
//! doc = ::embed_doc_image::embed_image!("myimagelabel", "images/foo.png"),
//! doc = ::embed_doc_image::embed_image!("foobaring", "assets/foobaring.png")))]
//! #![cfg_attr(
//! not(feature = "doc-images"),
//! doc = "**Doc images not enabled**. Compile with feature `doc-images` and Rust version >= 1.54 \
//! to enable."
//! )]
//! //!
//! //! Let's use our images:
//! //!![Alt text goes here][myimagelabel]![A Foobaring][foobaring]
//! ```
//!
//! Sadly there is currently no way to detect Rust versions in `cfg_attr`. Therefore we must
//! rely on a feature flag for toggling proper image embedding. We'll need the following in our
//! `Cargo.toml`:
//!
//! ```toml
//! [features]
//! doc-images = []
//!
//! [package.metadata.docs.rs]
//! # docs.rs uses a nightly compiler, so by instructing it to use our `doc-images` feature we
//! # ensure that it will render any images that we may have in inner attribute documentation.
//! features = ["doc-images"]
//! ```
//!
//! Let's summarize:
//!
//! - `docs.rs` will correctly render our documentation with images.
//! - Locally:
//! - for Rust >= 1.54 with `--features doc-images`, the local documentation will
//! correctly render images.
//! - for Rust < 1.54: the local documentation will be missing some images, and will
//! contain a warning with instructions on how to enable proper image embedding.
//! - we can also use e.g. `cargo +nightly doc --features doc-images` to produce correct
//! documentation with a nightly compiler.
//!
//!
//! # How it works
//!
//! The crux of the issue is that `rustdoc` does not have a mechanism for tracking locally stored
//! images referenced by documentation and carry them over to the final documentation. Therefore
//! currently images on `docs.rs` can only be included if you host the image somewhere on the
//! internet and include the image with its URL. However, this has a number of issues:
//!
//! - You need to host the image, which incurs considerable additional effort on the part of
//! crate authors.
//! - The image is only available for as long as the image is hosted.
//! - Images in local documentation will not work without internet access.
//! - Images are not *versioned*, unless carefully done so manually by the crate author. That is,
//! the author must carefully provide *all* versions of the image across all versions of the
//! crate with a consistent naming convention in order to ensure that documentation of
//! older versions of the crate display the image consistent with that particular version.
//!
//! The solution employed by this crate is based on a remark made in an old
//! [reddit comment from 2017][reddit-comment]. In short, Rustdoc allows images to be provided
//! inline in the Markdown as `base64` encoded binary blobs in the following way:
//!
//! ```rust
//!![Alt text][myimagelabel]
//!
//! [myimagelabel]: data:image/png;base64,BaSe64EnCoDeDdAtA
//! ```
//!
//! Basically we can use the "reference" feature of Markdown links/images to provide the URL
//! of the image in a different location than the image itself, but instead of providing an URL
//! we can directly provide the binary data of the image in the Markdown documentation.
//!
//! However, doing this manually with images would terribly clutter the documentation, which
//! seems less than ideal. Instead, we do this programmatically. The macros available in this
//! crate essentially follow this idea:
//!
//! - Take a label and image path relative to the crate root as input.
//! - Determine the MIME type (based on extension) and `base64` encoding of the image.
//! - Produce an appropriate doc string and inject it into the Markdown documentation for the
//! crate/function/struct/etc.
//!
//! Clearly, this is still quite hacky, but it seems like a workable solution until proper support
//! in `rustdoc` arrives, at which point we may rejoice and abandon this crate to the annals
//! of history.
//!
//! # Acknowledgements
//!
//! As an inexperienced proc macro hacker, I would not have managed to arrive at this
//! solution without the help of several individuals on the Rust Programming Language Community
//! Discord server, most notably:
//!
//! - Yandros [(github.com/danielhenrymantilla)](https://github.com/danielhenrymantilla)
//! - Nemo157 [(github.com/Nemo157)](https://github.com/Nemo157)
//!
//! [showcase]: https://crates.io/crates/embed-doc-image-showcase
//! [showcase-docs]: https://docs.rs/embed-doc-image-showcase
//! [showcase-source]: https://github.com/Andlon/embed-doc-image/tree/master/embed-doc-image-showcase
//! [rustdoc-issue]: https://github.com/rust-lang/rust/issues/32104
//! [issue-tracker]: https://github.com/Andlon/embed-doc-image/issues
//! [reddit-comment]: https://www.reddit.com/r/rust/comments/5ljshj/diagrams_in_documentation/dbwg96q?utm_source=share&utm_medium=web2x&context=3
//!
//!
use proc_macro::TokenStream;
use quote::{quote, ToTokens};
use std::fs::read;
use std::path::{Path, PathBuf};
use syn::parse;
use syn::parse::{Parse, ParseStream};
use syn::{
Item, ItemConst, ItemEnum, ItemExternCrate, ItemFn, ItemForeignMod, ItemImpl, ItemMacro,
ItemMacro2, ItemMod, ItemStatic, ItemStruct, ItemTrait, ItemTraitAlias, ItemType, ItemUnion,
ItemUse,
};
/// A parsed `("label", "path/to/image.ext")` macro argument pair.
#[derive(Debug)]
struct ImageDescription {
    /// Markdown reference label used in doc comments, e.g. `myimagelabel`.
    label: String,
    /// Image path relative to the crate root (`CARGO_MANIFEST_DIR`).
    path: PathBuf,
}
/// Parses `("label", "path")` from the macro's argument tokens.
impl Parse for ImageDescription {
    fn parse(input: ParseStream) -> parse::Result<Self> {
        // Expected syntax: string-literal label, comma, string-literal path.
        let label = input.parse::<syn::LitStr>()?.value();
        input.parse::<syn::Token![,]>()?;
        let path = PathBuf::from(input.parse::<syn::LitStr>()?.value());
        Ok(ImageDescription { label, path })
    }
}
/// Read the file at `path` and return its contents base64-encoded.
///
/// # Panics
/// Panics if the file cannot be read (missing, unreadable, etc.).
fn encode_base64_image_from_path(path: &Path) -> String {
    let bytes = read(path).unwrap_or_else(|_| panic!("Failed to load image at {}", path.display()));
    base64::encode(bytes)
}
/// Map an image file extension (case-insensitive) to its MIME type.
///
/// # Panics
/// Panics on extensions that aren't a recognized web image format.
// TODO: Consider using the mime_guess crate? The below list does seem kinda
// exhaustive for doc purposes though?
fn determine_mime_type(extension: &str) -> String {
    // Matches taken haphazardly from
    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types
    let mime: &str = match extension.to_ascii_lowercase().as_str() {
        "jpg" | "jpeg" => "image/jpeg",
        "png" => "image/png",
        "bmp" => "image/bmp",
        "svg" => "image/svg+xml",
        "gif" => "image/gif",
        "tif" | "tiff" => "image/tiff",
        "webp" => "image/webp",
        "ico" => "image/vnd.microsoft.icon",
        _ => panic!("Unrecognized image extension, unable to infer correct MIME type"),
    };
    String::from(mime)
}
fn produce_doc_string_for_image(image_desc: &ImageDescription) -> String {
let root_dir = std::env::var("CARGO_MANIFEST_DIR")
.expect("Failed to retrieve value of CARGO_MANOFEST_DIR.");
let root_dir = Path::new(&root_dir);
let encoded = encode_base64_image_from_path(&root_dir.join(&image_desc.path));
let ext = image_desc.path.extension().unwrap_or_else(|| {
panic!(
"No extension for file {}. Unable to determine MIME type.",
image_desc.path.display()
)
});
let mime = determine_mime_type(&ext.to_string_lossy());
let doc_string = format!(
" [{label}]: data:{mime};base64,{encoded}",
label = &image_desc.label,
mime = mime,
encoded = &encoded
);
doc_string
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro]
pub fn embed_image(item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(item as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Ensure that the "image table" at the end is separated from the rest of the documentation,
// otherwise the markdown parser will not treat them as a "lookup table" for the image data
let s = format!("\n \n {}", doc_string);
let tokens = quote! {
#s
};
tokens.into()
}
/// Produces a doc string for inclusion in Markdown documentation.
///
/// Please see the crate-level documentation for usage instructions.
#[proc_macro_attribute]
pub fn embed_doc_image(attr: TokenStream, item: TokenStream) -> TokenStream {
let image_desc = syn::parse_macro_input!(attr as ImageDescription);
let doc_string = produce_doc_string_for_image(&image_desc);
// Then inject a doc string that "resolves" the image reference and supplies the
// base64-encoded data inline
let mut input: syn::Item = syn::parse_macro_input!(item);
match input {
Item::Const(ItemConst { ref mut attrs,.. })
| Item::Enum(ItemEnum { ref mut attrs,.. })
| Item::ExternCrate(ItemExternCrate { ref mut attrs,.. })
| Item::Fn(ItemFn { ref mut attrs,.. })
| Item::ForeignMod(ItemForeignMod { ref mut attrs,.. })
| Item::Impl(ItemImpl { ref mut attrs,.. })
| Item::Macro(ItemMacro { ref mut attrs,.. })
| Item::Macro2(ItemMacro2 { ref mut attrs,.. })
| Item::Mod(ItemMod { ref mut attrs,.. })
| Item::Static(ItemStatic { ref mut attrs,.. })
| Item::Struct(ItemStruct { ref mut attrs,.. })
| Item::Trait(ItemTrait { ref mut attrs,.. })
| Item::TraitAlias(ItemTraitAlias { ref mut attrs,.. })
| Item::Type(ItemType { ref mut attrs,.. })
| Item::Union(ItemUnion { ref mut attrs,.. })
| Item::Use(ItemUse { ref mut attrs,.. }) => {
let str = doc_string;
// Insert an empty doc line to ensure that we get a blank line between the
// docs and the "bibliography" containing the actual image data.
// Otherwise the markdown parser will mess up our output.
attrs.push(syn::parse_quote! {
#[doc = ""]
});
attrs.push(syn::parse_quote! {
#[doc = #str]
});
input.into_token_stream()
}
_ => syn::Error::new_spanned(
input,
"Unsupported item. Cannot apply attribute to the given item.",
)
.to_compile_error(),
}
.into()
} | //!
//! This crate represents a carefully crafted solution based on procedural macros that works | random_line_split |
interrupt.rs | pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model =?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local,.. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr {
crate::control_regs::Cr2::read()
}
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if!was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound Range Exceeded (0x5)"),
Self::INVALID_OPCODE => fn ud_isr("Invalid Opcode (0x6)"),
Self::DEVICE_NOT_AVAILABLE => fn no_fpu_isr("Device (FPU) Not Available (0x7)"),
Self::ALIGNMENT_CHECK => fn alignment_check_isr("Alignment Check (0x11)", code),
Self::SIMD_FLOATING_POINT => fn simd_fp_exn_isr("SIMD Floating-Point Exception (0x13)"),
Self::X87_FPU_EXCEPTION => fn x87_exn_isr("x87 Floating-Point Exception (0x10)"),
}
// other exceptions, not mapped to the "code fault" handler
self.set_isr(Self::PAGE_FAULT, page_fault_isr::<H> as *const ());
self.set_isr(Self::INVALID_TSS, invalid_tss_isr::<H> as *const ());
self.set_isr(
Self::SEGMENT_NOT_PRESENT,
segment_not_present_isr::<H> as *const (),
);
self.set_isr(
Self::STACK_SEGMENT_FAULT,
stack_segment_isr::<H> as *const (),
);
self.set_isr(Self::GENERAL_PROTECTION_FAULT, gpf_isr::<H> as *const ());
self.set_isr(Self::DOUBLE_FAULT, double_fault_isr::<H> as *const ());
// === hardware interrupts ===
// ISA standard hardware interrupts mapped on both the PICs and IO APIC
// interrupt models.
self.set_isr(Self::PIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PIT_TIMER, pit_timer_isr::<H> as *const ());
self.set_isr(Self::PIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
self.set_isr(Self::IOAPIC_PS2_KEYBOARD, keyboard_isr::<H> as *const ());
// local APIC specific hardware itnerrupts
self.set_isr(Self::LOCAL_APIC_SPURIOUS, spurious_isr as *const ());
self.set_isr(Self::LOCAL_APIC_TIMER, apic_timer_isr::<H> as *const ());
// vector 69 (nice) is reserved by the HAL for testing the IDT.
self.set_isr(69, test_isr::<H> as *const ());
Ok(())
}
}
impl fmt::Debug for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let Self {
instruction_ptr,
code_segment,
stack_ptr,
stack_segment,
_pad: _,
cpu_flags,
_pad2: _,
} = self;
f.debug_struct("Registers")
.field("instruction_ptr", instruction_ptr)
.field("code_segment", code_segment)
.field("cpu_flags", &format_args!("{cpu_flags:#b}"))
.field("stack_ptr", stack_ptr)
.field("stack_segment", stack_segment)
.finish()
}
}
impl fmt::Display for Registers {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(f, " rip: {:?}", self.instruction_ptr)?;
writeln!(f, " cs: {:?}", self.code_segment)?;
writeln!(f, " flags: {:#b}", self.cpu_flags)?;
writeln!(f, " rsp: {:?}", self.stack_ptr)?;
writeln!(f, " ss: {:?}", self.stack_segment)?;
Ok(())
}
}
pub fn fire_test_interrupt() {
unsafe { asm!("int {0}", const 69) }
}
// === impl SelectorErrorCode ===
impl SelectorErrorCode {
#[inline]
fn named(self, segment_kind: &'static str) -> NamedSelectorErrorCode {
NamedSelectorErrorCode {
segment_kind,
code: self,
}
}
fn display(&self) -> impl fmt::Display {
struct PrettyErrorCode(SelectorErrorCode);
impl fmt::Display for PrettyErrorCode {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let table = self.0.get(SelectorErrorCode::TABLE);
let index = self.0.get(SelectorErrorCode::INDEX);
write!(f, "{table} index {index}")?;
if self.0.get(SelectorErrorCode::EXTERNAL) {
f.write_str(" (from an external source)")?;
}
write!(f, " (error code {:#b})", self.0.bits())?;
Ok(())
}
}
PrettyErrorCode(*self)
}
}
struct NamedSelectorErrorCode {
segment_kind: &'static str,
code: SelectorErrorCode,
}
impl fmt::Display for NamedSelectorErrorCode {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} at {}", self.segment_kind, self.code.display())
}
}
#[cfg(test)]
mod tests {
use super::*;
use core::mem::size_of;
#[test]
fn | registers_is_correct_size | identifier_name |
|
interrupt.rs | pub const RESERVED_WRITE: bool;
/// When set, the page fault was caused by an instruction fetch. This
/// only applies when the No-Execute bit is supported and enabled.
pub const INSTRUCTION_FETCH: bool;
/// When set, the page fault was caused by a protection-key violation.
/// The PKRU register (for user-mode accesses) or PKRS MSR (for
/// supervisor-mode accesses) specifies the protection key rights.
pub const PROTECTION_KEY: bool;
/// When set, the page fault was caused by a shadow stack access.
pub const SHADOW_STACK: bool;
const _RESERVED0 = 8;
/// When set, the fault was due to an SGX violation. The fault is
/// unrelated to ordinary paging.
pub const SGX: bool;
}
}
bits::bitfield! {
/// Error code set by the "Invalid TSS", "Segment Not Present", "Stack-Segment
/// Fault", and "General Protection Fault" faults.
///
/// This includes a segment selector index, and includes 2 bits describing
/// which table the segment selector references.
pub struct SelectorErrorCode<u16> {
const EXTERNAL: bool;
const TABLE: cpu::DescriptorTable;
const INDEX = 13;
}
}
#[repr(C)]
pub struct Registers {
pub instruction_ptr: VAddr, // TODO(eliza): add VAddr
pub code_segment: segment::Selector,
_pad: [u16; 3],
pub cpu_flags: u64, // TODO(eliza): rflags type?
pub stack_ptr: VAddr, // TODO(eliza): add VAddr
pub stack_segment: segment::Selector,
_pad2: [u16; 3],
}
static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr =?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model =?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local,.. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr |
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if!was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr | {
crate::control_regs::Cr2::read()
} | identifier_body |
interrupt.rs | pub const RESERVED_WRITE: bool;
/// When set, the page fault was caused by an instruction fetch. This
/// only applies when the No-Execute bit is supported and enabled.
pub const INSTRUCTION_FETCH: bool;
/// When set, the page fault was caused by a protection-key violation.
/// The PKRU register (for user-mode accesses) or PKRS MSR (for
/// supervisor-mode accesses) specifies the protection key rights.
pub const PROTECTION_KEY: bool;
/// When set, the page fault was caused by a shadow stack access.
pub const SHADOW_STACK: bool;
const _RESERVED0 = 8;
/// When set, the fault was due to an SGX violation. The fault is
/// unrelated to ordinary paging.
pub const SGX: bool;
}
}
bits::bitfield! {
/// Error code set by the "Invalid TSS", "Segment Not Present", "Stack-Segment
/// Fault", and "General Protection Fault" faults.
///
/// This includes a segment selector index, and includes 2 bits describing
/// which table the segment selector references.
pub struct SelectorErrorCode<u16> {
const EXTERNAL: bool;
const TABLE: cpu::DescriptorTable;
const INDEX = 13;
}
}
#[repr(C)]
pub struct Registers {
pub instruction_ptr: VAddr, // TODO(eliza): add VAddr
pub code_segment: segment::Selector,
_pad: [u16; 3],
pub cpu_flags: u64, // TODO(eliza): rflags type?
pub stack_ptr: VAddr, // TODO(eliza): add VAddr
pub stack_segment: segment::Selector,
_pad2: [u16; 3],
}
static IDT: spin::Mutex<idt::Idt> = spin::Mutex::new(idt::Idt::new());
static INTERRUPT_CONTROLLER: InitOnce<Controller> = InitOnce::uninitialized();
impl Controller {
// const DEFAULT_IOAPIC_BASE_PADDR: u64 = 0xFEC00000;
pub fn idt() -> spin::MutexGuard<'static, idt::Idt> {
IDT.lock()
}
#[tracing::instrument(level = "info", name = "interrupt::Controller::init")]
pub fn init<H: Handlers<Registers>>() {
tracing::info!("intializing IDT...");
let mut idt = IDT.lock();
idt.register_handlers::<H>().unwrap();
unsafe {
idt.load_raw();
}
}
pub fn enable_hardware_interrupts(
acpi: Option<&acpi::InterruptModel>,
frame_alloc: &impl hal_core::mem::page::Alloc<mm::size::Size4Kb>,
) -> &'static Self {
let mut pics = pic::CascadedPic::new();
// regardless of whether APIC or PIC interrupt handling will be used,
// the PIC interrupt vectors must be remapped so that they do not
// conflict with CPU exceptions.
unsafe {
tracing::debug!(
big = Idt::PIC_BIG_START,
little = Idt::PIC_LITTLE_START,
"remapping PIC interrupt vectors"
);
pics.set_irq_address(Idt::PIC_BIG_START as u8, Idt::PIC_LITTLE_START as u8);
}
let model = match acpi {
Some(acpi::InterruptModel::Apic(apic_info)) => {
tracing::info!("detected APIC interrupt model");
let mut pagectrl = mm::PageCtrl::current();
// disable the 8259 PICs so that we can use APIC interrupts instead
unsafe {
pics.disable();
}
tracing::info!("disabled 8259 PICs");
// configure the I/O APIC
let mut io = {
// TODO(eliza): consider actually using other I/O APICs? do
// we need them for anything??
tracing::trace!(?apic_info.io_apics, "found {} IO APICs", apic_info.io_apics.len());
let io_apic = &apic_info.io_apics[0];
let addr = PAddr::from_u64(io_apic.address as u64);
tracing::debug!(ioapic.paddr =?addr, "IOAPIC");
IoApic::new(addr, &mut pagectrl, frame_alloc)
};
// map the standard ISA hardware interrupts to I/O APIC
// redirection entries.
io.map_isa_irqs(Idt::IOAPIC_START as u8);
// unmask the PIT timer vector --- we'll need this for calibrating
// the local APIC timer...
io.set_masked(IoApic::PIT_TIMER_IRQ, false);
// unmask the PS/2 keyboard interrupt as well.
io.set_masked(IoApic::PS2_KEYBOARD_IRQ, false);
// enable the local APIC
let local = LocalApic::new(&mut pagectrl, frame_alloc);
local.enable(Idt::LOCAL_APIC_SPURIOUS as u8);
InterruptModel::Apic {
local,
io: spin::Mutex::new(io),
}
}
model => {
if model.is_none() {
tracing::warn!("platform does not support ACPI; falling back to 8259 PIC");
} else {
tracing::warn!(
"ACPI does not indicate APIC interrupt model; falling back to 8259 PIC"
)
}
tracing::info!("configuring 8259 PIC interrupts...");
unsafe {
// functionally a no-op, since interrupts from PC/AT PIC are enabled at boot, just being
// clear for you, the reader, that at this point they are definitely intentionally enabled.
pics.enable();
}
InterruptModel::Pic(spin::Mutex::new(pics))
}
};
tracing::trace!(interrupt_model =?model);
let controller = INTERRUPT_CONTROLLER.init(Self { model });
// `sti` may not be called until the interrupt controller static is
// fully initialized, as an interrupt that occurs before it is
// initialized may attempt to access the static to finish the interrupt!
unsafe {
crate::cpu::intrinsics::sti();
}
controller
}
/// Starts a periodic timer which fires the `timer_tick` interrupt of the
/// provided [`Handlers`] every time `interval` elapses.
pub fn start_periodic_timer(
&self,
interval: Duration,
) -> Result<(), crate::time::InvalidDuration> {
match self.model {
InterruptModel::Pic(_) => crate::time::PIT.lock().start_periodic_timer(interval),
InterruptModel::Apic { ref local,.. } => {
local.start_periodic_timer(interval, Idt::LOCAL_APIC_TIMER as u8)
}
}
}
}
impl<'a, T> hal_core::interrupt::Context for Context<'a, T> {
type Registers = Registers;
fn registers(&self) -> &Registers {
self.registers
}
/// # Safety
///
/// Mutating the value of saved interrupt registers can cause
/// undefined behavior.
unsafe fn registers_mut(&mut self) -> &mut Registers {
self.registers
}
}
impl<'a> ctx::PageFault for Context<'a, PageFaultCode> {
fn fault_vaddr(&self) -> crate::VAddr {
crate::control_regs::Cr2::read()
}
fn debug_error_code(&self) -> &dyn fmt::Debug {
&self.code
}
fn display_error_code(&self) -> &dyn fmt::Display {
&self.code
}
}
impl<'a> ctx::CodeFault for Context<'a, CodeFault<'a>> {
fn is_user_mode(&self) -> bool {
false // TODO(eliza)
}
fn instruction_ptr(&self) -> crate::VAddr {
self.registers.instruction_ptr
}
fn fault_kind(&self) -> &'static str {
self.code.kind
}
fn details(&self) -> Option<&dyn fmt::Display> {
self.code.error_code
}
}
impl<'a> Context<'a, ErrorCode> {
pub fn error_code(&self) -> ErrorCode {
self.code
}
}
impl<'a> Context<'a, PageFaultCode> {
pub fn page_fault_code(&self) -> PageFaultCode {
self.code
}
}
impl hal_core::interrupt::Control for Idt {
// type Vector = u8;
type Registers = Registers;
#[inline]
unsafe fn disable(&mut self) {
crate::cpu::intrinsics::cli();
}
#[inline]
unsafe fn enable(&mut self) {
crate::cpu::intrinsics::sti();
tracing::trace!("interrupts enabled");
}
fn is_enabled(&self) -> bool {
unimplemented!("eliza do this one!!!")
}
fn register_handlers<H>(&mut self) -> Result<(), hal_core::interrupt::RegistrationError>
where
H: Handlers<Registers>,
{
macro_rules! gen_code_faults {
($self:ident, $h:ty, $($vector:path => fn $name:ident($($rest:tt)+),)+) => {
$(
gen_code_faults! {@ $name($($rest)+); }
$self.set_isr($vector, $name::<$h> as *const ());
)+
};
(@ $name:ident($kind:literal);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(mut registers: Registers) {
let code = CodeFault {
error_code: None,
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
(@ $name:ident($kind:literal, code);) => {
extern "x86-interrupt" fn $name<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
let code = CodeFault {
error_code: Some(&code),
kind: $kind,
};
H::code_fault(Context { registers: &mut registers, code });
}
};
}
let span = tracing::debug_span!("Idt::register_handlers");
let _enter = span.enter();
extern "x86-interrupt" fn page_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: PageFaultCode,
) {
H::page_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn double_fault_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
H::double_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn pit_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
use core::sync::atomic::Ordering;
// if we weren't trying to do a PIT sleep, handle the timer tick
// instead.
let was_sleeping = crate::time::pit::SLEEPING
.compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
.is_ok();
if!was_sleeping {
H::timer_tick();
} else {
tracing::trace!("PIT sleep completed");
}
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PIT_TIMER as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn apic_timer_isr<H: Handlers<Registers>>(_regs: Registers) {
H::timer_tick();
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(_) => unreachable!(),
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn keyboard_isr<H: Handlers<Registers>>(_regs: Registers) {
// 0x60 is a magic PC/AT number.
static PORT: cpu::Port = cpu::Port::at(0x60);
// load-bearing read - if we don't read from the keyboard controller it won't
// send another interrupt on later keystrokes.
let scancode = unsafe { PORT.readb() };
H::ps2_keyboard(scancode);
unsafe {
match INTERRUPT_CONTROLLER.get_unchecked().model {
InterruptModel::Pic(ref pics) => {
pics.lock().end_interrupt(Idt::PIC_PS2_KEYBOARD as u8)
}
InterruptModel::Apic { ref local,.. } => local.end_interrupt(),
}
}
}
extern "x86-interrupt" fn test_isr<H: Handlers<Registers>>(mut registers: Registers) {
H::test_interrupt(Context {
registers: &mut registers,
code: (),
});
}
extern "x86-interrupt" fn invalid_tss_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "invalid task-state segment!");
let msg = selector.named("task-state segment (TSS)");
let code = CodeFault {
error_code: Some(&msg),
kind: "Invalid TSS (0xA)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn segment_not_present_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a segment was not present!");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Segment Not Present (0xB)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn stack_segment_isr<H: Handlers<Registers>>(
mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let selector = SelectorErrorCode(code as u16);
tracing::error!(?selector, "a stack-segment fault is happening");
let msg = selector.named("stack segment");
let code = CodeFault {
error_code: Some(&msg),
kind: "Stack-Segment Fault (0xC)",
};
H::code_fault(Context {
registers: &mut registers, | mut registers: Registers,
code: u64,
) {
unsafe {
// Safety: who cares!
crate::vga::writer().force_unlock();
if let Some(com1) = crate::serial::com1() {
com1.force_unlock();
}
}
let segment = if code > 0 {
Some(SelectorErrorCode(code as u16))
} else {
None
};
tracing::error!(?segment, "lmao, a general protection fault is happening");
let error_code = segment.map(|seg| seg.named("selector"));
let code = CodeFault {
error_code: error_code.as_ref().map(|code| code as &dyn fmt::Display),
kind: "General Protection Fault (0xD)",
};
H::code_fault(Context {
registers: &mut registers,
code,
});
}
extern "x86-interrupt" fn spurious_isr() {
tracing::trace!("spurious");
}
// === exceptions ===
// these exceptions are mapped to the HAL `Handlers` trait's "code
// fault" handler, and indicate that the code that was executing did a
// Bad Thing
gen_code_faults! {
self, H,
Self::DIVIDE_BY_ZERO => fn div_0_isr("Divide-By-Zero (0x0)"),
Self::OVERFLOW => fn overflow_isr("Overflow (0x4)"),
Self::BOUND_RANGE_EXCEEDED => fn br_isr("Bound | code,
});
}
extern "x86-interrupt" fn gpf_isr<H: Handlers<Registers>>( | random_line_split |
node.rs | use petgraph;
use petgraph::graph::NodeIndex; |
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress {.. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress {.. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient +'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress {.. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress {.. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress {.. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
} | random_line_split |
|
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress {.. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress {.. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient +'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn | (&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress {.. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress {.. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress {.. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
| take | identifier_name |
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress {.. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress {.. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient +'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
{
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
}
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress {.. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress {.. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
| se {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress {.. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
| true
} el | conditional_block |
node.rs | use petgraph;
use petgraph::graph::NodeIndex;
use std::sync::mpsc;
use std::sync;
use std::fmt;
use std::collections::HashMap;
use std::ops::{Deref, DerefMut};
use checktable;
use flow::data::DataType;
use ops::{Record, Datas};
use flow::domain;
use flow::{Ingredient, NodeAddress, Edge};
use flow::payload::Packet;
use flow::migrate::materialization::Tag;
use backlog;
/// A StreamUpdate reflects the addition or deletion of a row from a reader node.
#[derive(Clone, Debug, PartialEq)]
pub enum StreamUpdate {
/// Indicates the addition of a new row
AddRow(sync::Arc<Vec<DataType>>),
/// Indicates the removal of an existing row
DeleteRow(sync::Arc<Vec<DataType>>),
}
impl From<Record> for StreamUpdate {
fn from(other: Record) -> Self {
match other {
Record::Positive(u) => StreamUpdate::AddRow(u),
Record::Negative(u) => StreamUpdate::DeleteRow(u),
Record::DeleteRequest(..) => unreachable!(),
}
}
}
impl From<Vec<DataType>> for StreamUpdate {
fn from(other: Vec<DataType>) -> Self {
StreamUpdate::AddRow(sync::Arc::new(other))
}
}
#[derive(Clone)]
pub struct Reader {
pub streamers: sync::Arc<sync::Mutex<Vec<mpsc::Sender<Vec<StreamUpdate>>>>>,
pub state: Option<backlog::ReadHandle>,
pub token_generator: Option<checktable::TokenGenerator>,
}
impl Reader {
pub fn get_reader
(&self)
-> Option<Box<Fn(&DataType) -> Result<Vec<Vec<DataType>>, ()> + Send + Sync>> {
self.state.clone().map(|arc| {
Box::new(move |q: &DataType| -> Result<Datas, ()> {
arc.find_and(q,
|rs| rs.into_iter().map(|v| (&**v).clone()).collect::<Vec<_>>())
.map(|r| r.0)
}) as Box<_>
})
}
pub fn key(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.key()),
}
}
pub fn len(&self) -> Result<usize, String> {
match self.state {
None => Err(String::from("no state on reader")),
Some(ref s) => Ok(s.len()),
}
}
}
impl Default for Reader {
fn default() -> Self {
Reader {
streamers: sync::Arc::default(),
state: None,
token_generator: None,
}
}
}
enum NodeHandle {
Owned(Type),
Taken(Type),
}
impl NodeHandle {
pub fn mark_taken(&mut self) {
use std::mem;
match mem::replace(self, NodeHandle::Owned(Type::Source)) {
NodeHandle::Owned(t) => {
mem::replace(self, NodeHandle::Taken(t));
}
NodeHandle::Taken(_) => {
unreachable!("tried to take already taken value");
}
}
}
}
impl Deref for NodeHandle {
type Target = Type;
fn deref(&self) -> &Self::Target {
match *self {
NodeHandle::Owned(ref t) |
NodeHandle::Taken(ref t) => t,
}
}
}
impl DerefMut for NodeHandle {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
NodeHandle::Owned(ref mut t) => t,
NodeHandle::Taken(_) => unreachable!("cannot mutate taken node"),
}
}
}
pub enum Type {
Ingress,
Internal(Box<Ingredient>),
Egress {
txs: sync::Arc<sync::Mutex<Vec<(NodeAddress, NodeAddress, mpsc::SyncSender<Packet>)>>>,
tags: sync::Arc<sync::Mutex<HashMap<Tag, NodeAddress>>>,
},
Reader(Option<backlog::WriteHandle>, Reader),
Source,
}
impl Type {
// Returns a map from base node to the column in that base node whose value must match the value
// of this node's column to cause a conflict. Returns None for a given base node if any write to
// that base node might cause a conflict.
pub fn base_columns(&self,
column: usize,
graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
fn base_parents(graph: &petgraph::Graph<Node, Edge>,
index: NodeIndex)
-> Vec<(NodeIndex, Option<usize>)> {
if let Type::Internal(ref i) = *graph[index] {
if i.is_base() {
return vec![(index, None)];
}
}
graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.flat_map(|n| base_parents(graph, n))
.collect()
}
let parents: Vec<_> = graph.neighbors_directed(index, petgraph::EdgeDirection::Incoming)
.collect();
match *self {
Type::Ingress |
Type::Reader(..) |
Type::Egress {.. } => {
assert_eq!(parents.len(), 1);
graph[parents[0]].base_columns(column, graph, parents[0])
}
Type::Internal(ref i) => {
if i.is_base() {
vec![(index, Some(column))]
} else {
i.parent_columns(column)
.into_iter()
.flat_map(|(n, c)| {
let n = if n.is_global() {
*n.as_global()
} else {
// Find the parent with node address matching the result from
// parent_columns.
*parents.iter()
.find(|p| match graph[**p].addr {
Some(a) if a == n => true,
_ => false,
})
.unwrap()
};
match c {
Some(c) => graph[n].base_columns(c, graph, n),
None => base_parents(graph, n),
}
})
.collect()
}
}
Type::Source => unreachable!(),
}
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Type::Source => write!(f, "source node"),
Type::Ingress => write!(f, "ingress node"),
Type::Egress {.. } => write!(f, "egress node"),
Type::Reader(..) => write!(f, "reader node"),
Type::Internal(ref i) => write!(f, "internal {} node", i.description()),
}
}
}
impl Deref for Type {
type Target = Ingredient;
fn deref(&self) -> &Self::Target {
match *self {
Type::Internal(ref i) => i.deref(),
_ => unreachable!(),
}
}
}
impl DerefMut for Type {
fn deref_mut(&mut self) -> &mut Self::Target {
match *self {
Type::Internal(ref mut i) => i.deref_mut(),
_ => unreachable!(),
}
}
}
impl<I> From<I> for Type
where I: Ingredient +'static
{
fn from(i: I) -> Type {
Type::Internal(Box::new(i))
}
}
pub struct Node {
name: String,
domain: Option<domain::Index>,
addr: Option<NodeAddress>,
fields: Vec<String>,
inner: NodeHandle,
}
impl Node {
pub fn new<S1, FS, S2>(name: S1, fields: FS, inner: Type) -> Node
where S1: ToString,
S2: ToString,
FS: IntoIterator<Item = S2>
|
pub fn mirror(&self, n: Type) -> Node {
let mut n = Self::new(&*self.name, &self.fields, n);
n.domain = self.domain;
n
}
pub fn name(&self) -> &str {
&*self.name
}
pub fn fields(&self) -> &[String] {
&self.fields[..]
}
pub fn domain(&self) -> domain::Index {
match self.domain {
Some(domain) => domain,
None => {
unreachable!("asked for unset domain for {:?}", &*self.inner);
}
}
}
pub fn addr(&self) -> NodeAddress {
match self.addr {
Some(addr) => addr,
None => {
unreachable!("asked for unset addr for {:?}", &*self.inner);
}
}
}
pub fn take(&mut self) -> Node {
let inner = match *self.inner {
Type::Egress { ref tags, ref txs } => {
// egress nodes can still be modified externally if subgraphs are added
// so we just make a new one with a clone of the Mutex-protected Vec
Type::Egress {
txs: txs.clone(),
tags: tags.clone(),
}
}
Type::Reader(ref mut w, ref r) => {
// reader nodes can still be modified externally if txs are added
Type::Reader(w.take(), r.clone())
}
Type::Ingress => Type::Ingress,
Type::Internal(ref mut i) if self.domain.is_some() => Type::Internal(i.take()),
Type::Internal(_) |
Type::Source => unreachable!(),
};
self.inner.mark_taken();
let mut n = self.mirror(inner);
n.addr = self.addr;
n
}
pub fn add_to(&mut self, domain: domain::Index) {
self.domain = Some(domain);
}
pub fn set_addr(&mut self, addr: NodeAddress) {
self.addr = Some(addr);
}
pub fn on_commit(&mut self, remap: &HashMap<NodeAddress, NodeAddress>) {
assert!(self.addr.is_some());
self.inner.on_commit(self.addr.unwrap(), remap)
}
pub fn describe(&self, f: &mut fmt::Write, idx: NodeIndex) -> fmt::Result {
use regex::Regex;
let escape = |s: &str| Regex::new("([\"|{}])").unwrap().replace_all(s, "\\$1");
write!(f,
" [style=filled, fillcolor={}, label=\"",
self.domain
.map(|d| -> usize { d.into() })
.map(|d| format!("\"/set312/{}\"", (d % 12) + 1))
.unwrap_or("white".into()))?;
match *self.inner {
Type::Source => write!(f, "(source)"),
Type::Ingress => write!(f, "{{ {} | (ingress) }}", idx.index()),
Type::Egress {.. } => write!(f, "{{ {} | (egress) }}", idx.index()),
Type::Reader(_, ref r) => {
let key = match r.key() {
Err(_) => String::from("none"),
Ok(k) => format!("{}", k),
};
let size = match r.len() {
Err(_) => String::from("empty"),
Ok(s) => format!("{} distinct keys", s),
};
write!(f,
"{{ {} | (reader / key: {}) | {} }}",
idx.index(),
key,
size)
}
Type::Internal(ref i) => {
write!(f, "{{")?;
// Output node name and description. First row.
write!(f,
"{{ {} / {} | {} }}",
idx.index(),
escape(self.name()),
escape(&i.description()))?;
// Output node outputs. Second row.
write!(f, " | {}", self.fields().join(", \\n"))?;
// Maybe output node's HAVING conditions. Optional third row.
// TODO
// if let Some(conds) = n.node().unwrap().having_conditions() {
// let conds = conds.iter()
// .map(|c| format!("{}", c))
// .collect::<Vec<_>>()
// .join(" ∧ ");
// write!(f, " | σ({})", escape(&conds))?;
// }
write!(f, " }}")
}
}?;
writeln!(f, "\"]")
}
pub fn is_egress(&self) -> bool {
if let Type::Egress {.. } = *self.inner {
true
} else {
false
}
}
pub fn is_ingress(&self) -> bool {
if let Type::Ingress = *self.inner {
true
} else {
false
}
}
pub fn is_internal(&self) -> bool {
if let Type::Internal(..) = *self.inner {
true
} else {
false
}
}
/// A node is considered to be an output node if changes to its state are visible outside of
/// its domain.
pub fn is_output(&self) -> bool {
match *self.inner {
Type::Egress {.. } |
Type::Reader(..) => true,
_ => false,
}
}
}
impl Deref for Node {
type Target = Type;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for Node {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
| {
Node {
name: name.to_string(),
domain: None,
addr: None,
fields: fields.into_iter().map(|s| s.to_string()).collect(),
inner: NodeHandle::Owned(inner),
}
} | identifier_body |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// TcpStream にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// TcpListener にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Listener へのコールバック終了後に Listener が Dispatcher に指示する動作を表す列挙型です。
pub enum DispatcherAction {
/// 特に何も行わないで処理を続行することを示します。
Continue,
/// 指定された Interest フラグに変更することを指定します。
ChangeFlag(Interest),
/// イベントの発生元となるソケットなどの Source の破棄を指定します。
Dispose,
}
// ##############################################################################################
// イベントループスレッド内で外部の指定した処理を行うために channel 経由で送受信されるタスクとその結果を返す Future
// の定義。
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send +'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send +'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// 新しいディスパッチャーを起動します。
/// poll が作成されイベントループが開始します。
///
/// # Arguments
/// * `event_buffer_size` - 一度の poll で読み込むイベントの最大数。
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// 指定された ID のソケットを
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send +'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// poll() のためのイベントループを開始します。イベントループスレッドの中で任意の処理を行う場合は receiver に対応
/// する sender に実行するタスクを投入し、self.poll に登録済みの Waker.wake() でブロッキングを抜けます。
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while!self.stopped {
self.poll.poll(&mut events, None)?;
// イベントの発生したソケットを取得
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// イベントの発生したソケットの処理を実行
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// 指定された receiver に存在するすべてのタスクを実行します。
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// 指定された ID のソケットを廃棄します。この操作により対応するソケットはクローズします。
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// 登録されているすべてのソケットを廃棄します。この操作によりソケットはクローズされます。
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), inter | spatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// 読み込み可能イベント
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// 書き込み可能イベント
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// ソケット接続イベント
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Poll に登録するソケットを格納する列挙型。
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// オブジェクトに対する ID の割当と ID による参照操作を行うためのマップ。
/// Poll で通知されたトークンからソケットを特定するために使用します。
/// Note that this [IdMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// 新規のマップを作成します。
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// 指定された ID のオブジェクトを参照します。
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// 管理されているすべての ID を参照します。
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// 使用可能な ID を検索します。
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) は Waker 用、Token(usize::MAX) は Poll が内部的に使用しているためそれぞれ予約されている
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// 指定された ID のソケットを新規追加または更新します。
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
| est).unwrap();
}
Di | conditional_block |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// TcpStream にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// TcpListener にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Listener へのコールバック終了後に Listener が Dispatcher に指示する動作を表す列挙型です。
pub enum DispatcherAction {
/// 特に何も行わないで処理を続行することを示します。
Continue,
/// 指定された Interest フラグに変更することを指定します。
ChangeFlag(Interest),
/// イベントの発生元となるソケットなどの Source の破棄を指定します。
Dispose,
}
// ##############################################################################################
// イベントループスレッド内で外部の指定した処理を行うために channel 経由で送受信されるタスクとその結果を返す Future
// の定義。
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send +'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send +'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// 新しいディスパッチャーを起動します。
/// poll が作成されイベントループが開始します。
///
/// # Arguments
/// * `event_buffer_size` - 一度の poll で読み込むイベントの最大数。
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// 指定された ID のソケットを
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send +'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// poll() のためのイベントループを開始します。イベントループスレッドの中で任意の処理を行う場合は receiver に対応
/// する sender に実行するタスクを投入し、self.poll に登録済みの Waker.wake() でブロッキングを抜けます。
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while!self.stopped {
self.poll.poll(&mut events, None)?;
// イベントの発生したソケットを取得
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// イベントの発生したソケットの処理を実行
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// 指定された receiver に存在するすべてのタスクを実行します。
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// 指定された ID のソケットを廃棄します。この操作により対応するソケットはクローズします。
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// 登録されているすべてのソケットを廃棄します。この操作によりソケットはクローズされます。
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// 読み込み可能イベント
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// 書き込み可能イベント
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// ソケット接続イベント
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Poll に登録するソケットを格納する列挙型。
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// オブジェクトに対する ID の割当と ID による参照操作を行うためのマップ。
/// Poll で通知されたトークンからソケットを特定するために使用します。
/// Note that this [IdMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// 新規のマップを作成します。
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// 指定された ID のオブジェクトを参照します。
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// 管理されているすべての ID を参照します。
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// 使用可能な ID を検索します。
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) は Waker 用、Token(usize::MAX) は Poll が内部的に使用しているためそれぞれ予約されている
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
| f self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// 指定された ID のソケットを新規追加または更新します。
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
| i | identifier_name |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// TcpStream にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// TcpListener にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Listener へのコールバック終了後に Listener が Dispatcher に指示する動作を表す列挙型です。
pub enum DispatcherAction {
/// 特に何も行わないで処理を続行することを示します。
Continue,
/// 指定された Interest フラグに変更することを指定します。
ChangeFlag(Interest),
/// イベントの発生元となるソケットなどの Source の破棄を指定します。
Dispose,
}
// ##############################################################################################
// イベントループスレッド内で外部の指定した処理を行うために channel 経由で送受信されるタスクとその結果を返す Future
// の定義。
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send +'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send +'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> {
type Output = R;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// 新しいディスパッチャーを起動します。
/// poll が作成されイベントループが開始します。
///
/// # Arguments
/// * `event_buffer_size` - 一度の poll で読み込むイベントの最大数。
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// 指定された ID のソケットを
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send +'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true; | Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// poll() のためのイベントループを開始します。イベントループスレッドの中で任意の処理を行う場合は receiver に対応
/// する sender に実行するタスクを投入し、self.poll に登録済みの Waker.wake() でブロッキングを抜けます。
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while!self.stopped {
self.poll.poll(&mut events, None)?;
// イベントの発生したソケットを取得
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// イベントの発生したソケットの処理を実行
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// 指定された receiver に存在するすべてのタスクを実行します。
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// 指定された ID のソケットを廃棄します。この操作により対応するソケットはクローズします。
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// 登録されているすべてのソケットを廃棄します。この操作によりソケットはクローズされます。
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// 読み込み可能イベント
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// 書き込み可能イベント
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// ソケット接続イベント
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Poll に登録するソケットを格納する列挙型。
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// オブジェクトに対する ID の割当と ID による参照操作を行うためのマップ。
/// Poll で通知されたトークンからソケットを特定するために使用します。
/// Note that this [IdMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// 新規のマップを作成します。
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// 指定された ID のオブジェクトを参照します。
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// 管理されているすべての ID を参照します。
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// 使用可能な ID を検索します。
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) は Waker 用、Token(usize::MAX) は Poll が内部的に使用しているためそれぞれ予約されている
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// 指定された ID のソケットを新規追加または更新します。
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
}
|
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn | identifier_body |
mod.rs | use std::collections::HashMap;
use std::future::Future;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::task::{Context, Waker};
use std::thread::spawn;
use log;
use mio::{Events, Interest, Poll, Token};
use mio::event::{Event, Source};
use mio::net::{TcpListener, TcpStream};
use crate::error::Error;
use crate::Result;
#[cfg(test)]
mod test;
/// TcpStream にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpStreamListener: Send {
fn on_ready_to_read(&mut self, r: &mut dyn Read) -> DispatcherAction;
fn on_ready_to_write(&mut self, w: &mut dyn Write) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// TcpListener にイベントが発生したときに呼び出されるコールバック用のトレイトです。
/// 返値を使用してその後のアクションを指定することができます。
pub trait TcpListenerListener: Send {
fn on_accept(&mut self, stream: TcpStream, address: SocketAddr) -> DispatcherAction;
fn on_error(&mut self, error: std::io::Error) -> DispatcherAction;
}
/// Listener へのコールバック終了後に Listener が Dispatcher に指示する動作を表す列挙型です。
pub enum DispatcherAction {
/// 特に何も行わないで処理を続行することを示します。
Continue,
/// 指定された Interest フラグに変更することを指定します。
ChangeFlag(Interest),
/// イベントの発生元となるソケットなどの Source の破棄を指定します。
Dispose,
}
// ##############################################################################################
// イベントループスレッド内で外部の指定した処理を行うために channel 経由で送受信されるタスクとその結果を返す Future
// の定義。
type Executable<R> = dyn (FnOnce(&mut PollingLoop) -> R) + Send +'static;
struct TaskState<R> {
result: Option<R>,
waker: Option<Waker>,
}
struct Task<R> {
executable: Box<Executable<R>>,
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Task<R> {
fn new<E>(executable: Box<E>) -> Self
where
E: (FnOnce(&mut PollingLoop) -> R) + Send +'static,
{
Self { executable, state: Arc::new(Mutex::new(TaskState { result: None, waker: None })) }
}
}
pub struct TaskFuture<R> {
state: Arc<Mutex<TaskState<R>>>,
}
impl<R> Future for TaskFuture<R> { |
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> std::task::Poll<Self::Output> {
use std::task::Poll;
let mut state = self.state.lock().unwrap();
if let Some(result) = state.result.take() {
Poll::Ready(result)
} else {
state.waker = Some(cx.waker().clone());
Poll::Pending
}
}
}
// ##############################################################################################
pub type SocketId = usize;
pub struct Dispatcher {
sender: Sender<Task<Result<SocketId>>>,
waker: mio::Waker,
}
impl Dispatcher {
/// 新しいディスパッチャーを起動します。
/// poll が作成されイベントループが開始します。
///
/// # Arguments
/// * `event_buffer_size` - 一度の poll で読み込むイベントの最大数。
///
pub fn new(event_buffer_size: usize) -> Result<Dispatcher> {
let (sender, receiver) = channel();
let poll = Poll::new()?;
let waker = mio::Waker::new(poll.registry(), Token(0))?;
let mut polling_loop = PollingLoop::new(poll, event_buffer_size);
spawn(move || polling_loop.start(receiver));
Ok(Dispatcher { sender, waker })
}
/// 指定された ID のソケットを
pub fn dispose(&self, id: SocketId) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.close(id);
Ok(id)
}))
}
fn run_in_event_loop<E>(&self, exec: Box<E>) -> Box<dyn Future<Output=Result<SocketId>>>
where
E: (FnOnce(&mut PollingLoop) -> Result<SocketId>) + Send +'static,
{
let task = Task::new(exec);
let future = TaskFuture { state: task.state.clone() };
self.sender.send(task).unwrap();
self.waker.wake().unwrap();
Box::new(future)
}
}
impl Drop for Dispatcher {
fn drop(&mut self) {
log::debug!("stopping dispatcher...");
let _ = self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
polling.stopped = true;
Ok(0usize)
}));
}
}
trait DispatcherRegister<S, L> {
fn register(&self, source: S, listener: L) -> Box<dyn Future<Output=Result<SocketId>>>;
}
impl DispatcherRegister<TcpListener, Box<dyn TcpListenerListener>> for Dispatcher {
fn register(
&self,
mut listener: TcpListener,
event_listener: Box<dyn TcpListenerListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(&mut listener, Token(id), Interest::READABLE)?;
polling.sockets.set(id, Socket::Listener(listener, event_listener));
Ok(id)
}))
}
}
impl DispatcherRegister<TcpStream, Box<dyn TcpStreamListener>> for Dispatcher {
fn register(
&self,
mut stream: TcpStream,
listener: Box<dyn TcpStreamListener>,
) -> Box<dyn Future<Output=Result<SocketId>>> {
self.run_in_event_loop(Box::new(move |polling: &mut PollingLoop| {
let id = polling.sockets.available_id()?;
polling.poll.registry().register(
&mut stream,
Token(id),
Interest::READABLE | Interest::WRITABLE,
)?;
polling.sockets.set(id, Socket::Stream(stream, listener));
Ok(id)
}))
}
}
struct PollingLoop {
poll: Poll,
event_buffer_size: usize,
sockets: SocketMap,
stopped: bool,
}
impl PollingLoop {
fn new(poll: Poll, event_buffer_size: usize) -> PollingLoop {
let sockets = SocketMap::new();
PollingLoop { poll, event_buffer_size, sockets, stopped: false }
}
/// poll() のためのイベントループを開始します。イベントループスレッドの中で任意の処理を行う場合は receiver に対応
/// する sender に実行するタスクを投入し、self.poll に登録済みの Waker.wake() でブロッキングを抜けます。
fn start<R>(&mut self, receiver: Receiver<Task<Result<R>>>) -> Result<()> {
let mut events = Events::with_capacity(self.event_buffer_size);
while!self.stopped {
self.poll.poll(&mut events, None)?;
// イベントの発生したソケットを取得
let event_sockets = events
.iter()
.map(|e| self.sockets.get(e.token().0).map(|s| (e, s)))
.flatten()
.collect::<Vec<(&Event, Arc<Mutex<Socket>>)>>();
// イベントの発生したソケットの処理を実行
for (event, socket) in event_sockets.iter() {
match socket.lock()?.deref_mut() {
Socket::Stream(stream, listener) => {
log::info!("CLIENT[{}]", event.token().0);
self.on_tcp_stream(event, stream, listener);
}
Socket::Listener(listener, event_listener) => {
log::info!("SERVER[{}]", event.token().0);
self.on_tcp_listener(event, listener, event_listener);
}
Socket::Waker => {
log::info!("WAKER");
}
}
}
self.run_all_tasks(&receiver);
}
self.cleanup();
log::info!("dispatcher stopped");
Ok(())
}
/// 指定された receiver に存在するすべてのタスクを実行します。
fn run_all_tasks<R>(&mut self, receiver: &Receiver<Task<Result<R>>>) {
for Task { executable, state } in receiver.iter() {
let result = executable(self);
let mut state = state.lock().unwrap();
state.result = Some(result);
if let Some(waker) = state.waker.take() {
waker.wake();
}
}
}
/// 指定された ID のソケットを廃棄します。この操作により対応するソケットはクローズします。
fn close(&mut self, id: SocketId) {
if let Some(socket) = self.sockets.sockets.remove(&id) {
log::debug!("closing socket: {}", id);
match socket.lock().unwrap().deref_mut() {
Socket::Waker => (),
Socket::Stream(stream, _) => self.poll.registry().deregister(stream).unwrap(),
Socket::Listener(listener, _) => self.poll.registry().deregister(listener).unwrap(),
};
log::debug!("socket closed: {}", id);
}
}
/// 登録されているすべてのソケットを廃棄します。この操作によりソケットはクローズされます。
fn cleanup(&mut self) {
for id in self.sockets.ids() {
self.close(id);
}
}
fn action<S: Source>(&mut self, id: SocketId, source: &mut S, action: DispatcherAction) {
match action {
DispatcherAction::Continue => (),
DispatcherAction::ChangeFlag(interest) => {
self.poll.registry().reregister(source, Token(id), interest).unwrap();
}
DispatcherAction::Dispose => self.close(id),
}
}
fn on_tcp_stream(
&mut self,
event: &Event,
stream: &mut TcpStream,
listener: &mut Box<dyn TcpStreamListener>,
) {
// 読み込み可能イベント
if event.is_readable() {
let behaviour = listener.on_ready_to_read(stream);
self.action(event.token().0, stream, behaviour);
}
// 書き込み可能イベント
if event.is_writable() {
let behaviour = listener.on_ready_to_write(stream);
self.action(event.token().0, stream, behaviour);
}
if event.is_error() {
let behaviour = match stream.take_error() {
Ok(Some(err)) => listener.on_error(err),
Ok(None) => DispatcherAction::Continue,
Err(err) => listener.on_error(err),
};
self.action(event.token().0, stream, behaviour);
}
}
fn on_tcp_listener(
&mut self,
event: &Event,
listener: &mut TcpListener,
event_listener: &mut Box<dyn TcpListenerListener>,
) {
// ソケット接続イベント
if event.is_readable() {
let (stream, address) = listener.accept().unwrap();
let behaviour = event_listener.on_accept(stream, address);
self.action(event.token().0, listener, behaviour);
}
}
}
/// Poll に登録するソケットを格納する列挙型。
enum Socket {
Waker,
Stream(TcpStream, Box<dyn TcpStreamListener>),
Listener(TcpListener, Box<dyn TcpListenerListener>),
}
/// オブジェクトに対する ID の割当と ID による参照操作を行うためのマップ。
/// Poll で通知されたトークンからソケットを特定するために使用します。
/// Note that this [IdMap] is not thread-safe.
struct SocketMap {
next: usize,
sockets: HashMap<usize, Arc<Mutex<Socket>>>,
}
impl SocketMap {
/// 新規のマップを作成します。
pub fn new() -> SocketMap {
let sockets = HashMap::new();
SocketMap { next: 0, sockets }
}
/// 指定された ID のオブジェクトを参照します。
pub fn get(&self, id: usize) -> Option<Arc<Mutex<Socket>>> {
if id == 0 {
Some(Arc::new(Mutex::new(Socket::Waker)))
} else {
self.sockets.get(&id).map(|a| a.clone())
}
}
/// 管理されているすべての ID を参照します。
pub fn ids(&self) -> Vec<SocketId> {
self.sockets.keys().map(|id| *id).collect::<Vec<usize>>()
}
/// 使用可能な ID を検索します。
pub fn available_id(&mut self) -> Result<SocketId> {
// NOTE: Token(0) は Waker 用、Token(usize::MAX) は Poll が内部的に使用しているためそれぞれ予約されている
let max = std::usize::MAX - 2;
if self.sockets.len() == max {
return Err(Error::TooManySockets { maximum: std::usize::MAX });
}
for i in 0..=max {
let id = (self.next as u64 + i as u64) as usize + 1;
if self.sockets.get(&id).is_none() {
self.next = if self.next + 1 == max { 0 } else { self.next + 1 };
return Ok(id);
}
}
unreachable!()
}
/// 指定された ID のソケットを新規追加または更新します。
pub fn set(&mut self, id: SocketId, socket: Socket) {
self.sockets.insert(id, Arc::new(Mutex::new(socket)));
}
} | type Output = R; | random_line_split |
lib.rs | //! This crate is part of [Sophia],
//! an [RDF] and [Linked Data] toolkit in Rust.
//!
//! Terms are the building blocks of an [RDF] graph.
//! There are four types of terms: IRIs, blank nodes (BNode for short),
//! literals and variables.
//!
//! NB: variable only exist in [generalized RDF].
//!
//! This module defines a generic type [`Term`](enum.Term.html)
//! which can be derived differently depending on your needs.
//!
//! * [`RefTerm<'a>`](type.RefTerm.html) (alias of `Term<&'a str>`)
//! should be used for very short-lived terms,
//! *i.e.* terms that live less than `'a`,
//! which is the lifetime of their underlying text.
//!
//! * [`BoxTerm`](type.BoxTerm.html) (alias of `Term<Box<str>>`)
//! should be used when the term may outlive the text used to create it.
//!
//! * [`RcTerm`](type.RcTerm.html) (alias of `Term<Rc<str>>`)
//! should also be used for long-lived terms,
//! especially if they need to be cloned multiple times.
//! The use of `Rc` prevents the duplication of the underlying text,
//! while ensuring that it is cleaned when appropriate.
//!
//! * [`ArcTerm`](type.ArcTerm.html) (alias of `Term<Arc<str>>`)
//! should be used when, additionally,
//! terms need to be sent to other threads.
//!
//! * [`StaticTerm`](type.StaticTerm.html) (alias of `Term<&'static str>)
//! is a special case of `RefTerm`
//! where the underlying text is a static string.
//! Those terms can live as long as the program runs,
//! and be cloned and sent without any restriction.
//!
//! * [`MownTerm`](type.MownTerm.html) (alias of `Term<MownStr<'a>>)
//! should be used in situations where some terms can borrow their data,
//! while others need to own it.
//!
//! [Sophia]: https://docs.rs/sophia/latest/sophia/
//! [RDF]: https://www.w3.org/TR/rdf-primer/
//! [Linked Data]: http://linkeddata.org/
//! [generalized RDF]: https://docs.rs/sophia/latest/sophia/#generalized-vs-strict-rdf-model
#![deny(missing_docs)]
use mownstr::MownStr;
use sophia_api::term::{
term_cmp, term_eq, term_format, term_hash, term_to_string, CopyTerm, RawValue, SimpleIri,
TTerm, TermKind, TryCopyTerm,
};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use std::sync::Arc;
pub mod factory;
pub mod index_map;
pub mod variable;
use self::variable::Variable;
pub mod blank_node;
use self::blank_node::BlankNode;
pub mod iri;
use self::iri::{Iri, Normalization};
pub mod literal;
use literal::convert::{AsLiteral, DataType, NativeLiteral};
use literal::Literal;
mod _display;
mod _error;
pub use self::_error::*;
/// Generic type for RDF terms.
///
/// See [module documentation](index.html) for more detail.
///
#[derive(Clone, Copy, Debug, Eq, Ord)]
pub enum Term<TD>
where
TD: TermData,
{
/// An IRI referencing a resource.
Iri(Iri<TD>),
/// A blank node.
///
/// Also known as existentially quantified variable.
BNode(BlankNode<TD>),
/// An RDF literal.
Literal(Literal<TD>),
/// A universally quantified variable like in SPARQL or Notation3.
Variable(Variable<TD>),
}
/// Trait alias for types holding the textual data of terms.
pub trait TermData: AsRef<str> + Clone + Eq + Hash {}
impl<T> TermData for T where T: AsRef<str> + Clone + Eq + Hash {}
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type BoxTerm = Term<Box<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RcTerm = Term<Rc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type ArcTerm = Term<Arc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RefTerm<'a> = Term<&'a str>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type StaticTerm = RefTerm<'static>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type MownTerm<'a> = Term<MownStr<'a>>;
impl<T> Term<T>
where
T: TermData,
{
/// Return a new IRI term from the given text.
///
/// May fail if `txt` is not a valid IRI. | U: AsRef<str>,
T: From<U>,
{
Iri::<T>::new(iri).map(Into::into)
}
/// Return a new IRI term from the two given parts (prefix and suffix).
///
/// May fail if the concatenation of `ns` and `suffix`
/// does not produce a valid IRI.
pub fn new_iri_suffixed<U, V>(ns: U, suffix: V) -> Result<Term<T>>
where
U: AsRef<str>,
V: AsRef<str>,
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed(ns, suffix).map(Into::into)
}
/// Return a new blank node term with the given bnode ID.
///
/// Currently, this may never fail;
/// however it returns a result for homogeneity with other constructor methods,
/// and because future versions may be more picky regarding bnode IDs.
pub fn new_bnode<U>(id: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::new(id).map(Into::into)
}
/// Return a new literal term with the given value and language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with all IRIs (if any)
/// internally represented with all its data in `ns`, and an empty `suffix`.
///
/// # Performances
/// The returned term will borrow data from this one as much as possible,
/// but strings may be allocated in case a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
/// This function conducts no checks if the resulting IRI is valid. This is
/// a contract that is generally assumed. Breaking it could result in
/// unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// Return a new blank node term.
///
/// # Pre-condition
///
/// This function requires that `id` is a valid bnode ID.
pub fn new_bnode_unchecked<U>(id: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::<T>::new_unchecked(id).into()
}
/// Return a literal term.
///
/// # Pre-condition
///
/// This function requires that `lang` is a valid language tag.
/// In debug mode this constraint is asserted.
pub fn new_literal_lang_unchecked<U, V>(txt: U, lang: V) -> Self
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang_unchecked(txt, lang).into()
}
/// Return a typed literal term.
///
/// # Panics
///
/// Panics if `dt` cannot be converted into an IRI.
pub fn new_literal_dt_unchecked<U, V>(txt: U, dt: V) -> Self
where
T: From<U>,
V: TryInto<Iri<T>>,
<V as TryInto<Iri<T>>>::Error: Debug,
{
Literal::new_dt(txt, dt.try_into().unwrap()).into()
}
/// Return a new variable term.
///
/// # Pre-condition
///
/// This function requires that `name` is a valid variable name.
pub fn new_variable_unchecked<U>(name: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
Variable::<T>::new_unchecked(name).into()
}
}
impl<T: TermData> TTerm for Term<T> {
fn kind(&self) -> TermKind {
use Term::*;
match self {
Iri(_) => TermKind::Iri,
Literal(_) => TermKind::Literal,
BNode(_) => TermKind::BlankNode,
Variable(_) => TermKind::Variable,
}
}
fn value_raw(&self) -> RawValue {
use Term::*;
match self {
Iri(i) => i.value_raw(),
Literal(l) => l.value_raw(),
BNode(b) => b.value_raw(),
Variable(v) => v.value_raw(),
}
}
fn datatype(&self) -> Option<SimpleIri> {
if let Term::Literal(lit) = self {
lit.datatype()
} else {
None
}
}
fn language(&self) -> Option<&str> {
if let Term::Literal(lit) = self {
lit.language()
} else {
None
}
}
fn as_dyn(&self) -> &dyn TTerm {
self
}
}
impl<TD, TE> PartialEq<TE> for Term<TD>
where
TD: TermData,
TE: TTerm +?Sized,
{
fn eq(&self, other: &TE) -> bool {
term_eq(self, other)
}
}
impl<TD, TE> PartialOrd<TE> for Term<TD>
where
TD: TermData,
TE: TTerm +?Sized,
{
fn partial_cmp(&self, other: &TE) -> Option<std::cmp::Ordering> {
Some(term_cmp(self, other))
}
}
impl<TD> Hash for Term<TD>
where
TD: TermData,
{
fn hash<H: Hasher>(&self, state: &mut H) {
term_hash(self, state)
}
}
impl<TD> From<Iri<TD>> for Term<TD>
where
TD: TermData,
{
fn from(iri: Iri<TD>) -> Self {
Term::Iri(iri)
}
}
impl<TD> From<Literal<TD>> for Term<TD>
where
TD: TermData,
{
fn from(lit: Literal<TD>) -> Self {
Term::Literal(lit)
}
}
impl<TD> From<Variable<TD>> for Term<TD>
where
TD: TermData,
{
fn from(var: Variable<TD>) -> Self {
Term::Variable(var)
}
}
impl<TD> From<BlankNode<TD>> for Term<TD>
where
TD: TermData,
{
fn from(bn: BlankNode<TD>) -> Self {
Term::BNode(bn)
}
}
impl<TD> From<String> for Term<TD>
where
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(txt: String) -> Self {
txt.as_literal().into()
}
}
impl<'a> From<SimpleIri<'a>> for RefTerm<'a> {
fn from(other: SimpleIri<'a>) -> Self {
Iri::from(other).into()
}
}
impl<T, TD> From<NativeLiteral<T>> for Term<TD>
where
T: DataType +?Sized,
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(other: NativeLiteral<T>) -> Self {
Literal::from(other).into()
}
}
impl<'a, T> From<NativeLiteral<T, &'a str>> for RefTerm<'a>
where
T: DataType +?Sized,
{
fn from(other: NativeLiteral<T, &'a str>) -> Self {
Literal::from(other).into()
}
}
impl<TD> CopyTerm for Term<TD>
where
TD: TermData + for<'x> From<&'x str>,
{
fn copy<T>(term: &T) -> Self
where
T: TTerm +?Sized,
{
match term.kind() {
TermKind::Iri => Term::Iri(Iri::try_copy(term).unwrap()),
TermKind::Literal => Term::Literal(Literal::try_copy(term).unwrap()),
TermKind::BlankNode => Term::BNode(BlankNode::try_copy(term).unwrap()),
TermKind::Variable => Term::Variable(Variable::try_copy(term).unwrap()),
}
}
}
impl<'a, T> From<&'a T> for RefTerm<'a>
where
T: TTerm +?Sized,
{
fn from(t: &'a T) -> Self {
let v = t.value_raw();
match t.kind() {
TermKind::Iri => Term::Iri(match v.1 {
None => Iri::new_unchecked(v.0),
Some(suffix) => Iri::new_suffixed_unchecked(v.0, suffix),
}),
TermKind::Literal => Term::Literal(match t.language() {
None => {
let dt: Iri<&'a str> = t.datatype().unwrap().into();
Literal::new_dt(v.0, dt)
}
Some(tag) => Literal::new_lang_unchecked(v.0, tag),
}),
TermKind::BlankNode => Term::BNode(BlankNode::new_unchecked(v.0)),
TermKind::Variable => Term::Variable(Variable::new_unchecked(v.0)),
}
}
}
impl<'a, TD: TermData + 'a> Borrow<dyn TTerm + 'a> for Term<TD> {
fn borrow(&self) -> &(dyn TTerm + 'a) {
self as _
}
}
#[cfg(test)]
pub(crate) mod test;
/// This line re-exports `same_graph_name` from `sophia_api::term`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::same_graph_name;
/// This module re-exports things from `sophia_api::ns`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub mod ns {
pub use sophia_api::ns::*;
}
/// This line re-exports the module `sophia_api::term::matcher`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::matcher; | pub fn new_iri<U>(iri: U) -> Result<Term<T>>
where | random_line_split |
lib.rs | //! This crate is part of [Sophia],
//! an [RDF] and [Linked Data] toolkit in Rust.
//!
//! Terms are the building blocks of an [RDF] graph.
//! There are four types of terms: IRIs, blank nodes (BNode for short),
//! literals and variables.
//!
//! NB: variable only exist in [generalized RDF].
//!
//! This module defines a generic type [`Term`](enum.Term.html)
//! which can be derived differently depending on your needs.
//!
//! * [`RefTerm<'a>`](type.RefTerm.html) (alias of `Term<&'a str>`)
//! should be used for very short-lived terms,
//! *i.e.* terms that live less than `'a`,
//! which is the lifetime of their underlying text.
//!
//! * [`BoxTerm`](type.BoxTerm.html) (alias of `Term<Box<str>>`)
//! should be used when the term may outlive the text used to create it.
//!
//! * [`RcTerm`](type.RcTerm.html) (alias of `Term<Rc<str>>`)
//! should also be used for long-lived terms,
//! especially if they need to be cloned multiple times.
//! The use of `Rc` prevents the duplication of the underlying text,
//! while ensuring that it is cleaned when appropriate.
//!
//! * [`ArcTerm`](type.ArcTerm.html) (alias of `Term<Arc<str>>`)
//! should be used when, additionally,
//! terms need to be sent to other threads.
//!
//! * [`StaticTerm`](type.StaticTerm.html) (alias of `Term<&'static str>)
//! is a special case of `RefTerm`
//! where the underlying text is a static string.
//! Those terms can live as long as the program runs,
//! and be cloned and sent without any restriction.
//!
//! * [`MownTerm`](type.MownTerm.html) (alias of `Term<MownStr<'a>>)
//! should be used in situations where some terms can borrow their data,
//! while others need to own it.
//!
//! [Sophia]: https://docs.rs/sophia/latest/sophia/
//! [RDF]: https://www.w3.org/TR/rdf-primer/
//! [Linked Data]: http://linkeddata.org/
//! [generalized RDF]: https://docs.rs/sophia/latest/sophia/#generalized-vs-strict-rdf-model
#![deny(missing_docs)]
use mownstr::MownStr;
use sophia_api::term::{
term_cmp, term_eq, term_format, term_hash, term_to_string, CopyTerm, RawValue, SimpleIri,
TTerm, TermKind, TryCopyTerm,
};
use std::borrow::Borrow;
use std::convert::TryInto;
use std::fmt::Debug;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use std::sync::Arc;
pub mod factory;
pub mod index_map;
pub mod variable;
use self::variable::Variable;
pub mod blank_node;
use self::blank_node::BlankNode;
pub mod iri;
use self::iri::{Iri, Normalization};
pub mod literal;
use literal::convert::{AsLiteral, DataType, NativeLiteral};
use literal::Literal;
mod _display;
mod _error;
pub use self::_error::*;
/// Generic type for RDF terms.
///
/// See [module documentation](index.html) for more detail.
///
#[derive(Clone, Copy, Debug, Eq, Ord)]
pub enum Term<TD>
where
TD: TermData,
{
/// An IRI referencing a resource.
Iri(Iri<TD>),
/// A blank node.
///
/// Also known as existentially quantified variable.
BNode(BlankNode<TD>),
/// An RDF literal.
Literal(Literal<TD>),
/// A universally quantified variable like in SPARQL or Notation3.
Variable(Variable<TD>),
}
/// Trait alias for types holding the textual data of terms.
pub trait TermData: AsRef<str> + Clone + Eq + Hash {}
impl<T> TermData for T where T: AsRef<str> + Clone + Eq + Hash {}
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type BoxTerm = Term<Box<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RcTerm = Term<Rc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type ArcTerm = Term<Arc<str>>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type RefTerm<'a> = Term<&'a str>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type StaticTerm = RefTerm<'static>;
/// Convenient alias for a specialization of `Term<T>`.
///
/// See [module documentation](index.html)
/// for more detail on when to use it.
pub type MownTerm<'a> = Term<MownStr<'a>>;
impl<T> Term<T>
where
T: TermData,
{
/// Return a new IRI term from the given text.
///
/// May fail if `txt` is not a valid IRI.
pub fn new_iri<U>(iri: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Iri::<T>::new(iri).map(Into::into)
}
/// Return a new IRI term from the two given parts (prefix and suffix).
///
/// May fail if the concatenation of `ns` and `suffix`
/// does not produce a valid IRI.
pub fn new_iri_suffixed<U, V>(ns: U, suffix: V) -> Result<Term<T>>
where
U: AsRef<str>,
V: AsRef<str>,
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed(ns, suffix).map(Into::into)
}
/// Return a new blank node term with the given bnode ID.
///
/// Currently, this may never fail;
/// however it returns a result for homogeneity with other constructor methods,
/// and because future versions may be more picky regarding bnode IDs.
pub fn new_bnode<U>(id: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::new(id).map(Into::into)
}
/// Return a new literal term with the given value and language tag.
///
/// May fail if the language tag is not a valid BCP47 language tag.
pub fn new_literal_lang<U, V>(txt: U, lang: V) -> Result<Self>
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang(txt, lang).map(Into::into)
}
/// Return a new literal term with the given value and datatype.
///
/// May fail if `dt` is not an IRI.
pub fn new_literal_dt<U, V>(txt: U, dt: V) -> Result<Self>
where
T: From<U>,
V: TryInto<Iri<T>>,
TermError: From<<V as TryInto<Iri<T>>>::Error>,
{
Ok(Literal::new_dt(txt, dt.try_into()?).into())
}
/// Return a new variable term with the given name.
///
/// May fail if `name` is not a valid variable name.
pub fn new_variable<U>(name: U) -> Result<Term<T>>
where
U: AsRef<str>,
T: From<U>,
{
Variable::new(name).map(Into::into)
}
/// Borrow the inner contents of the term.
pub fn as_ref(&self) -> Term<&T> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref()),
Literal(lit) => Literal(lit.as_ref()),
BNode(bn) => BNode(bn.as_ref()),
Variable(var) => Variable(var.as_ref()),
}
}
/// Borrow the inner contents of the term as `&str`.
pub fn as_ref_str(&self) -> Term<&str> {
use self::Term::*;
match &self {
Iri(iri) => Iri(iri.as_ref_str()),
Literal(lit) => Literal(lit.as_ref_str()),
BNode(bn) => BNode(bn.as_ref_str()),
Variable(var) => Variable(var.as_ref_str()),
}
}
/// Create a new term by applying `f` to the `TermData` of `self`.
pub fn map<F, TD2>(self, f: F) -> Term<TD2>
where
F: FnMut(T) -> TD2,
TD2: TermData,
{
use self::Term::*;
match self {
Iri(iri) => Iri(iri.map(f)),
Literal(lit) => Literal(lit.map(f)),
BNode(bn) => BNode(bn.map(f)),
Variable(var) => Variable(var.map(f)),
}
}
/// Maps the term using the `Into` trait.
pub fn map_into<TD2>(self) -> Term<TD2>
where
T: Into<TD2>,
TD2: TermData,
{
self.map(Into::into)
}
/// Clone self while transforming the inner `TermData` with the given
/// factory.
///
/// This is done in one step in contrast to calling `clone().map(factory)`.
pub fn clone_map<'a, U, F>(&'a self, factory: F) -> Term<U>
where
U: TermData,
F: FnMut(&'a str) -> U,
{
use self::Term::*;
match self {
Iri(iri) => iri.clone_map(factory).into(),
BNode(bn) => bn.clone_map(factory).into(),
Literal(lit) => lit.clone_map(factory).into(),
Variable(var) => var.clone_map(factory).into(),
}
}
/// Apply `clone_map()` using the `Into` trait.
pub fn clone_into<'src, U>(&'src self) -> Term<U>
where
U: TermData + From<&'src str>,
{
self.clone_map(Into::into)
}
/// Return a term equivalent to this one,
/// with all IRIs (if any)
/// internally represented with all its data in `ns`, and an empty `suffix`.
///
/// # Performances
/// The returned term will borrow data from this one as much as possible,
/// but strings may be allocated in case a concatenation is required.
pub fn normalized(&self, policy: Normalization) -> MownTerm {
match self {
Term::Iri(iri) => iri.normalized(policy).into(),
Term::Literal(lit) => lit.normalized(policy).into(),
_ => self.as_ref_str().map_into(),
}
}
/// Create a new IRI-term from a given IRI without checking its validity.
///
/// # Pre-conditions
///
/// This function conducts no checks if the resulting IRI is valid. This is
/// a contract that is generally assumed. Breaking it could result in
/// unexpected behavior.
///
/// However, in `debug` builds assertions that perform checks are enabled.
pub fn new_iri_unchecked<U>(iri: U) -> Term<T>
where
T: From<U>,
{
Iri::<T>::new_unchecked(iri).into()
}
/// Create a new IRI-term from a given namespace and suffix.
///
/// # Pre-conditions
///
/// It is expected that
///
/// * the resulting IRI is valid per RFC3987,
/// * `suffix` is not the empty string
/// (otherwise, [`new_iri_unchecked`](#method.new_iri_unchecked) should be used instead).
///
/// This is a contract that is generally assumed.
/// Breaking it could result in unexpected behavior.
/// However in `debug` mode, assertions that perform checks are enabled.
pub fn new_iri_suffixed_unchecked<U, V>(ns: U, suffix: V) -> Term<T>
where
T: From<U> + From<V>,
{
Iri::<T>::new_suffixed_unchecked(ns, suffix).into()
}
/// Return a new blank node term.
///
/// # Pre-condition
///
/// This function requires that `id` is a valid bnode ID.
pub fn new_bnode_unchecked<U>(id: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
BlankNode::<T>::new_unchecked(id).into()
}
/// Return a literal term.
///
/// # Pre-condition
///
/// This function requires that `lang` is a valid language tag.
/// In debug mode this constraint is asserted.
pub fn new_literal_lang_unchecked<U, V>(txt: U, lang: V) -> Self
where
V: AsRef<str>,
T: From<U> + From<V>,
{
Literal::<T>::new_lang_unchecked(txt, lang).into()
}
/// Return a typed literal term.
///
/// # Panics
///
/// Panics if `dt` cannot be converted into an IRI.
pub fn new_literal_dt_unchecked<U, V>(txt: U, dt: V) -> Self
where
T: From<U>,
V: TryInto<Iri<T>>,
<V as TryInto<Iri<T>>>::Error: Debug,
{
Literal::new_dt(txt, dt.try_into().unwrap()).into()
}
/// Return a new variable term.
///
/// # Pre-condition
///
/// This function requires that `name` is a valid variable name.
pub fn new_variable_unchecked<U>(name: U) -> Term<T>
where
U: AsRef<str>,
T: From<U>,
{
Variable::<T>::new_unchecked(name).into()
}
}
impl<T: TermData> TTerm for Term<T> {
fn kind(&self) -> TermKind {
use Term::*;
match self {
Iri(_) => TermKind::Iri,
Literal(_) => TermKind::Literal,
BNode(_) => TermKind::BlankNode,
Variable(_) => TermKind::Variable,
}
}
fn value_raw(&self) -> RawValue {
use Term::*;
match self {
Iri(i) => i.value_raw(),
Literal(l) => l.value_raw(),
BNode(b) => b.value_raw(),
Variable(v) => v.value_raw(),
}
}
fn datatype(&self) -> Option<SimpleIri> {
if let Term::Literal(lit) = self {
lit.datatype()
} else {
None
}
}
fn language(&self) -> Option<&str> {
if let Term::Literal(lit) = self {
lit.language()
} else {
None
}
}
fn as_dyn(&self) -> &dyn TTerm {
self
}
}
impl<TD, TE> PartialEq<TE> for Term<TD>
where
TD: TermData,
TE: TTerm +?Sized,
{
fn eq(&self, other: &TE) -> bool {
term_eq(self, other)
}
}
impl<TD, TE> PartialOrd<TE> for Term<TD>
where
TD: TermData,
TE: TTerm +?Sized,
{
fn | (&self, other: &TE) -> Option<std::cmp::Ordering> {
Some(term_cmp(self, other))
}
}
impl<TD> Hash for Term<TD>
where
TD: TermData,
{
fn hash<H: Hasher>(&self, state: &mut H) {
term_hash(self, state)
}
}
impl<TD> From<Iri<TD>> for Term<TD>
where
TD: TermData,
{
fn from(iri: Iri<TD>) -> Self {
Term::Iri(iri)
}
}
impl<TD> From<Literal<TD>> for Term<TD>
where
TD: TermData,
{
fn from(lit: Literal<TD>) -> Self {
Term::Literal(lit)
}
}
impl<TD> From<Variable<TD>> for Term<TD>
where
TD: TermData,
{
fn from(var: Variable<TD>) -> Self {
Term::Variable(var)
}
}
impl<TD> From<BlankNode<TD>> for Term<TD>
where
TD: TermData,
{
fn from(bn: BlankNode<TD>) -> Self {
Term::BNode(bn)
}
}
impl<TD> From<String> for Term<TD>
where
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(txt: String) -> Self {
txt.as_literal().into()
}
}
impl<'a> From<SimpleIri<'a>> for RefTerm<'a> {
fn from(other: SimpleIri<'a>) -> Self {
Iri::from(other).into()
}
}
impl<T, TD> From<NativeLiteral<T>> for Term<TD>
where
T: DataType +?Sized,
TD: TermData + From<Box<str>> + From<&'static str>,
{
fn from(other: NativeLiteral<T>) -> Self {
Literal::from(other).into()
}
}
impl<'a, T> From<NativeLiteral<T, &'a str>> for RefTerm<'a>
where
T: DataType +?Sized,
{
fn from(other: NativeLiteral<T, &'a str>) -> Self {
Literal::from(other).into()
}
}
impl<TD> CopyTerm for Term<TD>
where
TD: TermData + for<'x> From<&'x str>,
{
fn copy<T>(term: &T) -> Self
where
T: TTerm +?Sized,
{
match term.kind() {
TermKind::Iri => Term::Iri(Iri::try_copy(term).unwrap()),
TermKind::Literal => Term::Literal(Literal::try_copy(term).unwrap()),
TermKind::BlankNode => Term::BNode(BlankNode::try_copy(term).unwrap()),
TermKind::Variable => Term::Variable(Variable::try_copy(term).unwrap()),
}
}
}
impl<'a, T> From<&'a T> for RefTerm<'a>
where
T: TTerm +?Sized,
{
fn from(t: &'a T) -> Self {
let v = t.value_raw();
match t.kind() {
TermKind::Iri => Term::Iri(match v.1 {
None => Iri::new_unchecked(v.0),
Some(suffix) => Iri::new_suffixed_unchecked(v.0, suffix),
}),
TermKind::Literal => Term::Literal(match t.language() {
None => {
let dt: Iri<&'a str> = t.datatype().unwrap().into();
Literal::new_dt(v.0, dt)
}
Some(tag) => Literal::new_lang_unchecked(v.0, tag),
}),
TermKind::BlankNode => Term::BNode(BlankNode::new_unchecked(v.0)),
TermKind::Variable => Term::Variable(Variable::new_unchecked(v.0)),
}
}
}
impl<'a, TD: TermData + 'a> Borrow<dyn TTerm + 'a> for Term<TD> {
fn borrow(&self) -> &(dyn TTerm + 'a) {
self as _
}
}
#[cfg(test)]
pub(crate) mod test;
/// This line re-exports `same_graph_name` from `sophia_api::term`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::same_graph_name;
/// This module re-exports things from `sophia_api::ns`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub mod ns {
pub use sophia_api::ns::*;
}
/// This line re-exports the module `sophia_api::term::matcher`,
/// to ease transition from older versions of Sophia.
/// It will eventually be deprecated.
///
/// See [`sophia_api`](https://docs.rs/sophia_api/latest/sophia_api/)
pub use sophia_api::term::matcher;
| partial_cmp | identifier_name |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
}; | /// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(,..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name {.. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)?.collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind,..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn is_debuginfo(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
}
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
} | }
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
/// | random_line_split |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(,..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name {.. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)?.collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind,..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn is_debuginfo(self) -> bool |
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
}
| {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
} | identifier_body |
custom_insts.rs | //! SPIR-V (extended) instructions specific to `rustc_codegen_spirv`, produced
//! during the original codegen of a crate, and consumed by the `linker`.
use lazy_static::lazy_static;
use rspirv::dr::{Instruction, Operand};
use rspirv::spirv::Op;
use smallvec::SmallVec;
/// Prefix for `CUSTOM_EXT_INST_SET` (`OpExtInstImport` "instruction set" name),
/// without any of the disambiguating suffixes added for specific revisions.
///
/// This **should not** be changed (if possible), to ensure version mismatches
/// can be detected (i.e. starting with this prefix, but the full name differs).
///
/// See `CUSTOM_EXT_INST_SET`'s docs for further constraints on the full name.
pub const CUSTOM_EXT_INST_SET_PREFIX: &str = concat!("Rust.", env!("CARGO_PKG_NAME"), ".");
macro_rules! join_cargo_pkg_version_major_minor_patch {
($sep:literal) => {
concat!(
env!("CARGO_PKG_VERSION_MAJOR"),
$sep,
env!("CARGO_PKG_VERSION_MINOR"),
$sep,
env!("CARGO_PKG_VERSION_PATCH"),
)
};
}
lazy_static! {
/// `OpExtInstImport` "instruction set" name for all Rust-GPU instructions.
///
/// These considerations are relevant to the specific choice of name:
/// * does *not* start with `NonSemantic.`, as:
/// * some custom instructions may need to be semantic
/// * these custom instructions are not meant for the final SPIR-V
/// (so no third-party support is *technically* required for them)
/// * `NonSemantic.` requires SPIR-V 1.6 (or `SPV_KHR_non_semantic_info`)
/// * always starts with `CUSTOM_EXT_INST_SET_PREFIX` (see also its docs),
/// regardless of Rust-GPU version or custom instruction set definition
/// * contains enough disambiguating information to avoid misinterpretation
/// if the definitions of the custom instructions have changed - this is
/// achieved by hashing the `SCHEMA` constant from `def_custom_insts!` below
pub static ref CUSTOM_EXT_INST_SET: String = {
let schema_hash = {
use rustc_data_structures::stable_hasher::StableHasher;
use std::hash::Hash;
let mut hasher = StableHasher::new();
SCHEMA.hash(&mut hasher);
let (lo, hi) = hasher.finalize();
(lo as u128) | ((hi as u128) << 64)
};
let version = join_cargo_pkg_version_major_minor_patch!("_");
format!("{CUSTOM_EXT_INST_SET_PREFIX}{version}.{schema_hash:x}")
};
}
pub fn register_to_spirt_context(cx: &spirt::Context) {
use spirt::spv::spec::{ExtInstSetDesc, ExtInstSetInstructionDesc};
cx.register_custom_ext_inst_set(
&CUSTOM_EXT_INST_SET,
ExtInstSetDesc {
// HACK(eddyb) this is the most compact form I've found, that isn't
// outright lossy by omitting "Rust vs Rust-GPU" or the version.
short_alias: Some(
concat!("Rust-GPU ", join_cargo_pkg_version_major_minor_patch!(".")).into(),
),
instructions: SCHEMA
.iter()
.map(|&(i, name, operand_names)| {
(
i,
ExtInstSetInstructionDesc {
name: name.into(),
operand_names: operand_names
.iter()
.map(|name| {
name.strip_prefix("..")
.unwrap_or(name)
.replace('_', " ")
.into()
})
.collect(),
is_debuginfo: name.contains("Debug")
|| name.contains("InlinedCallFrame"),
},
)
})
.collect(),
},
);
}
macro_rules! def_custom_insts {
($($num:literal => $name:ident $({ $($field:ident),+ $(,..$variadic_field:ident)? $(,)? })?),+ $(,)?) => {
const SCHEMA: &[(u32, &str, &[&str])] = &[
$(($num, stringify!($name), &[$($(stringify!($field),)+ $(stringify!(..$variadic_field),)?)?])),+
];
#[repr(u32)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum CustomOp { $($name = $num),+ }
impl CustomOp {
pub fn decode(i: u32) -> Self {
match i {
$($num => Self::$name,)+
_ => unreachable!("{i} is not a valid custom instruction number"),
}
}
pub fn decode_from_ext_inst(inst: &Instruction) -> Self {
assert_eq!(inst.class.opcode, Op::ExtInst);
Self::decode(inst.operands[1].unwrap_literal_ext_inst_integer())
}
pub fn with_operands<T: Clone>(self, operands: &[T]) -> CustomInst<T> {
match self {
$(Self::$name => match operands {
[$($($field,)+ $(ref $variadic_field @..)?)?] => CustomInst::$name $({
$($field: $field.clone(),)+
$($variadic_field: $variadic_field.iter().cloned().collect())?
})?,
_ => unreachable!("{self:?} does not have the right number of operands"),
}),+
}
}
}
#[derive(Clone, Debug)]
pub enum CustomInst<T> {
$($name $({ $($field: T,)+ $($variadic_field: SmallVec<[T; 4]>)? })?),+
}
impl<T> CustomInst<T> {
pub fn op(&self) -> CustomOp {
match *self {
$(Self::$name {.. } => CustomOp::$name),+
}
}
// HACK(eddyb) this should return an iterator, but that's too much effort.
pub fn into_operands(self) -> SmallVec<[T; 8]> {
match self {
$(Self::$name $({ $($field,)+ $($variadic_field)? })? => {
[$($($field),+)?].into_iter() $($(.chain($variadic_field))?)?.collect()
})+
}
}
}
impl CustomInst<Operand> {
pub fn decode(inst: &Instruction) -> Self {
CustomOp::decode_from_ext_inst(inst).with_operands(&inst.operands[2..])
}
}
}
}
// NOTE(eddyb) several of these are similar to `NonSemantic.Shader.DebugInfo.100`
// instructions, but simpler (to aid implementation, for now).
def_custom_insts! {
// Like `DebugLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpLine`.
0 => SetDebugSrcLoc { file, line_start, line_end, col_start, col_end },
// Like `DebugNoLine` (from `NonSemantic.Shader.DebugInfo.100`) or `OpNoLine`.
1 => ClearDebugSrcLoc,
// Similar to `DebugInlinedAt` (from `NonSemantic.Shader.DebugInfo.100`),
// but simpler: there are no "scope objects", the location of the inlined
// callsite is given by other debuginfo (`SetDebugSrcLoc`/`OpLine`) active
// before this instruction, and only the name of the callee is recorded.
2 => PushInlinedCallFrame { callee_name },
// Leave the most recent inlined call frame entered by a `PushInlinedCallFrame`
// (i.e. the inlined call frames form a virtual call stack in debuginfo).
3 => PopInlinedCallFrame,
// [Semantic] Similar to some proposed `OpAbort`, but without any ability to
// indicate abnormal termination (so it's closer to `OpTerminateInvocation`,
// which we could theoretically use, but that's limited to fragment shaders).
//
// Lowering takes advantage of inlining happening before CFG structurization
// (by forcing inlining of `Abort`s all the way up to entry-points, as to be
// able to turn the `Abort`s into regular `OpReturn`s, from an entry-point),
// but if/when inlining works on structured SPIR-T instead, it's not much
// harder to make any call to a "may (transitively) abort" function branch on
// an additional returned `bool`, instead (i.e. a form of emulated unwinding).
//
// As this is a custom terminator, it must only appear before `OpUnreachable`,
// with at most debuginfo instructions (standard or custom), between the two.
//
// FIXME(eddyb) long-term this kind of custom control-flow could be generalized
// to fully emulate unwinding (resulting in codegen similar to `?` in functions
// returning `Option` or `Result`), to e.g. run destructors, or even allow
// users to do `catch_unwind` at the top-level of their shader to handle
// panics specially (e.g. by appending to a custom buffer, or using some
// specific color in a fragment shader, to indicate a panic happened).
// NOTE(eddyb) `message_debug_printf` operands form a complete `debugPrintf`
// invocation (format string followed by inputs) for the "message", while
// `kind` only distinguishes broad categories like `"abort"` vs `"panic"`.
4 => Abort { kind,..message_debug_printf },
}
impl CustomOp {
/// Returns `true` iff this `CustomOp` is a custom debuginfo instruction,
/// i.e. non-semantic (can/must be ignored wherever `OpLine`/`OpNoLine` are).
pub fn | (self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => true,
CustomOp::Abort => false,
}
}
/// Returns `true` iff this `CustomOp` is a custom terminator instruction,
/// i.e. semantic and must precede an `OpUnreachable` standard terminator,
/// with at most debuginfo instructions (standard or custom), between the two.
pub fn is_terminator(self) -> bool {
match self {
CustomOp::SetDebugSrcLoc
| CustomOp::ClearDebugSrcLoc
| CustomOp::PushInlinedCallFrame
| CustomOp::PopInlinedCallFrame => false,
CustomOp::Abort => true,
}
}
}
| is_debuginfo | identifier_name |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn | () {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail, if there is no allocator. There is a chance, that this a single gpu system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
}
| initialize_backend | identifier_name |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal {
&self.loop_signal
}
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail, if there is no allocator. There is a chance, that this a single gpu system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_> | #[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
} | }); | random_line_split |
mod.rs | use std::collections::{HashMap, HashSet};
#[cfg(feature = "egl")]
use smithay::backend::renderer::ImportEgl;
use smithay::{
backend::{
allocator::{
dmabuf::{AnyError, Dmabuf, DmabufAllocator},
gbm::{GbmAllocator, GbmBufferFlags},
vulkan::{ImageUsageFlags, VulkanAllocator},
Allocator,
},
drm::{DrmNode, NodeType},
libinput::{LibinputInputBackend, LibinputSessionInterface},
renderer::{
element::texture::TextureBuffer,
gles::GlesRenderer,
multigpu::{gbm::GbmGlesBackend, GpuManager, MultiTexture},
ImportDma, ImportMemWl,
},
session::libseat::LibSeatSession,
session::Session,
udev::{self, UdevBackend},
vulkan::{version::Version, Instance, PhysicalDevice},
},
delegate_dmabuf,
reexports::{
ash::vk::ExtPhysicalDeviceDrmFn,
calloop::{EventLoop, LoopSignal},
input::Libinput,
wayland_protocols::wp::linux_dmabuf::zv1::server::zwp_linux_dmabuf_feedback_v1,
wayland_server::{protocol::wl_surface::WlSurface, Display},
},
wayland::dmabuf::{
DmabufFeedback, DmabufFeedbackBuilder, DmabufGlobal, DmabufHandler, DmabufState,
ImportError,
},
};
use self::drm::{BackendData, SurfaceComposition, UdevOutputId};
use crate::{cursor::Cursor, drawing::PointerElement, state::Backend, CalloopData, Corrosion};
mod drm;
mod utils;
pub struct UdevData {
pub loop_signal: LoopSignal,
pub session: LibSeatSession,
primary_gpu: DrmNode,
dmabuf_state: Option<(DmabufState, DmabufGlobal)>,
allocator: Option<Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>>,
gpu_manager: GpuManager<GbmGlesBackend<GlesRenderer>>,
backends: HashMap<DrmNode, BackendData>,
pointer_element: PointerElement<MultiTexture>,
cursor_image: Cursor,
cursor_images: Vec<(xcursor::parser::Image, TextureBuffer<MultiTexture>)>,
}
impl DmabufHandler for Corrosion<UdevData> {
fn dmabuf_state(&mut self) -> &mut smithay::wayland::dmabuf::DmabufState {
&mut self.backend_data.dmabuf_state.as_mut().unwrap().0
}
fn dmabuf_imported(
&mut self,
_global: &DmabufGlobal,
dmabuf: Dmabuf,
) -> Result<(), smithay::wayland::dmabuf::ImportError> {
self.backend_data
.gpu_manager
.single_renderer(&self.backend_data.primary_gpu)
.and_then(|mut renderer| renderer.import_dmabuf(&dmabuf, None))
.map(|_| ())
.map_err(|_| ImportError::Failed)
}
}
delegate_dmabuf!(Corrosion<UdevData>);
impl Backend for UdevData {
fn early_import(&mut self, output: &WlSurface) {
match self
.gpu_manager
.early_import(Some(self.primary_gpu), self.primary_gpu, output)
{
Ok(()) => {}
Err(err) => tracing::error!("Error on early buffer import: {}", err),
};
}
fn loop_signal(&self) -> &LoopSignal |
fn reset_buffers(&mut self, output: &smithay::output::Output) {
if let Some(id) = output.user_data().get::<UdevOutputId>() {
if let Some(gpu) = self.backends.get_mut(&id.device_id) {
if let Some(surface) = gpu.surfaces.get_mut(&id.crtc) {
surface.compositor.reset_buffers();
}
}
}
}
fn seat_name(&self) -> String {
self.session.seat()
}
}
pub fn initialize_backend() {
let mut event_loop = EventLoop::try_new().expect("Unable to initialize event loop");
let (session, mut _notifier) = match LibSeatSession::new() {
Ok((session, notifier)) => (session, notifier),
Err(err) => {
tracing::error!("Error in creating libseat session: {}", err);
return;
}
};
let mut display = Display::new().expect("Unable to create wayland display");
let primary_gpu = udev::primary_gpu(&session.seat())
.unwrap()
.and_then(|p| {
DrmNode::from_path(p)
.ok()
.expect("Unable to create drm node")
.node_with_type(NodeType::Render)
.expect("Unable to create drm node")
.ok()
})
.unwrap_or_else(|| {
udev::all_gpus(&session.seat())
.unwrap()
.into_iter()
.find_map(|g| DrmNode::from_path(g).ok())
.expect("no gpu")
});
tracing::info!("Using {} as a primary gpu", primary_gpu);
let gpus = GpuManager::new(GbmGlesBackend::default()).unwrap();
let data = UdevData {
loop_signal: event_loop.get_signal(),
dmabuf_state: None,
session,
primary_gpu,
allocator: None,
gpu_manager: gpus,
backends: HashMap::new(),
cursor_image: Cursor::load(),
cursor_images: Vec::new(),
pointer_element: PointerElement::default(),
};
let mut state = Corrosion::new(event_loop.handle(), &mut display, data);
let backend = match UdevBackend::new(&state.seat_name) {
Ok(backend) => backend,
Err(err) => {
tracing::error!("Unable to create udev backend: {}", err);
return;
}
};
for (dev, path) in backend.device_list() {
state.device_added(DrmNode::from_dev_id(dev).unwrap(), &path);
}
state.shm_state.update_formats(
state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap()
.shm_formats(),
);
if let Ok(instance) = Instance::new(Version::VERSION_1_2, None) {
if let Some(physical_device) =
PhysicalDevice::enumerate(&instance)
.ok()
.and_then(|devices| {
devices
.filter(|phd| phd.has_device_extension(ExtPhysicalDeviceDrmFn::name()))
.find(|phd| {
phd.primary_node().unwrap() == Some(primary_gpu)
|| phd.render_node().unwrap() == Some(primary_gpu)
})
})
{
match VulkanAllocator::new(
&physical_device,
ImageUsageFlags::COLOR_ATTACHMENT | ImageUsageFlags::SAMPLED,
) {
Ok(allocator) => {
state.backend_data.allocator = Some(Box::new(DmabufAllocator(allocator))
as Box<dyn Allocator<Buffer = Dmabuf, Error = AnyError>>);
}
Err(err) => {
tracing::warn!("Failed to create vulkan allocator: {}", err);
}
}
}
}
let mut libinput_context = Libinput::new_with_udev::<LibinputSessionInterface<LibSeatSession>>(
state.backend_data.session.clone().into(),
);
libinput_context.udev_assign_seat(&state.seat_name).unwrap();
let libinput_backend = LibinputInputBackend::new(libinput_context.clone());
state
.handle
.insert_source(libinput_backend, move |event, _, data| {
data.state.process_input_event(event);
})
.unwrap();
let gbm = state
.backend_data
.backends
.get(&primary_gpu)
// If the primary_gpu failed to initialize, we likely have a kmsro device
.or_else(|| state.backend_data.backends.values().next())
// Don't fail, if there is no allocator. There is a chance, that this a single gpu system and we don't need one.
.map(|backend| backend.gbm.clone());
state.backend_data.allocator = gbm.map(|gbm| {
Box::new(DmabufAllocator(GbmAllocator::new(
gbm,
GbmBufferFlags::RENDERING,
))) as Box<_>
});
#[cfg_attr(not(feature = "egl"), allow(unused_mut))]
let mut renderer = state
.backend_data
.gpu_manager
.single_renderer(&primary_gpu)
.unwrap();
#[cfg(feature = "egl")]
{
match renderer.bind_wl_display(&state.display_handle) {
Ok(_) => tracing::info!("Enabled egl hardware acceleration"),
Err(err) => tracing::error!("Error in enabling egl hardware acceleration: {:?}", err),
}
}
let dmabuf_formats = renderer.dmabuf_formats().collect::<Vec<_>>();
let default_feedback = DmabufFeedbackBuilder::new(primary_gpu.dev_id(), dmabuf_formats)
.build()
.unwrap();
let mut dmabuf_state = DmabufState::new();
let dmabuf_global = dmabuf_state.create_global_with_default_feedback::<Corrosion<UdevData>>(
&display.handle(),
&default_feedback,
);
state.backend_data.dmabuf_state = Some((dmabuf_state, dmabuf_global));
let gpus = &mut state.backend_data.gpu_manager;
state
.backend_data
.backends
.values_mut()
.for_each(|backend_data| {
backend_data.surfaces.values_mut().for_each(|surface_data| {
surface_data.dmabuf_feedback = surface_data.dmabuf_feedback.take().or_else(|| {
get_surface_dmabuf_feedback(
primary_gpu,
surface_data.render_node,
gpus,
&surface_data.compositor,
)
});
});
});
event_loop
.handle()
.insert_source(backend, move |event, _, data| match event {
udev::UdevEvent::Added { device_id, path } => {
data.state
.device_added(DrmNode::from_dev_id(device_id).unwrap(), &path);
}
udev::UdevEvent::Changed { device_id } => {
data.state
.device_changed(DrmNode::from_dev_id(device_id).unwrap());
}
udev::UdevEvent::Removed { device_id } => {
data.state
.device_removed(DrmNode::from_dev_id(device_id).unwrap());
}
})
.expect("Error inserting event loop source");
std::env::set_var("WAYLAND_DISPLAY", &state.socket_name);
let mut calloop_data = CalloopData { state, display };
event_loop
.run(
std::time::Duration::from_millis(16),
&mut calloop_data,
|data| {
data.state.space.refresh();
data.state.popup_manager.cleanup();
data.display.flush_clients().unwrap();
},
)
.unwrap();
}
pub struct DrmSurfaceDmabufFeedback {
render_feedback: DmabufFeedback,
scanout_feedback: DmabufFeedback,
}
fn get_surface_dmabuf_feedback(
primary_node: DrmNode,
render_node: DrmNode,
gpus: &mut GpuManager<GbmGlesBackend<GlesRenderer>>,
composition: &SurfaceComposition,
) -> Option<DrmSurfaceDmabufFeedback> {
let primary_formats = gpus
.single_renderer(&primary_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let render_formats = gpus
.single_renderer(&render_node)
.ok()?
.dmabuf_formats()
.collect::<HashSet<_>>();
let all_render_formats = primary_formats
.iter()
.chain(render_formats.iter())
.copied()
.collect::<HashSet<_>>();
let surface = composition.surface();
let planes = surface.planes().unwrap();
let planes_formats = surface
.supported_formats(planes.primary.handle)
.unwrap()
.into_iter()
.chain(
planes
.overlay
.iter()
.flat_map(|p| surface.supported_formats(p.handle).unwrap()),
)
.collect::<HashSet<_>>()
.intersection(&all_render_formats)
.copied()
.collect::<Vec<_>>();
let builder = DmabufFeedbackBuilder::new(primary_node.dev_id(), primary_formats);
let render_feedback = builder
.clone()
.add_preference_tranche(render_node.dev_id(), None, render_formats.clone())
.build()
.unwrap();
let scanout_feedback = builder
.clone()
.add_preference_tranche(
surface.device_fd().dev_id().unwrap(),
Some(zwp_linux_dmabuf_feedback_v1::TrancheFlags::Scanout),
planes_formats,
)
.add_preference_tranche(render_node.dev_id(), None, render_formats)
.build()
.unwrap();
Some(DrmSurfaceDmabufFeedback {
render_feedback,
scanout_feedback,
})
}
| {
&self.loop_signal
} | identifier_body |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to setup each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications wont need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this function, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]:../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]:../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]:../reactor/struct.Reactor.html
//! [`ThreadPool`]:../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]:../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]:../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send +'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send +'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() |
}
}
| {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
} | conditional_block |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to setup each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications wont need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this function, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]:../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]:../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]:../reactor/struct.Reactor.html
//! [`ThreadPool`]:../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]:../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]:../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send +'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send +'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown |
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
}
| {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
} | identifier_body |
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to setup each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications wont need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this function, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]:../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]:../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]:../reactor/struct.Reactor.html
//! [`ThreadPool`]:../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]:../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]:../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send +'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn new() -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send +'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where | F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
} | random_line_split |
|
mod.rs | //! A batteries included runtime for applications using Tokio.
//!
//! Applications using Tokio require some runtime support in order to work:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! While it is possible to setup each component manually, this involves a bunch
//! of boilerplate.
//!
//! [`Runtime`] bundles all of these various runtime components into a single
//! handle that can be started and shutdown together, eliminating the necessary
//! boilerplate to run a Tokio application.
//!
//! Most applications wont need to use [`Runtime`] directly. Instead, they will
//! use the [`run`] function, which uses [`Runtime`] under the hood.
//!
//! Creating a [`Runtime`] does the following:
//!
//! * Spawn a background thread running a [`Reactor`] instance.
//! * Start a [`ThreadPool`] for executing futures.
//! * Run an instance of [`Timer`] **per** thread pool worker thread.
//!
//! The thread pool uses a work-stealing strategy and is configured to start a
//! worker thread for each CPU core available on the system. This tends to be
//! the ideal setup for Tokio applications.
//!
//! A timer per thread pool worker thread is used to minimize the amount of
//! synchronization that is required for working with the timer.
//!
//! # Usage
//!
//! Most applications will use the [`run`] function. This takes a future to
//! "seed" the application, blocking the thread until the runtime becomes
//! [idle].
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! tokio::run(server);
//! # }
//! # pub fn main() {}
//! ```
//!
//! In this function, the `run` function blocks until the runtime becomes idle.
//! See [`shutdown_on_idle`][idle] for more shutdown details.
//!
//! From within the context of the runtime, additional tasks are spawned using
//! the [`tokio::spawn`] function. Futures spawned using this function will be
//! executed on the same thread pool used by the [`Runtime`].
//!
//! A [`Runtime`] instance can also be used directly.
//!
//! ```rust
//! # extern crate tokio;
//! # extern crate futures;
//! # use futures::{Future, Stream};
//! use tokio::runtime::Runtime;
//! use tokio::net::TcpListener;
//!
//! # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
//! # unimplemented!();
//! # }
//! # fn dox() {
//! # let addr = "127.0.0.1:8080".parse().unwrap();
//! let listener = TcpListener::bind(&addr).unwrap();
//!
//! let server = listener.incoming()
//! .map_err(|e| println!("error = {:?}", e))
//! .for_each(|socket| {
//! tokio::spawn(process(socket))
//! });
//!
//! // Create the runtime
//! let mut rt = Runtime::new().unwrap();
//!
//! // Spawn the server task
//! rt.spawn(server);
//!
//! // Wait until the runtime becomes idle and shut it down.
//! rt.shutdown_on_idle()
//! .wait().unwrap();
//! # }
//! # pub fn main() {}
//! ```
//!
//! [reactor]:../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]:../timer/index.html
//! [`Runtime`]: struct.Runtime.html
//! [`Reactor`]:../reactor/struct.Reactor.html
//! [`ThreadPool`]:../executor/thread_pool/struct.ThreadPool.html
//! [`run`]: fn.run.html
//! [idle]: struct.Runtime.html#method.shutdown_on_idle
//! [`tokio::spawn`]:../executor/fn.spawn.html
//! [`Timer`]: https://docs.rs/tokio-timer/0.2/tokio_timer/timer/struct.Timer.html
mod builder;
pub mod current_thread;
mod shutdown;
mod task_executor;
pub use self::builder::Builder;
pub use self::shutdown::Shutdown;
pub use self::task_executor::TaskExecutor;
use reactor::Handle;
use std::io;
use tokio_executor::enter;
use tokio_threadpool as threadpool;
use futures;
use futures::future::Future;
/// Handle to the Tokio runtime.
///
/// The Tokio runtime includes a reactor as well as an executor for running
/// tasks.
///
/// Instances of `Runtime` can be created using [`new`] or [`Builder`]. However,
/// most users will use [`tokio::run`], which uses a `Runtime` internally.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [`new`]: #method.new
/// [`Builder`]: struct.Builder.html
/// [`tokio::run`]: fn.run.html
#[derive(Debug)]
pub struct Runtime {
inner: Option<Inner>,
}
#[derive(Debug)]
struct Inner {
/// A handle to one of the per-worker reactors.
reactor: Handle,
/// Task execution pool.
pool: threadpool::ThreadPool,
}
// ===== impl Runtime =====
/// Start the Tokio runtime using the supplied future to bootstrap execution.
///
/// This function is used to bootstrap the execution of a Tokio application. It
/// does the following:
///
/// * Start the Tokio runtime using a default configuration.
/// * Spawn the given future onto the thread pool.
/// * Block the current thread until the runtime shuts down.
///
/// Note that the function will not return immediately once `future` has
/// completed. Instead it waits for the entire runtime to become idle.
///
/// See the [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{Future, Stream};
/// use tokio::net::TcpListener;
///
/// # fn process<T>(_: T) -> Box<Future<Item = (), Error = ()> + Send> {
/// # unimplemented!();
/// # }
/// # fn dox() {
/// # let addr = "127.0.0.1:8080".parse().unwrap();
/// let listener = TcpListener::bind(&addr).unwrap();
///
/// let server = listener.incoming()
/// .map_err(|e| println!("error = {:?}", e))
/// .for_each(|socket| {
/// tokio::spawn(process(socket))
/// });
///
/// tokio::run(server);
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if called from the context of an executor.
///
/// [mod]:../index.html
pub fn run<F>(future: F)
where F: Future<Item = (), Error = ()> + Send +'static,
{
let mut runtime = Runtime::new().unwrap();
runtime.spawn(future);
enter().expect("nested tokio::run")
.block_on(runtime.shutdown_on_idle())
.unwrap();
}
impl Runtime {
/// Create a new runtime instance with default configuration values.
///
/// This results in a reactor, thread pool, and timer being initialized. The
/// thread pool will not spawn any worker threads until it needs to, i.e.
/// tasks are scheduled to run.
///
/// Most users will not need to call this function directly, instead they
/// will use [`tokio::run`](fn.run.html).
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// Creating a new `Runtime` with default configuration values.
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn | () -> io::Result<Self> {
Builder::new().build()
}
#[deprecated(since = "0.1.5", note = "use `reactor` instead")]
#[doc(hidden)]
pub fn handle(&self) -> &Handle {
#[allow(deprecated)]
self.reactor()
}
/// Return a reference to the reactor handle for this runtime instance.
///
/// The returned handle reference can be cloned in order to get an owned
/// value of the handle. This handle can be used to initialize I/O resources
/// (like TCP or UDP sockets) that will not be used on the runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let reactor_handle = rt.reactor().clone();
///
/// // use `reactor_handle`
/// ```
#[deprecated(since = "0.1.11", note = "there is now a reactor per worker thread")]
pub fn reactor(&self) -> &Handle {
&self.inner().reactor
}
/// Return a handle to the runtime's executor.
///
/// The returned handle can be used to spawn tasks that run on this runtime.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// let executor_handle = rt.executor();
///
/// // use `executor_handle`
/// ```
pub fn executor(&self) -> TaskExecutor {
let inner = self.inner().pool.sender().clone();
TaskExecutor { inner }
}
/// Spawn a future onto the Tokio runtime.
///
/// This spawns the given future onto the runtime's executor, usually a
/// thread pool. The thread pool is then responsible for polling the future
/// until it completes.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
///
/// # Examples
///
/// ```rust
/// # extern crate tokio;
/// # extern crate futures;
/// # use futures::{future, Future, Stream};
/// use tokio::runtime::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// println!("now running on a worker thread");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
/// ```
///
/// # Panics
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
where F: Future<Item = (), Error = ()> + Send +'static,
{
self.inner_mut().pool.sender().spawn(future).unwrap();
self
}
/// Run a future to completion on the Tokio runtime.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, and yielding its resolved result. Any tasks or timers which
/// the future spawns internally will be executed on the runtime.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on<F, R, E>(&mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let (tx, rx) = futures::sync::oneshot::channel();
self.spawn(future.then(move |r| tx.send(r).map_err(|_| unreachable!())));
rx.wait().unwrap()
}
/// Run a future to completion on the Tokio runtime, then wait for all
/// background futures to complete too.
///
/// This runs the given future on the runtime, blocking until it is
/// complete, waiting for background futures to complete, and yielding
/// its resolved result. Any tasks or timers which the future spawns
/// internally will be executed on the runtime and waited for completion.
///
/// This method should not be called from an asynchronous context.
///
/// # Panics
///
/// This function panics if the executor is at capacity, if the provided
/// future panics, or if called within an asynchronous execution context.
pub fn block_on_all<F, R, E>(mut self, future: F) -> Result<R, E>
where
F: Send +'static + Future<Item = R, Error = E>,
R: Send +'static,
E: Send +'static,
{
let res = self.block_on(future);
self.shutdown_on_idle().wait().unwrap();
res
}
/// Signals the runtime to shutdown once it becomes idle.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function can be used to perform a graceful shutdown of the runtime.
///
/// The runtime enters an idle state once **all** of the following occur.
///
/// * The thread pool has no tasks to execute, i.e., all tasks that were
/// spawned have completed.
/// * The reactor is not managing any I/O resources.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_on_idle()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_on_idle(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
let inner = inner.pool.shutdown_on_idle();
Shutdown { inner }
}
/// Signals the runtime to shutdown immediately.
///
/// Returns a future that completes once the shutdown operation has
/// completed.
///
/// This function will forcibly shutdown the runtime, causing any
/// in-progress work to become canceled. The shutdown steps are:
///
/// * Drain any scheduled work queues.
/// * Drop any futures that have not yet completed.
/// * Drop the reactor.
///
/// Once the reactor has dropped, any outstanding I/O resources bound to
/// that reactor will no longer function. Calling any method on them will
/// result in an error.
///
/// See [module level][mod] documentation for more details.
///
/// # Examples
///
/// ```
/// use tokio::runtime::Runtime;
/// use tokio::prelude::*;
///
/// let rt = Runtime::new()
/// .unwrap();
///
/// // Use the runtime...
///
/// // Shutdown the runtime
/// rt.shutdown_now()
/// .wait().unwrap();
/// ```
///
/// [mod]: index.html
pub fn shutdown_now(mut self) -> Shutdown {
let inner = self.inner.take().unwrap();
Shutdown::shutdown_now(inner)
}
fn inner(&self) -> &Inner {
self.inner.as_ref().unwrap()
}
fn inner_mut(&mut self) -> &mut Inner {
self.inner.as_mut().unwrap()
}
}
impl Drop for Runtime {
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let shutdown = Shutdown::shutdown_now(inner);
let _ = shutdown.wait();
}
}
}
| new | identifier_name |
mod.rs | Set,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The `VecMap`
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
///
/// Changes made in the returned fork become visible in the database only
/// after a successful `merge` of `fork.into_patch()`.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
    // Dispatch on the service id embedded in the raw message; the service
    // owns the knowledge of how to deserialize its transactions.
    match self.service_map.get(raw.service_id() as usize) {
        Some(service) => service.tx_from_raw(raw),
        None => Err(MessageError::from("Service not found.")),
    }
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// Returns `Hash::default()` if no blocks have been committed yet
/// (the empty-index case is absorbed by `unwrap_or_else` below, so
/// this method does not panic).
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
///
/// NOTE(review): unlike `last_hash`, this presumably panics inside
/// `Schema::last_block` when no block exists — confirm against `Schema`.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// If blocks already exist, only verifies that the storage version is
/// supported (see `assert_storage_version`).
///
/// # Panics
///
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
// A non-empty block-hash index means the genesis block was already created.
let has_genesis_block =!Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initialized node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() | else {
panic!("Could not set database version.")
}
}
/// Checks that the storage version is supported.
///
/// # Panics
///
/// Panics if the version is not supported or is not specified.
fn assert_storage_version(&self) {
    // Panic with the error's Display text on failure, otherwise log the
    // supported version — same observable behavior as a two-arm match.
    let ver = storage::StorageMetadata::read(self.db.snapshot())
        .unwrap_or_else(|e| panic!("{}", e));
    info!("Storage version is supported with value [{}].", ver);
}
/// Creates and commits the genesis block with the given genesis configuration.
///
/// Builds the initial `StoredConfiguration` from `cfg`, lets every service
/// write its initial state into a fork, commits that configuration, and then
/// produces block #0 via `create_patch`.
///
/// # Panics
///
/// Panics if two services share the same service name.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables: each service initializes its storage and
// contributes its piece of the global configuration.
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// Genesis block already exists — nothing to do. Note the early
// return discards the fork built above.
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
// The configuration must be merged BEFORE create_patch runs, since
// block creation reads it from the database.
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
///
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
    debug_assert!(table_idx <= u16::max_value() as usize);
    // Pack the two ids as little-endian `u16`s into a 4-byte stack buffer
    // and hash the result.
    let mut buf = [0u8; 4];
    LittleEndian::write_u16(&mut buf[..2], service_id);
    LittleEndian::write_u16(&mut buf[2..], table_idx as u16);
    crypto::hash(&buf)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
///
/// # Arguments
///
/// * `proposer_id` - validator proposing the block
/// * `height` - height of the block being created
/// * `tx_hashes` - hashes of pooled transactions to execute, in order
///
/// # Panics
///
/// Panics (via `expect`) if any transaction cannot be found or deserialized.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
// First gather per-table hashes using an immutable borrow of the
// fork; the mutable Schema below must not coexist with it.
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
// Core tables are keyed under the reserved CORE_SERVICE id.
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
// Aggregate all table hashes into a single Merkle root — this is
// the block's state hash.
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
// Executes a single pooled transaction inside `fork` and records its result.
//
// The execution runs between `fork.checkpoint()` and either `commit()` or
// `rollback()`, so a failed or panicking transaction leaves the fork exactly
// as it was before the call. Panics caused by `storage::Error` are re-raised,
// since they indicate database corruption rather than a buggy transaction.
//
// Errors are returned when the transaction is missing from the database,
// its service is unknown, or deserialization fails.
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
// Mark a rollback point; everything the transaction writes can be undone.
fork.checkpoint();
// AssertUnwindSafe: the fork is rolled back on panic, restoring the
// invariants that unwinding could otherwise have broken.
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is an overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
// Bookkeeping is written AFTER commit/rollback, so it is recorded even
// for failed transactions (their result and location are still stored).
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
///
/// # Errors
///
/// Returns an error if merging the resulting patch into storage fails.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
    I: Iterator<Item = Signed<Precommit>>,
{
    let patch = {
        let mut fork = {
            let mut fork = self.db.fork();
            fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
            fork
        };
        {
            let mut schema = Schema::new(&mut fork);
            for precommit in precommits {
                // `precommit` is owned by this loop, so it can be pushed
                // directly — the former `.clone()` was redundant.
                schema.precommits_mut(&block_hash).push(precommit);
            }
            // Consensus messages cache is useful only during one height, so it should be
            // cleared when a new height is achieved.
            schema.consensus_messages_cache_mut().clear();
            // Shrink the pool counter by the number of transactions that
            // just made it into the block.
            let txs_in_block = schema.last_block().tx_count();
            let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
            debug_assert!(txs_count >= u64::from(txs_in_block));
            schema
                .transactions_pool_len_index_mut()
                .set(txs_count - u64::from(txs_in_block));
        }
        fork.into_patch()
    };
    self.merge(patch)?;
    // Invokes `after_commit` for each service in order of their identifiers.
    for (service_id, service) in self.service_map.iter() {
        let context = ServiceContext::new(
            self.service_keypair.0,
            self.service_keypair.1.clone(),
            self.api_sender.clone(),
            self.fork(),
            service_id as u16,
        );
        service.after_commit(&context);
    }
    Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
///
/// # Panics
///
/// Panics if the change cannot be merged into storage.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
    let mut fork = self.fork();
    // The temporary Schema's borrow of `fork` ends with this statement,
    // freeing the fork for `into_patch` below.
    Schema::new(&mut fork).peers_cache_mut().put(pubkey, peer);
    self.merge(fork.into_patch())
        .expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) | {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} | conditional_block |
mod.rs | Set,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> |
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block =!Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initialized node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// also, it was the first idea around, to use `hash`
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
let mut schema = Schema::new(&mut fork);
let state_hash = {
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
fork.checkpoint();
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is an overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) | {
self.db.merge(patch)
} | identifier_body |
mod.rs | TransactionSet,
},
};
pub mod config;
use byteorder::{ByteOrder, LittleEndian};
use failure;
use vec_map::VecMap;
use std::{
collections::{BTreeMap, HashMap}, error::Error as StdError, fmt, iter, mem, panic, sync::Arc,
};
use crypto::{self, CryptoHash, Hash, PublicKey, SecretKey};
use encoding::Error as MessageError;
use helpers::{Height, Round, ValidatorId};
use messages::{Connect, Message, Precommit, ProtocolMessage, RawTransaction, Signed};
use node::ApiSender;
use storage::{self, Database, Error, Fork, Patch, Snapshot};
mod block;
mod genesis;
mod schema;
mod service;
#[macro_use]
mod transaction;
#[cfg(test)]
mod tests;
/// Id of core service table family.
pub const CORE_SERVICE: u16 = 0;
/// Exonum blockchain instance with a certain services set and data storage.
///
/// Only nodes with an identical set of services and genesis block can be combined
/// into a single network.
pub struct Blockchain {
db: Arc<dyn Database>,
service_map: Arc<VecMap<Box<dyn Service>>>,
#[doc(hidden)]
pub service_keypair: (PublicKey, SecretKey),
pub(crate) api_sender: ApiSender,
}
impl Blockchain {
/// Constructs a blockchain for the given `storage` and list of `services`.
pub fn new<D: Into<Arc<dyn Database>>>(
storage: D,
services: Vec<Box<dyn Service>>,
service_public_key: PublicKey,
service_secret_key: SecretKey,
api_sender: ApiSender,
) -> Self {
let mut service_map = VecMap::new();
for service in services {
let id = service.service_id() as usize;
if service_map.contains_key(id) {
panic!(
"Services have already contain service with id={}, please change it.",
id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
Self {
api_sender,
..self.clone()
}
}
/// Returns the `VecMap` for all services. This is a map which
/// contains service identifiers and service interfaces. The VecMap
/// allows proceeding from the service identifier to the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
&self.service_map
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
self.db.snapshot()
}
/// Creates a snapshot of the current storage state that can be later committed into the storage
/// via the `merge` method.
pub fn fork(&self) -> Fork {
self.db.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
/// A raw message can be converted into a `Transaction` object only
/// if the following conditions are met:
///
/// - Blockchain has a service with the `service_id` of the given raw message.
/// - Service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
let id = raw.service_id() as usize;
let service = self.service_map
.get(id)
.ok_or_else(|| MessageError::from("Service not found."))?;
service.tx_from_raw(raw)
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
self.db.merge(patch)
}
/// Returns the hash of the latest committed block.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
Schema::new(&self.snapshot())
.block_hashes_by_height()
.last()
.unwrap_or_else(Hash::default)
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
Schema::new(&self.snapshot()).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let has_genesis_block =!Schema::new(&self.snapshot())
.block_hashes_by_height()
.is_empty();
if has_genesis_block {
self.assert_storage_version();
} else {
self.initialize_metadata();
self.create_genesis_block(cfg)?;
}
Ok(())
}
/// Initialized node-local metadata.
fn initialize_metadata(&mut self) {
let mut fork = self.db.fork();
storage::StorageMetadata::write_current(&mut fork);
if self.merge(fork.into_patch()).is_ok() {
info!(
"Storage version successfully initialized with value [{}].",
storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
)
} else {
panic!("Could not set database version.")
}
}
/// Checks if storage version is supported.
///
/// # Panics
///
/// Panics if version is not supported or is not specified.
fn assert_storage_version(&self) {
match storage::StorageMetadata::read(self.db.snapshot()) {
Ok(ver) => info!("Storage version is supported with value [{}].", ver),
Err(e) => panic!("{}", e),
}
}
/// Creates and commits the genesis block with the given genesis configuration.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Helper function to map a tuple (`u16`, `u16`) of service table coordinates
/// to a 32-byte value to be used as the `ProofMapIndex` key (it currently
/// supports only fixed size keys). The `hash` function is used to distribute
/// keys uniformly (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by instance of type of
/// `Service` trait
/// * `table_idx` - index of service table in `Vec`, returned by the
/// `state_hash` method of instance of type of `Service` trait
// also, it was the first idea around, to use `hash`
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
debug_assert!(table_idx <= u16::max_value() as usize);
let size = mem::size_of::<u16>();
let mut vec = vec![0; 2 * size];
LittleEndian::write_u16(&mut vec[0..size], service_id);
LittleEndian::write_u16(&mut vec[size..2 * size], table_idx as u16);
crypto::hash(&vec)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
///
/// # Panics
///
/// Panics if any of the given transactions cannot be executed (missing from
/// the pool, unknown service id, or payload deserialization failure).
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
// Read-only schema borrow: gather (key, table-hash) pairs for the
// core tables and every service table before mutating the fork.
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
// Core tables are keyed under the reserved CORE_SERVICE id.
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
// The read-only borrow above has ended, so a mutable schema is safe here.
let mut schema = Schema::new(&mut fork);
let state_hash = {
// Aggregate every table hash into a single Merkle root for the block.
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
/// Executes a single transaction from the pool inside `fork` and records its
/// result, block location and committed status into the core schema.
///
/// Returns an error if the transaction is not in the pool, its service id is
/// unknown, or the raw payload cannot be deserialized.
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
// All changes made by the transaction are isolated behind this checkpoint:
// kept via `commit()` on success, discarded via `rollback()` on failure or panic.
fork.checkpoint();
// Panics inside `execute` are caught so a misbehaving service cannot crash
// the node; storage errors are re-raised below (see the `is::<Error>` check).
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is an overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
// Persist the outcome: execution status, pool bookkeeping, membership in the
// block's transaction list, and the (height, index) location for lookups.
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
I: Iterator<Item = Signed<Precommit>>,
{
let patch = {
let mut fork = {
let mut fork = self.db.fork();
fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
fork
};
{
let mut schema = Schema::new(&mut fork);
// Persist the precommit messages that justify this block.
// NOTE(review): the loop owns `precommit`, so this `clone()` looks
// avoidable - confirm before removing.
for precommit in precommits {
schema.precommits_mut(&block_hash).push(precommit.clone());
}
// Consensus messages cache is useful only during one height, so it should be
// cleared when a new height is achieved.
schema.consensus_messages_cache_mut().clear();
// Shrink the pending-pool counter by the number of transactions that
// were included into the last (just committed) block.
let txs_in_block = schema.last_block().tx_count();
let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
debug_assert!(txs_count >= u64::from(txs_in_block));
schema
.transactions_pool_len_index_mut()
.set(txs_count - u64::from(txs_in_block));
}
fork.into_patch()
};
self.merge(patch)?;
// Invokes `after_commit` for each service in order of their identifiers
for (service_id, service) in self.service_map.iter() {
let context = ServiceContext::new(
self.service_keypair.0,
self.service_keypair.1.clone(),
self.api_sender.clone(),
self.fork(),
service_id as u16,
);
service.after_commit(&context);
}
Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
let mut fork = self.fork();
{
let mut schema = Schema::new(&mut fork);
schema.peers_cache_mut().put(pubkey, peer);
}
| pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
| self.merge(fork.into_patch())
.expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer. | random_line_split |
mod.rs | id
);
}
service_map.insert(id, service);
}
Self {
db: storage.into(),
service_map: Arc::new(service_map),
service_keypair: (service_public_key, service_secret_key),
api_sender,
}
}
/// Recreates the blockchain to reuse with a sandbox.
#[doc(hidden)]
pub fn clone_with_api_sender(&self, api_sender: ApiSender) -> Self {
    // Copy every field as-is, then swap in the supplied API sender.
    let mut blockchain = self.clone();
    blockchain.api_sender = api_sender;
    blockchain
}
/// Returns the `VecMap` for all services: a map from service identifiers
/// to the service implementations, allowing to go from an identifier to
/// the service itself.
pub fn service_map(&self) -> &Arc<VecMap<Box<dyn Service>>> {
    let services = &self.service_map;
    services
}
/// Creates a read-only snapshot of the current storage state.
pub fn snapshot(&self) -> Box<dyn Snapshot> {
    let database = &self.db;
    database.snapshot()
}
/// Creates a mutable fork of the current storage state that can later be
/// committed into the storage via the `merge` method.
pub fn fork(&self) -> Fork {
    let database = &self.db;
    database.fork()
}
/// Tries to create a `Transaction` object from the given raw message.
///
/// Succeeds only when both conditions hold:
///
/// - the blockchain has a service registered under the message's `service_id`;
/// - that service can deserialize the given raw message.
pub fn tx_from_raw(&self, raw: RawTransaction) -> Result<Box<dyn Transaction>, MessageError> {
    let service_id = raw.service_id() as usize;
    match self.service_map.get(service_id) {
        Some(service) => service.tx_from_raw(raw),
        None => Err(MessageError::from("Service not found.")),
    }
}
/// Commits changes from the patch to the blockchain storage.
/// See [`Fork`](../storage/struct.Fork.html) for details.
pub fn merge(&mut self, patch: Patch) -> Result<(), Error> {
    let database = &mut self.db;
    database.merge(patch)
}
/// Returns the hash of the latest committed block, or the all-zero default
/// hash when no block has been committed yet.
///
/// # Panics
///
/// If the genesis block was not committed.
pub fn last_hash(&self) -> Hash {
    let snapshot = self.snapshot();
    let schema = Schema::new(&snapshot);
    match schema.block_hashes_by_height().last() {
        Some(hash) => hash,
        None => Hash::default(),
    }
}
/// Returns the latest committed block.
pub fn last_block(&self) -> Block {
    let snapshot = self.snapshot();
    Schema::new(&snapshot).last_block()
}
/// Creates and commits the genesis block with the given genesis configuration
/// if the blockchain has not been initialized.
///
/// # Panics
///
/// * If the genesis block was not committed.
/// * If storage version is not specified or not supported.
pub fn initialize(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
    let snapshot = self.snapshot();
    let genesis_committed = !Schema::new(&snapshot)
        .block_hashes_by_height()
        .is_empty();
    if genesis_committed {
        // Existing chain: only verify that its storage format is supported.
        self.assert_storage_version();
        return Ok(());
    }
    // Fresh database: record the storage version, then build block zero.
    self.initialize_metadata();
    self.create_genesis_block(cfg)
}
/// Initializes node-local metadata: writes the current storage version
/// into the database.
///
/// # Panics
///
/// Panics if the version could not be merged into the database.
fn initialize_metadata(&mut self) {
    let mut fork = self.db.fork();
    storage::StorageMetadata::write_current(&mut fork);
    match self.merge(fork.into_patch()) {
        Ok(()) => info!(
            "Storage version successfully initialized with value [{}].",
            storage::StorageMetadata::read(&self.db.snapshot()).unwrap(),
        ),
        // Surface the underlying storage error instead of discarding it
        // (the previous `is_ok()` check dropped the cause), so the operator
        // can see why initialization failed.
        Err(e) => panic!("Could not set database version: {}", e),
    }
}
/// Checks if the storage version is supported.
///
/// # Panics
///
/// Panics if the version is not supported or is not specified.
fn assert_storage_version(&self) {
    let version = storage::StorageMetadata::read(self.db.snapshot());
    match version {
        Ok(ver) => info!("Storage version is supported with value [{}].", ver),
        Err(e) => panic!("{}", e),
    }
}
/// Creates and commits the genesis block with the given genesis configuration.
///
/// Lets every service initialize itself and contribute its configuration,
/// commits the assembled `StoredConfiguration`, then creates and merges the
/// block at height zero.
///
/// # Panics
///
/// Panics if two services report the same name.
fn create_genesis_block(&mut self, cfg: GenesisConfig) -> Result<(), Error> {
let mut config_propose = StoredConfiguration {
previous_cfg_hash: Hash::zero(),
actual_from: Height::zero(),
validator_keys: cfg.validator_keys,
consensus: cfg.consensus,
services: BTreeMap::new(),
};
let patch = {
let mut fork = self.fork();
// Update service tables
for (_, service) in self.service_map.iter() {
let cfg = service.initialize(&mut fork);
let name = service.service_name();
// Service names key the configuration map, so they must be unique.
if config_propose.services.contains_key(name) {
panic!(
"Services already contain service with '{}' name, please change it",
name
);
}
config_propose.services.insert(name.into(), cfg);
}
// Commit actual configuration
{
let mut schema = Schema::new(&mut fork);
// If a zero-height block already exists, the genesis block was
// committed earlier; nothing more to do (early return, not just
// a break out of this inner block).
if schema.block_hash_by_height(Height::zero()).is_some() {
// TODO create genesis block for MemoryDB and compare it hash with zero block. (ECR-1630)
return Ok(());
}
schema.commit_configuration(config_propose);
};
self.merge(fork.into_patch())?;
// Build the empty block at height zero; `.1` selects the resulting patch.
self.create_patch(ValidatorId::zero(), Height::zero(), &[])
.1
};
self.merge(patch)?;
Ok(())
}
/// Maps a tuple (`u16`, `u16`) of service table coordinates to a 32-byte
/// value used as the `ProofMapIndex` key (it currently supports only fixed
/// size keys). Hashing the packed coordinates distributes keys uniformly
/// over the key space (compared to padding).
/// # Arguments
///
/// * `service_id` - `service_id` as returned by an instance of the
///   `Service` trait
/// * `table_idx` - index of the service table in the `Vec` returned by the
///   `state_hash` method of an instance of the `Service` trait
pub fn service_table_unique_key(service_id: u16, table_idx: usize) -> Hash {
    // `table_idx` is truncated to u16 below; catch overflow in debug builds.
    debug_assert!(table_idx <= u16::max_value() as usize);
    // Pack both coordinates as little-endian u16 values and hash the buffer.
    let size = mem::size_of::<u16>();
    let mut buf = vec![0; 2 * size];
    {
        let (id_part, idx_part) = buf.split_at_mut(size);
        LittleEndian::write_u16(id_part, service_id);
        LittleEndian::write_u16(idx_part, table_idx as u16);
    }
    crypto::hash(&buf)
}
/// Executes the given transactions from the pool.
/// Then collects the resulting changes from the current storage state and returns them
/// with the hash of the resulting block.
///
/// # Panics
///
/// Panics if any of the given transactions cannot be executed (missing from
/// the pool, unknown service id, or payload deserialization failure).
pub fn create_patch(
&self,
proposer_id: ValidatorId,
height: Height,
tx_hashes: &[Hash],
) -> (Hash, Patch) {
// Create fork
let mut fork = self.fork();
let block_hash = {
// Get last hash.
let last_hash = self.last_hash();
// Save & execute transactions.
for (index, hash) in tx_hashes.iter().enumerate() {
self.execute_transaction(*hash, height, index, &mut fork)
// Execution could fail if the transaction
// cannot be deserialized or it isn't in the pool.
.expect("Transaction execution error.");
}
// Invoke execute method for all services.
for service in self.service_map.values() {
// Skip execution for genesis block.
if height > Height(0) {
before_commit(service.as_ref(), &mut fork);
}
}
// Get tx & state hash.
let (tx_hash, state_hash) = {
let state_hashes = {
// Read-only schema borrow: gather (key, table-hash) pairs for the
// core tables and every service table before mutating the fork.
let schema = Schema::new(&fork);
let vec_core_state = schema.core_state_hash();
let mut state_hashes = Vec::new();
for (idx, core_table_hash) in vec_core_state.into_iter().enumerate() {
// Core tables are keyed under the reserved CORE_SERVICE id.
let key = Self::service_table_unique_key(CORE_SERVICE, idx);
state_hashes.push((key, core_table_hash));
}
for service in self.service_map.values() {
let service_id = service.service_id();
let vec_service_state = service.state_hash(&fork);
for (idx, service_table_hash) in vec_service_state.into_iter().enumerate() {
let key = Self::service_table_unique_key(service_id, idx);
state_hashes.push((key, service_table_hash));
}
}
state_hashes
};
// The read-only borrow above has ended, so a mutable schema is safe here.
let mut schema = Schema::new(&mut fork);
let state_hash = {
// Aggregate every table hash into a single Merkle root for the block.
let mut sum_table = schema.state_hash_aggregator_mut();
for (key, hash) in state_hashes {
sum_table.put(&key, hash)
}
sum_table.merkle_root()
};
let tx_hash = schema.block_transactions(height).merkle_root();
(tx_hash, state_hash)
};
// Create block.
let block = Block::new(
proposer_id,
height,
tx_hashes.len() as u32,
&last_hash,
&tx_hash,
&state_hash,
);
trace!("execute block = {:?}", block);
// Calculate block hash.
let block_hash = block.hash();
// Update height.
let mut schema = Schema::new(&mut fork);
schema.block_hashes_by_height_mut().push(block_hash);
// Save block.
schema.blocks_mut().put(&block_hash, block);
block_hash
};
(block_hash, fork.into_patch())
}
/// Executes a single transaction from the pool inside `fork` and records its
/// result, block location and committed status into the core schema.
///
/// Returns an error if the transaction is not in the pool, its service id is
/// unknown, or the raw payload cannot be deserialized.
fn execute_transaction(
&self,
tx_hash: Hash,
height: Height,
index: usize,
fork: &mut Fork,
) -> Result<(), failure::Error> {
let (tx, raw, service_name) = {
let schema = Schema::new(&fork);
let raw = schema.transactions().get(&tx_hash).ok_or_else(|| {
failure::err_msg(format!(
"BUG: Cannot find transaction in database. tx: {:?}",
tx_hash
))
})?;
let service_name = self.service_map
.get(raw.service_id() as usize)
.ok_or_else(|| {
failure::err_msg(format!(
"Service not found. Service id: {}",
raw.service_id()
))
})?
.service_name();
let tx = self.tx_from_raw(raw.payload().clone()).or_else(|error| {
Err(failure::err_msg(format!(
"Service <{}>: {}, tx: {:?}",
service_name,
error.description(),
tx_hash
)))
})?;
(tx, raw, service_name)
};
// All changes made by the transaction are isolated behind this checkpoint:
// kept via `commit()` on success, discarded via `rollback()` on failure or panic.
fork.checkpoint();
// Panics inside `execute` are caught so a misbehaving service cannot crash
// the node; storage errors are re-raised below (see the `is::<Error>` check).
let catch_result = panic::catch_unwind(panic::AssertUnwindSafe(|| {
let context = TransactionContext::new(&mut *fork, &raw);
tx.execute(context)
}));
let tx_result = TransactionResult(match catch_result {
Ok(execution_result) => {
match execution_result {
Ok(()) => {
fork.commit();
}
Err(ref e) => {
// Unlike panic, transaction failure isn't that rare, so logging the
// whole transaction body is an overkill: it can be relatively big.
info!(
"Service <{}>: {:?} transaction execution failed: {:?}",
service_name, tx_hash, e
);
fork.rollback();
}
}
execution_result.map_err(TransactionError::from)
}
Err(err) => {
if err.is::<Error>() {
// Continue panic unwind if the reason is StorageError.
panic::resume_unwind(err);
}
fork.rollback();
error!(
"Service <{}>: {:?} transaction execution panicked: {:?}",
service_name, tx, err
);
Err(TransactionError::from_panic(&err))
}
});
// Persist the outcome: execution status, pool bookkeeping, membership in the
// block's transaction list, and the (height, index) location for lookups.
let mut schema = Schema::new(fork);
schema.transaction_results_mut().put(&tx_hash, tx_result);
schema.commit_transaction(&tx_hash);
schema.block_transactions_mut(height).push(tx_hash);
let location = TxLocation::new(height, index as u64);
schema.transactions_locations_mut().put(&tx_hash, location);
Ok(())
}
/// Commits to the blockchain a new block with the indicated changes (patch),
/// hash and Precommit messages. After that invokes `after_commit`
/// for each service in the increasing order of their identifiers.
pub fn commit<I>(&mut self, patch: &Patch, block_hash: Hash, precommits: I) -> Result<(), Error>
where
    I: Iterator<Item = Signed<Precommit>>,
{
    let patch = {
        let mut fork = {
            let mut fork = self.db.fork();
            fork.merge(patch.clone()); // FIXME: Avoid cloning here. (ECR-1631)
            fork
        };
        {
            let mut schema = Schema::new(&mut fork);
            // Persist the precommit messages that justify this block. The
            // iterator yields owned messages, so they are pushed directly
            // (the former per-message `clone()` was a needless allocation).
            for precommit in precommits {
                schema.precommits_mut(&block_hash).push(precommit);
            }
            // Consensus messages cache is useful only during one height, so it should be
            // cleared when a new height is achieved.
            schema.consensus_messages_cache_mut().clear();
            // Shrink the pending-pool counter by the number of transactions
            // that were included into the committed block.
            let txs_in_block = schema.last_block().tx_count();
            let txs_count = schema.transactions_pool_len_index().get().unwrap_or(0);
            debug_assert!(txs_count >= u64::from(txs_in_block));
            schema
                .transactions_pool_len_index_mut()
                .set(txs_count - u64::from(txs_in_block));
        }
        fork.into_patch()
    };
    self.merge(patch)?;
    // Invokes `after_commit` for each service in order of their identifiers
    for (service_id, service) in self.service_map.iter() {
        let context = ServiceContext::new(
            self.service_keypair.0,
            self.service_keypair.1.clone(),
            self.api_sender.clone(),
            self.fork(),
            service_id as u16,
        );
        service.after_commit(&context);
    }
    Ok(())
}
/// Saves the `Connect` message from a peer to the cache.
pub(crate) fn save_peer(&mut self, pubkey: &PublicKey, peer: Signed<Connect>) {
    let mut fork = self.fork();
    // The schema temporary drops at the end of this statement, releasing
    // its borrow of `fork` before the patch is taken.
    Schema::new(&mut fork).peers_cache_mut().put(pubkey, peer);
    self.merge(fork.into_patch())
        .expect("Unable to save peer to the peers cache");
}
/// Removes from the cache the `Connect` message from a peer.
pub fn remove_peer_with_pubkey(&mut self, key: &PublicKey) {
    let mut fork = self.fork();
    // The schema temporary drops at the end of this statement, releasing
    // its borrow of `fork` before the patch is taken.
    Schema::new(&mut fork).peers_cache_mut().remove(key);
    self.merge(fork.into_patch())
        .expect("Unable to remove peer from the peers cache");
}
/// Returns `Connect` messages from peers saved in the cache, if any.
pub fn get_saved_peers(&self) -> HashMap<PublicKey, Signed<Connect>> {
let schema = Schema::new(self.snapshot());
let peers_cache = schema.peers_cache();
let it = peers_cache.iter().map(|(k, v)| (k, v.clone()));
it.collect()
}
/// Saves the given raw message to the consensus messages cache.
pub(crate) fn save_message<T: ProtocolMessage>(&mut self, round: Round, raw: Signed<T>) {
    // Delegate to the batch variant with a single-element iterator.
    let single = iter::once(raw.into());
    self.save_messages(round, single);
}
/// Saves a collection of `SignedMessage`s to the consensus messages cache
/// with a single access to the `Fork` instance.
pub(crate) fn save_messages<I>(&mut self, round: Round, iter: I)
where
    I: IntoIterator<Item = Message>,
{
    let mut fork = self.fork();
    {
        // Scope the schema so its mutable borrow of `fork` ends before
        // `into_patch` consumes the fork.
        let mut schema = Schema::new(&mut fork);
        schema.consensus_messages_cache_mut().extend(iter);
        schema.set_consensus_round(round);
    }
    let patch = fork.into_patch();
    self.merge(patch)
        .expect("Unable to save messages to the consensus cache");
}
}
fn | before_commit | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.