Fill-in-the-middle dataset preview. Columns: file_name (large_string, 4-69 chars), prefix (large_string, 0-26.7k chars), suffix (large_string, 0-24.8k chars), middle (large_string, 0-2.12k chars), fim_type (large_string, 4 classes).
error.rs
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
use core::{convert::TryInto, fmt, ptr::NonNull, task::Poll};
use errno::{errno, Errno};
use libc::c_char;
use s2n_tls_sys::*;
use std::{convert::TryFrom, ffi::CStr};
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorType {
UnknownErrorType,
NoError,
IOError,
ConnectionClosed,
Blocked,
Alert,
ProtocolError,
InternalError,
UsageError,
Application,
}
#[non_exhaustive]
#[derive(Debug, PartialEq)]
pub enum ErrorSource {
Library,
Bindings,
Application,
}
impl From<libc::c_int> for ErrorType {
fn from(input: libc::c_int) -> Self {
match input as s2n_error_type::Type {
s2n_error_type::OK => ErrorType::NoError,
s2n_error_type::IO => ErrorType::IOError,
s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
s2n_error_type::BLOCKED => ErrorType::Blocked,
s2n_error_type::ALERT => ErrorType::Alert,
s2n_error_type::PROTO => ErrorType::ProtocolError,
s2n_error_type::INTERNAL => ErrorType::InternalError,
s2n_error_type::USAGE => ErrorType::UsageError,
_ => ErrorType::UnknownErrorType,
}
}
}
enum Context {
InvalidInput,
MissingWaker,
Code(s2n_status_code::Type, Errno),
Application(Box<dyn std::error::Error + Send + Sync + 'static>),
}
pub struct Error(Context);
pub trait Fallible {
type Output;
fn into_result(self) -> Result<Self::Output, Error>;
}
impl Fallible for s2n_status_code::Type {
type Output = s2n_status_code::Type;
fn into_result(self) -> Result<Self::Output, Error> {
if self >= s2n_status_code::SUCCESS {
Ok(self)
} else {
Err(Error::capture())
}
}
}
impl Fallible for isize {
type Output = usize;
fn into_result(self) -> Result<Self::Output, Error> {
// Negative values can't be converted to a real size
// and instead indicate an error.
self.try_into().map_err(|_| Error::capture())
}
}
impl Fallible for u64 {
type Output = Self;
/// Converts a u64 to a Result by checking for u64::MAX.
///
/// If a method that returns an unsigned int is fallible,
/// then the -1 error result wraps around to u64::MAX.
///
/// For a u64 to be Fallible, a result of u64::MAX must not be
/// possible without an error. For example, [`s2n_connection_get_delay`]
/// can't return u64::MAX as a valid result because
/// s2n-tls blinding delays are limited to 30s, or a return value of 3 * 10^10 ns,
/// which is significantly less than u64::MAX. [`s2n_connection_get_delay`]
/// would therefore only return u64::MAX for a -1 error result.
fn into_result(self) -> Result<Self::Output, Error> {
if self != Self::MAX {
Ok(self)
} else {
Err(Error::capture())
}
}
}
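// Added sanity note for the doc comment above: 30s = 3 * 10^10 ns, far below
// u64::MAX (about 1.8 * 10^19), so u64::MAX can only arise from the -1 error.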
impl<T> Fallible for *mut T {
type Output = NonNull<T>;
fn into_result(self) -> Result<Self::Output, Error> {
if let Some(value) = NonNull::new(self) {
Ok(value)
} else {
Err(Error::capture())
}
}
}
impl<T> Fallible for *const T {
type Output = *const T;
fn into_result(self) -> Result<Self::Output, Error> {
if !self.is_null() {
Ok(self)
} else {
Err(Error::capture())
}
}
}
pub trait Pollable {
type Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>>;
}
impl<T: Fallible> Pollable for T {
type Output = T::Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>> {
match self.into_result() {
Ok(r) => Ok(r).into(),
Err(err) if err.is_retryable() => Poll::Pending,
Err(err) => Err(err).into(),
}
}
}
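// Added usage sketch (illustrative only; `raw_len` stands in for a raw s2n
// FFI result and is not a real binding):
//
//     let n: usize = raw_len.into_result()?; // Err(_) when the call returned -1
//     match raw_len.into_poll() {
//         Poll::Pending => { /* retry later: s2n reported a Blocked error */ }
//         Poll::Ready(result) => { /* usable bytes or a hard failure */ }
//     }
//
// so bindings code never has to inspect s2n_errno directly.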
impl Error {
pub(crate) const INVALID_INPUT: Error = Self(Context::InvalidInput);
pub(crate) const MISSING_WAKER: Error = Self(Context::MissingWaker);
/// Converts an io::Error into an s2n-tls Error
pub fn io_error(err: std::io::Error) -> Error {
let errno = err.raw_os_error().unwrap_or(1);
errno::set_errno(errno::Errno(errno));
s2n_status_code::FAILURE.into_result().unwrap_err()
}
/// An error occurred while running application code.
///
/// Can be emitted from [`crate::callbacks::ConnectionFuture::poll()`] to indicate
/// async task failure.
pub fn application(error: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
Self(Context::Application(error))
}
fn capture() -> Self {
unsafe {
let s2n_errno = s2n_errno_location();
let code = *s2n_errno;
// https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#error-handling
//# To avoid possible confusion, s2n_errno should be cleared after processing
//# an error: s2n_errno = S2N_ERR_T_OK
*s2n_errno = s2n_error_type::OK as _;
Self(Context::Code(code, errno()))
}
}
pub fn name(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "InvalidInput",
Context::MissingWaker => "MissingWaker",
Context::Application(_) => "ApplicationError",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror_name(code))
},
}
}
pub fn message(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "A parameter was incorrect",
Context::MissingWaker => {
"Tried to perform an asynchronous operation without a configured waker"
}
Context::Application(_) => "An error occurred while executing application code",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror(code, core::ptr::null()))
},
}
}
pub fn debug(&self) -> Option<&'static str> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => unsafe {
let debug_info = s2n_strerror_debug(code, core::ptr::null());
// The debug string should be set to a constant static string
// when an error occurs, but because it starts out as NULL
// we should defend against mistakes.
if debug_info.is_null() {
None
} else {
// If the string is not null, then we can assume that
// it is constant and static.
Some(cstr_to_str(debug_info))
}
},
}
}
pub fn kind(&self) -> ErrorType {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorType::UsageError,
Context::Application(_) => ErrorType::Application,
Context::Code(code, _) => unsafe { ErrorType::from(s2n_error_get_type(code)) },
}
}
pub fn source(&self) -> ErrorSource {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorSource::Bindings,
Context::Application(_) => ErrorSource::Application,
Context::Code(_, _) => ErrorSource::Library,
}
}
#[allow(clippy::borrowed_box)]
/// Returns an [`std::error::Error`] if the error source was [`ErrorSource::Application`],
/// otherwise returns None.
pub fn application_error(&self) -> Option<&Box<dyn std::error::Error + Send + Sync + 'static>> {
if let Self(Context::Application(err)) = self {
Some(err)
} else {
None
}
}
pub fn is_retryable(&self) -> bool {
matches!(self.kind(), ErrorType::Blocked)
}
}
#[cfg(feature = "quic")]
impl Error {
/// s2n-tls does not send specific errors.
///
/// However, we can attempt to map local errors into the alerts
/// that we would have sent if we sent alerts.
///
/// This API is currently incomplete and should not be relied upon.
pub fn alert(&self) -> Option<u8> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => {
let mut alert = 0;
let r = unsafe { s2n_error_get_alert(code, &mut alert) };
match r.into_result() {
Ok(_) => Some(alert),
Err(_) => None,
}
}
}
}
}
/// # Safety
///
/// The caller must ensure the char pointer contains a valid
/// UTF-8 string from a trusted source
unsafe fn cstr_to_str(v: *const c_char) -> &'static str {
let slice = CStr::from_ptr(v);
let bytes = slice.to_bytes();
core::str::from_utf8_unchecked(bytes)
}
impl TryFrom<std::io::Error> for Error {
type Error = Error;
fn try_from(value: std::io::Error) -> Result<Self, Self::Error> {
let io_inner = value.into_inner().ok_or(Error::INVALID_INPUT)?;
io_inner
.downcast::<Self>()
.map(|error| *error)
.map_err(|_| Error::INVALID_INPUT)
}
}
impl From<Error> for std::io::Error {
fn from(input: Error) -> Self {
if let Context::Code(_, errno) = input.0 {
if ErrorType::IOError == input.kind() {
let bare = std::io::Error::from_raw_os_error(errno.0);
return std::io::Error::new(bare.kind(), input);
}
}
std::io::Error::new(std::io::ErrorKind::Other, input)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Error");
if let Context::Code(code, _) = self.0 {
s.field("code", &code);
}
s.field("name", &self.name());
s.field("message", &self.message());
s.field("kind", &self.kind());
s.field("source", &self.source());
if let Some(debug) = self.debug() {
s.field("debug", &debug);
}
// "errno" is only known to be meaningful for IOErrors.
// However, it has occasionally proved useful for debugging
// other errors, so include it for all errors.
if let Context::Code(_, errno) = self.0 {
s.field("errno", &errno.to_string());
}
s.finish()
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Self(Context::Application(err)) = self {
err.fmt(f)
} else {
f.write_str(self.message())
}
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
// implement `source` in the same way `std::io::Error` implements it:
// https://doc.rust-lang.org/std/io/struct.Error.html#method.source
if let Self(Context::Application(err)) = self {
err.source()
} else {
None
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{enums::Version, testing::client_hello::CustomError};
use errno::set_errno;
const FAILURE: isize = -1;
// This relies on an implementation detail of s2n-tls errors,
// and could make these tests brittle. However, the alternative
// is a real handshake producing a real IO error, so just updating
// this value if the definition of an IO error changes might be easier.
const S2N_IO_ERROR_CODE: s2n_status_code::Type = 1 << 26;
#[test]
fn s2n_io_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::ConnectionReset, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn s2n_error_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
set_errno(Errno(libc::ECONNRESET));
unsafe {
let s2n_errno_ptr = s2n_errno_location();
*s2n_errno_ptr = S2N_IO_ERROR_CODE - 1;
}
let s2n_error = FAILURE.into_result().unwrap_err();
assert_ne!(ErrorType::IOError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn invalid_input_to_std_io_error() -> Result<(), Box<dyn std::error::Error>> {
let s2n_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorType::UsageError, s2n_error.kind());
let io_error = std::io::Error::from(s2n_error);
assert_eq!(std::io::ErrorKind::Other, io_error.kind());
assert!(io_error.into_inner().is_some());
Ok(())
}
#[test]
fn error_source() -> Result<(), Box<dyn std::error::Error>> {
let bindings_error = Version::try_from(0).unwrap_err();
assert_eq!(ErrorSource::Bindings, bindings_error.source());
let library_error = FAILURE.into_result().unwrap_err();
assert_eq!(ErrorSource::Library, library_error.source());
Ok(())
}
#[test]
fn application_error() {
// test single level errors
{
let error = Error::application(Box::new(CustomError));
let app_error = error.application_error().unwrap();
let _custom_error = app_error.downcast_ref::<CustomError>().unwrap();
}
// make sure nested errors work
{
let io_error = std::io::Error::new(std::io::ErrorKind::Other, CustomError);
let error = Error::application(Box::new(io_error));
let app_error = error.application_error().unwrap();
let io_error = app_error.downcast_ref::<std::io::Error>().unwrap();
let _custom_error = io_error
.get_ref()
.unwrap()
.downcast_ref::<CustomError>()
.unwrap();
}
}
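// Added example (not part of the original suite): only a Blocked error is
// retryable, so any other failure must surface as Poll::Ready(Err(_)).
#[test]
fn into_poll_non_retryable() {
    set_errno(Errno(0));
    unsafe {
        *s2n_errno_location() = S2N_IO_ERROR_CODE;
    }
    match FAILURE.into_poll() {
        Poll::Ready(Err(e)) => assert_eq!(ErrorType::IOError, e.kind()),
        _ => panic!("expected Poll::Ready(Err(_))"),
    }
}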
}
constants.rs
use std::os::raw::{c_int, c_uint};
// Standard return values from Symisc public interfaces
const SXRET_OK: c_int = 0; /* Not an error */
const SXERR_MEM: c_int = -1; /* Out of memory */
const SXERR_IO: c_int = -2; /* IO error */
const SXERR_EMPTY: c_int = -3; /* Empty field */
const SXERR_LOCKED: c_int = -4; /* Locked operation */
const SXERR_ORANGE: c_int = -5; /* Out of range value */
const SXERR_NOTFOUND: c_int = -6; /* Item not found */
const SXERR_LIMIT: c_int = -7; /* Limit reached */
const SXERR_MORE: c_int = -8; /* Need more input */
const SXERR_INVALID: c_int = -9; /* Invalid parameter */
const SXERR_ABORT: c_int = -10; /* User callback request an operation abort */
const SXERR_EXISTS: c_int = -11; /* Item exists */
const SXERR_SYNTAX: c_int = -12; /* Syntax error */
const SXERR_UNKNOWN: c_int = -13; /* Unknown error */
const SXERR_BUSY: c_int = -14; /* Busy operation */
const SXERR_OVERFLOW: c_int = -15; /* Stack or buffer overflow */
const SXERR_WILLBLOCK: c_int = -16; /* Operation will block */
const SXERR_NOTIMPLEMENTED: c_int = -17; /* Operation not implemented */
const SXERR_EOF: c_int = -18; /* End of input */
const SXERR_PERM: c_int = -19; /* Permission error */
const SXERR_NOOP: c_int = -20; /* No-op */
const SXERR_FORMAT: c_int = -21; /* Invalid format */
const SXERR_NEXT: c_int = -22; /* Not an error */
const SXERR_OS: c_int = -23; /* System call return an error */
const SXERR_CORRUPT: c_int = -24; /* Corrupted pointer */
const SXERR_CONTINUE: c_int = -25; /* Not an error: Operation in progress */
const SXERR_NOMATCH: c_int = -26; /* No match */
const SXERR_RESET: c_int = -27; /* Operation reset */
const SXERR_DONE: c_int = -28; /* Not an error */
const SXERR_SHORT: c_int = -29; /* Buffer too short */
const SXERR_PATH: c_int = -30; /* Path error */
const SXERR_TIMEOUT: c_int = -31; /* Timeout */
const SXERR_BIG: c_int = -32; /* Too big for processing */
const SXERR_RETRY: c_int = -33; /* Retry your call */
const SXERR_IGNORE: c_int = -63; /* Ignore */
// Standard UnQLite return values
/// Successful result
pub const UNQLITE_OK: c_int = SXRET_OK;
// Beginning of error codes
/// Out of memory
pub const UNQLITE_NOMEM: c_int = SXERR_MEM;
/// Another thread has released this instance
pub const UNQLITE_ABORT: c_int = SXERR_ABORT;
/// IO error
pub const UNQLITE_IOERR: c_int = SXERR_IO;
/// Corrupt pointer
pub const UNQLITE_CORRUPT: c_int = SXERR_CORRUPT;
/// Forbidden Operation
pub const UNQLITE_LOCKED: c_int = SXERR_LOCKED;
/// The database file is locked
pub const UNQLITE_BUSY: c_int = SXERR_BUSY;
/// Operation done
pub const UNQLITE_DONE: c_int = SXERR_DONE;
/// Permission error
pub const UNQLITE_PERM: c_int = SXERR_PERM;
/// Method not implemented by the underlying Key/Value storage engine
pub const UNQLITE_NOTIMPLEMENTED: c_int = SXERR_NOTIMPLEMENTED;
/// No such record
pub const UNQLITE_NOTFOUND: c_int = SXERR_NOTFOUND;
/// No such method
pub const UNQLITE_NOOP: c_int = SXERR_NOOP;
/// Invalid parameter
pub const UNQLITE_INVALID: c_int = SXERR_INVALID;
/// End Of Input
pub const UNQLITE_EOF: c_int = SXERR_EOF;
/// Unknown configuration option
pub const UNQLITE_UNKNOWN: c_int = SXERR_UNKNOWN;
/// Database limit reached
pub const UNQLITE_LIMIT: c_int = SXERR_LIMIT;
/// Record exists
pub const UNQLITE_EXISTS: c_int = SXERR_EXISTS;
/// Empty record
pub const UNQLITE_EMPTY: c_int = SXERR_EMPTY;
/// Compilation error
pub const UNQLITE_COMPILE_ERR: c_int = -70;
/// Virtual machine error
pub const UNQLITE_VM_ERR: c_int = -71;
/// Full database (unlikely)
pub const UNQLITE_FULL: c_int = -73;
/// Unable to open the database file
pub const UNQLITE_CANTOPEN: c_int = -74;
/// Read only Key/Value storage engine
pub const UNQLITE_READ_ONLY: c_int = -75;
/// Locking protocol error
pub const UNQLITE_LOCKERR: c_int = -76;
// end-of-error-codes
pub const UNQLITE_CONFIG_JX9_ERR_LOG: c_int = 1;
pub const UNQLITE_CONFIG_MAX_PAGE_CACHE: c_int = 2;
pub const UNQLITE_CONFIG_ERR_LOG: c_int = 3;
pub const UNQLITE_CONFIG_KV_ENGINE: c_int = 4;
pub const UNQLITE_CONFIG_DISABLE_AUTO_COMMIT: c_int = 5;
pub const UNQLITE_CONFIG_GET_KV_NAME: c_int = 6;
// UnQLite/Jx9 Virtual Machine Configuration Commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the Jx9 (Via UnQLite) Virtual machine.
// These constants must be passed as the second argument to the [unqlite_vm_config()]
// interface.
// Each option requires a variable number of arguments.
// The [unqlite_vm_config()] interface will return UNQLITE_OK on success; any other return
// value indicates failure.
// There are many options, but the most important are: UNQLITE_VM_CONFIG_OUTPUT, which installs
// a VM output consumer callback; UNQLITE_VM_CONFIG_HTTP_REQUEST, which parses and registers
// an HTTP request; and UNQLITE_VM_CONFIG_ARGV_ENTRY, which populates the $argv array.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_vm_config.html
//
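// Added sketch (the extern declaration is an assumption for illustration;
// this module only defines the constants):
//
//     extern "C" { fn unqlite_vm_config(vm: *mut unqlite_vm, op: c_int, ...) -> c_int; }
//     let rc = unsafe { unqlite_vm_config(vm, UNQLITE_VM_CONFIG_RECURSION_DEPTH, 32) };
//     assert_eq!(rc, UNQLITE_OK);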
/// TWO ARGUMENTS: int (*xConsumer)(const void *, unsigned int, void *), void *
pub const UNQLITE_VM_CONFIG_OUTPUT: c_int = 1;
/// ONE ARGUMENT: const char *zIncludePath
pub const UNQLITE_VM_CONFIG_IMPORT_PATH: c_int = 2;
/// NO ARGUMENTS: Report all run-time errors in the VM output
pub const UNQLITE_VM_CONFIG_ERR_REPORT: c_int = 3;
/// ONE ARGUMENT: int nMaxDepth
pub const UNQLITE_VM_CONFIG_RECURSION_DEPTH: c_int = 4;
/// ONE ARGUMENT: unsigned int *pLength
pub const UNQLITE_VM_OUTPUT_LENGTH: c_int = 5;
/// TWO ARGUMENTS: const char *zName, unqlite_value *pValue
pub const UNQLITE_VM_CONFIG_CREATE_VAR: c_int = 6;
/// TWO ARGUMENTS: const char *zRawRequest, int nRequestLength
pub const UNQLITE_VM_CONFIG_HTTP_REQUEST: c_int = 7;
/// THREE ARGUMENTS: const char *zKey, const char *zValue, int nLen
pub const UNQLITE_VM_CONFIG_SERVER_ATTR: c_int = 8;
/// THREE ARGUMENTS: const char *zKey, const char *zValue, int nLen
pub const UNQLITE_VM_CONFIG_ENV_ATTR: c_int = 9;
/// ONE ARGUMENT: unqlite_value **ppValue
pub const UNQLITE_VM_CONFIG_EXEC_VALUE: c_int = 10;
/// ONE ARGUMENT: const unqlite_io_stream *pStream
pub const UNQLITE_VM_CONFIG_IO_STREAM: c_int = 11;
/// ONE ARGUMENT: const char *zValue
pub const UNQLITE_VM_CONFIG_ARGV_ENTRY: c_int = 12;
/// TWO ARGUMENTS: const void **ppOut, unsigned int *pOutputLen
pub const UNQLITE_VM_CONFIG_EXTRACT_OUTPUT: c_int = 13;
// Storage engine configuration commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the underlying storage engine
// (i.e Hash, B+tree, R+tree).
//
// These constants must be passed as the first argument to [unqlite_kv_config()].
// Each option requires a variable number of arguments.
// The [unqlite_kv_config()] interface will return UNQLITE_OK on success; any other return
// value indicates failure.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_kv_config.html
//
/// ONE ARGUMENT: unsigned int (*xHash)(const void *,unsigned int)
pub const UNQLITE_KV_CONFIG_HASH_FUNC: c_int = 1;
/// ONE ARGUMENT: int (*xCmp)(const void *,const void *,unsigned int)
pub const UNQLITE_KV_CONFIG_CMP_FUNC: c_int = 2;
// Global Library Configuration Commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the whole library.
// These constants must be passed as the first argument to [unqlite_lib_config()].
//
// Each option requires a variable number of arguments.
// The [unqlite_lib_config()] interface will return UNQLITE_OK on success; any other return
// value indicates failure.
// Notes:
// The default configuration is recommended for most applications and so the call to
// [unqlite_lib_config()] is usually not necessary. It is provided to support rare
// applications with unusual needs.
// The [unqlite_lib_config()] interface is not threadsafe. The application must ensure that
// no other [unqlite_*()] interfaces are invoked by other threads while [unqlite_lib_config()]
// is running. Furthermore, [unqlite_lib_config()] may only be invoked prior to library
// initialization using [unqlite_lib_init()] or [unqlite_init()] or after shutdown
// by [unqlite_lib_shutdown()]. If [unqlite_lib_config()] is called after [unqlite_lib_init()]
// or [unqlite_init()] and before [unqlite_lib_shutdown()] then it will return UNQLITE_LOCKED.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_lib.html
//
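// Added sketch (assumed binding, mirroring the C calling convention described
// above; it must run before unqlite_lib_init(), otherwise UNQLITE_LOCKED):
//
//     let rc = unsafe { unqlite_lib_config(UNQLITE_LIB_CONFIG_THREAD_LEVEL_MULTI) };
//     assert_eq!(rc, UNQLITE_OK);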
/// ONE ARGUMENT: const SyMemMethods *pMemMethods
pub const UNQLITE_LIB_CONFIG_USER_MALLOC: c_int = 1;
/// TWO ARGUMENTS: int (*xMemError)(void *), void *pUserData
pub const UNQLITE_LIB_CONFIG_MEM_ERR_CALLBACK: c_int = 2;
/// ONE ARGUMENT: const SyMutexMethods *pMutexMethods
pub const UNQLITE_LIB_CONFIG_USER_MUTEX: c_int = 3;
/// NO ARGUMENTS
pub const UNQLITE_LIB_CONFIG_THREAD_LEVEL_SINGLE: c_int = 4;
/// NO ARGUMENTS
pub const UNQLITE_LIB_CONFIG_THREAD_LEVEL_MULTI: c_int = 5;
/// ONE ARGUMENT: const unqlite_vfs *pVfs
pub const UNQLITE_LIB_CONFIG_VFS: c_int = 6;
/// ONE ARGUMENT: unqlite_kv_methods *pStorage
pub const UNQLITE_LIB_CONFIG_STORAGE_ENGINE: c_int = 7;
/// ONE ARGUMENT: int iPageSize
pub const UNQLITE_LIB_CONFIG_PAGE_SIZE: c_int = 8;
// These bit values are intended for use in the 3rd parameter to the [unqlite_open()] interface
// and in the 4th parameter to the xOpen method of the [unqlite_vfs] object.
//
/// Read only mode. Ok for [unqlite_open]
pub const UNQLITE_OPEN_READONLY: c_uint = 0x00000001;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_READWRITE: c_uint = 0x00000002;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_CREATE: c_uint = 0x00000004;
/// VFS only
pub const UNQLITE_OPEN_EXCLUSIVE: c_uint = 0x00000008;
/// VFS only
pub const UNQLITE_OPEN_TEMP_DB: c_uint = 0x00000010;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_NOMUTEX: c_uint = 0x00000020;
/// Omit journaling for this database. Ok for [unqlite_open]
pub const UNQLITE_OPEN_OMIT_JOURNALING: c_uint = 0x00000040;
/// An in memory database. Ok for [unqlite_open]
pub const UNQLITE_OPEN_IN_MEMORY: c_uint = 0x00000080;
/// Obtain a memory view of the whole file. Ok for [unqlite_open]
pub const UNQLITE_OPEN_MMAP: c_uint = 0x00000100;
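// Added illustrative test (not in the original file): the open modes are bit
// flags and are meant to be OR-ed together when calling [unqlite_open].
#[cfg(test)]
#[test]
fn open_flags_compose() {
    // Read-write access, creating the database file if it does not exist.
    let flags = UNQLITE_OPEN_CREATE | UNQLITE_OPEN_READWRITE;
    assert_eq!(flags, 0x00000006);
}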
// Synchronization Type Flags
//
// When UnQLite invokes the xSync() method of an [unqlite_io_methods] object it uses
// a combination of these integer values as the second argument.
//
// When the UNQLITE_SYNC_DATAONLY flag is used, it means that the sync operation only
// needs to flush data to mass storage. Inode information need not be flushed.
// If the lower four bits of the flag equal UNQLITE_SYNC_NORMAL, that means to use normal
// fsync() semantics. If the lower four bits equal UNQLITE_SYNC_FULL, that means to use
// Mac OS X style fullsync instead of fsync().
//
pub const UNQLITE_SYNC_NORMAL: c_int = 0x00002;
pub const UNQLITE_SYNC_FULL: c_int = 0x00003;
pub const UNQLITE_SYNC_DATAONLY: c_int = 0x00010;
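// Added illustrative test (not in the original file): the sync mode occupies
// the low four bits, and UNQLITE_SYNC_DATAONLY is OR-ed on top of it.
#[cfg(test)]
#[test]
fn sync_flags_low_bits() {
    let arg = UNQLITE_SYNC_FULL | UNQLITE_SYNC_DATAONLY;
    assert_eq!(arg & 0x0F, UNQLITE_SYNC_FULL);
    assert_ne!(arg & UNQLITE_SYNC_DATAONLY, 0);
}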
// File Locking Levels
//
// UnQLite uses one of these integer values as the second
// argument to calls it makes to the xLock() and xUnlock() methods
// of an [unqlite_io_methods] object.
//
pub const UNQLITE_LOCK_NONE: c_int = 0;
pub const UNQLITE_LOCK_SHARED: c_int = 1;
pub const UNQLITE_LOCK_RESERVED: c_int = 2;
pub const UNQLITE_LOCK_PENDING: c_int = 3;
pub const UNQLITE_LOCK_EXCLUSIVE: c_int = 4;
// Flags for the xAccess VFS method
//
// These integer constants can be used as the third parameter to
// the xAccess method of an [unqlite_vfs] object. They determine
// what kind of permissions the xAccess method is looking for.
// With UNQLITE_ACCESS_EXISTS, the xAccess method
// simply checks whether the file exists.
// With UNQLITE_ACCESS_READWRITE, the xAccess method
// checks whether the named directory is both readable and writable
// (in other words, if files can be added, removed, and renamed within
// the directory).
// The UNQLITE_ACCESS_READWRITE constant is currently used only by the
// [temp_store_directory pragma], though this could change in a future
// release of UnQLite.
// With UNQLITE_ACCESS_READ, the xAccess method
// checks whether the file is readable. The UNQLITE_ACCESS_READ constant is
// currently unused, though it might be used in a future release of
// UnQLite.
//
pub const UNQLITE_ACCESS_EXISTS: c_int = 0;
pub const UNQLITE_ACCESS_READWRITE: c_int = 1;
pub const UNQLITE_ACCESS_READ: c_int = 2;
// Possible seek positions.
//
pub const UNQLITE_CURSOR_MATCH_EXACT: c_int = 1;
pub const UNQLITE_CURSOR_MATCH_LE: c_int = 2;
pub const UNQLITE_CURSOR_MATCH_GE: c_int = 3;
// UnQLite journal file suffix.
//
// #ifndef UNQLITE_JOURNAL_FILE_SUFFIX
pub const UNQLITE_JOURNAL_FILE_SUFFIX: &'static str = "_unqlite_journal";
// #endif
//
// Call Context - Error Message Severity Level.
//
// The following constants are the allowed severity levels that can
// be passed as the second argument to the [unqlite_context_throw_error()] or
// [unqlite_context_throw_error_format()] interfaces.
// Refer to the official documentation for additional information.
//
/// Call context error such as unexpected number of arguments, invalid types and so on.
pub const UNQLITE_CTX_ERR: c_int = 1;
/// Call context Warning
pub const UNQLITE_CTX_WARNING: c_int = 2;
/// Call context Notice
pub const UNQLITE_CTX_NOTICE: c_int = 3;
fetch.rs
use std::io::{self, Write};
use std::cmp::min;
use std::collections::{HashMap, HashSet, VecDeque};
use std::net::SocketAddr;
use std::str::from_utf8;
use std::sync::{Arc, Mutex};
use std::time::{Instant, Duration};
use std::u64;
use abstract_ns::Address;
use futures::{Sink, Async, Stream};
use futures::future::{Future, join_all, ok, FutureResult};
use tokio_core::net::TcpStream;
use tokio_core::reactor::Timeout;
use tk_easyloop::{handle, timeout_at};
use tk_http::{Version, Status};
use tk_http::client::{Proto, Config, Error, Codec};
use tk_http::client::{Encoder, EncoderDone, Head, RecvMode};
use url::Url;
use ns_router::Router;
#[cfg(feature="tls_native")] use native_tls::TlsConnector;
#[cfg(feature="tls_native")] use tokio_tls::TlsConnectorExt;
#[cfg(feature="tls_rustls")] use rustls::ClientConfig;
#[cfg(feature="tls_rustls")] use tokio_rustls::ClientConfigExt;
#[cfg(feature="tls_rustls")] use webpki_roots;
#[derive(Debug)]
struct State {
offset: u64,
eof: u32,
last_line: Vec<u8>,
last_request: Instant,
}
#[derive(Debug)]
struct Cursor {
url: Arc<Url>,
state: Option<State>,
}
struct Requests {
cursors: VecDeque<Arc<Mutex<Cursor>>>,
timeout: Timeout,
}
#[derive(Debug)]
pub struct Request {
cursor: Arc<Mutex<Cursor>>,
range: Option<(u64, u64, u64)>,
}
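/// Groups resolved URLs by socket address and then assigns every URL to
/// exactly one address. Addresses serving the fewest URLs are claimed first,
/// so a URL reachable through several IPs is only fetched from one of them.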
pub fn group_addrs(vec: Vec<(Address, Vec<Arc<Url>>)>)
-> HashMap<SocketAddr, Vec<Arc<Url>>>
{
let mut urls_by_ip = HashMap::new();
for (addr, urls) in vec {
for sa in addr.addresses_at(0) {
let set = urls_by_ip.entry(sa)
.or_insert_with(HashSet::new);
for url in &urls {
set.insert(url.clone());
}
}
}
let mut ordered = urls_by_ip.iter().collect::<Vec<_>>();
ordered.sort_by_key(|&(_, y)| y.len());
let mut active_ips = HashMap::new();
let mut visited_urls = HashSet::new();
for (ip, urls) in ordered {
let urls = urls.difference(&visited_urls).cloned().collect::<Vec<_>>();
if urls.len() == 0 {
continue;
}
visited_urls.extend(urls.iter().cloned());
active_ips.insert(*ip, urls);
}
return active_ips;
}
#[allow(dead_code)]
pub fn tls_host(host: &str) -> &str {
match host.find(':') {
Some(x) => &host[..x],
None => host,
}
}
pub fn http(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
let resolver = resolver.clone();
let cfg = Config::new()
.keep_alive_timeout(Duration::new(25, 0))
.done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|ips| (ips, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.map(group_addrs)
.and_then(move |map| {
join_all(map.into_iter().map(move |(ip, urls)| {
let cfg = cfg.clone();
TcpStream::connect(&ip, &handle())
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(not(any(feature="tls_native", feature="tls_rustls")))]
pub fn https(_resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use futures::future::err;
if urls_by_host.len() > 0 {
eprintln!("Compiled without TLS support");
return Box::new(err(()));
}
return Box::new(ok(()));
}
#[cfg(feature="tls_native")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let cfg = Config::new().done();
let cx = TlsConnector::builder().expect("tls builder can be created")
.build().expect("tls builder works");
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no IPs");
let cfg = cfg.clone();
let cx = cx.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
cx.connect_async(tls_host(&host), sock).map_err(|e| {
io::Error::new(io::ErrorKind::Other, e)
})
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(feature="tls_rustls")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io::BufReader;
use std::fs::File;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let tls = Arc::new({
let mut cfg = ClientConfig::new();
let read_root = File::open("/etc/ssl/certs/ca-certificates.crt")
.map_err(|e| format!("{}", e))
.and_then(|f|
cfg.root_store.add_pem_file(&mut BufReader::new(f))
.map_err(|()| format!("unrecognized format")));
match read_root {
Ok((_, _)) => {} // TODO(tailhook) log numbers
Err(e) => {
warn!("Can't find root certificates at {:?}: {}. \
Using embedded ones.",
"/etc/ssl/certs/ca-certificates.crt", e);
}
}
cfg.root_store.add_server_trust_anchors(
&webpki_roots::TLS_SERVER_ROOTS);
cfg
});
let cfg = Config::new().done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
}))
.map_err(|e| error!("Error resolving: {}", e))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no ips");
let cfg = cfg.clone();
let tls = tls.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
tls.connect_async(tls_host(&host), sock)
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
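// Added note: a cursor that keeps hitting EOF is polled with a linear backoff
// of 100ms per consecutive empty read, capped at 7s (100ms * 70); a cursor
// that is still receiving data is re-requested immediately.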
fn request(cur: &Arc<Mutex<Cursor>>) -> Result<Request, Instant> {
let intr = cur.lock().unwrap();
match intr.state {
None => return Ok(Request {
cursor: cur.clone(),
range: None,
}),
Some(ref state) => {
if state.eof == 0 {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
let next = state.last_request +
Duration::from_millis(100 * min(state.eof as u64, 70));
if next < Instant::now() {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
return Err(next);
}
}
}
impl Requests {
fn new(urls: Vec<Arc<Url>>) -> Requests {
Requests {
cursors: urls.into_iter().map(|u| Arc::new(Mutex::new(Cursor {
url: u,
state: None,
}))).collect(),
timeout: timeout_at(Instant::now()),
}
}
}
impl Stream for Requests {
type Item = Request;
type Error = Error;
fn poll(&mut self) -> Result<Async<Option<Request>>, Error> {
loop {
match self.timeout.poll().unwrap() {
Async::Ready(()) => {}
Async::NotReady => return Ok(Async::NotReady),
}
let mut min_time = Instant::now() + Duration::new(7, 0);
for _ in 0..self.cursors.len() {
let cur = self.cursors.pop_front().unwrap();
let req = request(&cur);
self.cursors.push_back(cur);
match req {
Ok(req) => return Ok(Async::Ready(Some(req))),
Err(time) if min_time > time => min_time = time,
Err(_) => {}
}
}
self.timeout = timeout_at(min_time);
}
}
}
impl<S> Codec<S> for Request {
type Future = FutureResult<EncoderDone<S>, Error>;
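    // Added note: the Range header deliberately starts one byte before the
    // current offset; data_received() re-checks that overlap byte to detect
    // a file that was truncated or rotated between polls.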
fn start_write(&mut self, mut e: Encoder<S>) -> Self::Future {
let cur = self.cursor.lock().unwrap();
e.request_line("GET", cur.url.path(), Version::Http11);
cur.url.host_str().map(|x| {
e.add_header("Host", x).unwrap();
});
match cur.state {
Some(State { offset, .. }) => {
e.format_header("Range",
format_args!("bytes={}-{}",
offset-1, offset+65535)).unwrap();
}
None => {
e.add_header("Range", "bytes=-4096").unwrap();
}
}
e.done_headers().unwrap();
ok(e.done())
}
fn headers_received(&mut self, headers: &Head) -> Result<RecvMode, Error> {
let status = headers.status();
// TODO(tailhook) better error
if status != Some(Status::PartialContent) {
return Err(Error::custom(
format!("Server returned invalid status: {:?}", status)));
}
for (name, value) in headers.headers() {
if name == "Content-Range" {
let str_value = from_utf8(value)
.expect("valid content-range header");
if !str_value.starts_with("bytes ") {
panic!("invalid content-range header");
}
let slash = str_value.find("/")
.expect("valid content-range header");
let dash = str_value[..slash].find("-")
.expect("valid content-range header");
let from = str_value[6..dash].parse::<u64>()
.expect("valid content-range header");
let mut to = str_value[dash+1..slash].parse::<u64>()
.expect("valid content-range header");
let total = str_value[slash+1..].parse::<u64>()
.expect("valid content-range header");
// bug in cantal :(
if to == u64::MAX {
to = 0;
}
self.range = Some((from, to, total));
}
}
Ok(RecvMode::buffered(65536))
}
fn data_received(&mut self, data: &[u8], end: bool)
-> Result<Async<usize>, Error>
{
assert!(end);
let consumed = data.len();
let (from, to, total) = self.range.unwrap();
let mut cur = self.cursor.lock().unwrap();
let (pos, eof, mut last_line) = match cur.state.take() {
Some(state) => (Some(state.offset), state.eof, state.last_line),
None => (None, 0, b"".to_vec()),
};
let data = if pos.is_some() {
if pos != Some(from+1) {
last_line.clear();
println!("[.. skipped..]");
&data
} else if data.len() > 0 {
&data[1..]
} else {
&data
}
} else {
&data
};
let (last_line, end) = match data.iter().rposition(|&x| x == b'\n') {
Some(end) => (data[end+1..].to_vec(), end+1),
None => ({last_line.extend(data); last_line}, 0)
};
cur.state = Some(State {
eof: if to+1 == total {
if data.len() > 0 { 1 } else { eof.saturating_add(1) }
} else { 0 },
offset: to+1,
last_line: last_line,
last_request: Instant::now(),
});
io::stdout().write_all(&data[..end]).unwrap();
io::stdout().flush().unwrap();
Ok(Async::Ready(consumed))
}
}
time.rs
//!Constants and structures from time classes
//!
//! This includes include/uapi/linux/time.h, include/linux/time.h, and include/linux/time64.h
///A structure that contains the number of seconds and nanoseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timespec {
///The number of seconds contained in this timespec
pub tv_sec: ::time_t,
///The number of nanoseconds contained in this timespec
pub tv_nsec: ::c_long
}
impl timespec {
///Creates a new timespec with both values defaulting to zero
pub fn new() -> timespec {
timespec { tv_sec: 0, tv_nsec: 0 }
}
///Creates a new timespec from the specified number of seconds
pub fn from_seconds(seconds: i64) -> timespec {
timespec { tv_sec: seconds, tv_nsec: 0 }
}
///Gets a representation of this timespec as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_nsec as i64 / NSEC_PER_MSEC)
}
///Gets a representation of this timespec as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timespec, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_nsec = 0;
}
}
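// Editorial sketch of the arithmetic above: for tv_sec = 4 and
// tv_nsec = 500_000_000, to_milliseconds() yields
// 4 * 1000 + 500_000_000 / 1_000_000 = 4500, and to_seconds() then
// truncates 4500 / 1000 down to 4.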
///A structure that contains the number of seconds and microseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timeval {
///The number of seconds contained in this timeval
pub tv_sec: ::time_t,
///The number of microseconds contained in this timeval
pub tv_usec: ::suseconds_t
}
impl timeval {
///Creates a new timeval with both values defaulting to zero
pub fn new() -> timeval {
timeval { tv_sec: 0, tv_usec: 0 }
}
///Creates a new timeval from the specified number of seconds
pub fn from_seconds(seconds: ::time_t) -> timeval {
timeval { tv_sec: seconds, tv_usec: 0 }
}
///Gets a representation of this timeval as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_usec as i64 / USEC_PER_MSEC)
}
///Gets a representation of this timeval as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timeval, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_usec = 0;
}
}
///A structure containing information on the time-based location of a timezone
///
///Please note that this does not include the name or country code, only the minutes west of Greenwich and the type of DST correction
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timezone {
///The number of minutes west of Greenwich
pub tz_minuteswest: ::c_int,
///The type of Daylight Saving Time correction
pub tz_dsttime: ::c_int
}
//Names of the interval timers
///An interval timer that decrements in real time
///
///On expiration, a SIGALRM is delivered
pub const ITIMER_REAL: ::c_int = 0;
///An interval timer that decrements only when the process is executing.
///
///On expiration, a SIGVTALRM is delivered
pub const ITIMER_VIRTUAL: ::c_int = 1;
///Decrements both while the process is executing and while the system is executing on behalf of the process
///
///This is usually used to profile kernel-space and user-space concurrently.
///
///If coupled with ITIMER_VIRTUAL, you can separate the two values - What is left when ITIMER_VIRTUAL's value is removed is kernel time
pub const ITIMER_PROF: ::c_int = 2;
///An interval timer based on a `timespec`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerspec {
///The period of time this timer should run for (Need to verify)
pub it_interval: timespec,
///The amount of time left until expiration (Need to verify)
pub it_value: timespec
}
///An interval timer based on a `timeval`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerval { |
///The amount of time left until expiration (Need to verify)
pub it_value: timeval
}
///A system-wide clock that measures time from the "real world"
///
///This clock **is** affected by discontinuous jumps in system time, NTP, and user changes
pub const CLOCK_REALTIME: ::clockid_t = 0;
///A clock that measures monotonic time since an unspecified starting point
///
///Unless you manage to break your system, this unspecified point is usually when your computer powers on.
///
///This is not affected by user changes, but is by `adjtime` and NTP.
pub const CLOCK_MONOTONIC: ::clockid_t = 1;
///A high-resolution per-process timer from the processor.
pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 2;
///A (high-resolution?) thread-specific timer from the processor
pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 3;
///A hardware-based version of `CLOCK_MONOTONIC` that is not subject to changes
pub const CLOCK_MONOTONIC_RAW: ::clockid_t = 4;
///A faster but less precise version of `CLOCK_REALTIME`, measuring time in the "real world"
pub const CLOCK_REALTIME_COARSE: ::clockid_t = 5;
///A faster but less precise version of `CLOCK_MONOTONIC`, measuring time since an unspecified starting point
pub const CLOCK_MONOTONIC_COARSE: ::clockid_t = 6;
///Identical to `CLOCK_MONOTONIC`, but includes any time that the system is suspended.
pub const CLOCK_BOOTTIME: ::clockid_t = 7;
///Identical to `CLOCK_REALTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_REALTIME_ALARM: ::clockid_t = 8;
///Identical to `CLOCK_BOOTTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_BOOTTIME_ALARM: ::clockid_t = 9;
///A clock used for SGI systems. Need to investigate
pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
///A clock that shows International Atomic Time
pub const CLOCK_TAI: ::clockid_t = 11;
///The maximum clock ID that the system is allowed to have
pub const MAX_CLOCKS: ::clockid_t = 16; //Resolves to c_int. Please let me know if this should be c_int on its own
///A mask for supported clocks
///
///Needs to be investigated
pub const CLOCKS_MASK: ::clockid_t = CLOCK_REALTIME | CLOCK_MONOTONIC;
///A shorthand variant of CLOCK_MONOTONIC.
///
///This isn't used in the kernel. Is it left over from an old change that was reverted?
pub const CLOCKS_MONO: ::clockid_t = CLOCK_MONOTONIC;
///A flag indicating time is absolute
pub const TIMER_ABSTIME: ::c_int = 0x01;
///The type used for 64-bit time
pub type time64_t = i64;
///The number of milliseconds in a second
pub const MSEC_PER_SEC: ::c_long = 1000;
///The number of microseconds in a millisecond
pub const USEC_PER_MSEC: ::c_long = 1000;
///The number of nanoseconds in a microsecond
pub const NSEC_PER_USEC: ::c_long = 1000;
///The number of nanoseconds in a millisecond
pub const NSEC_PER_MSEC: ::c_long = 1000000;
///The number of microseconds in a second
pub const USEC_PER_SEC: ::c_long = 1000000;
///The number of nanoseconds in a second
pub const NSEC_PER_SEC: ::c_long = 1000000000;
///The number of femtoseconds in a second
pub const FSEC_PER_SEC: ::c_longlong = 1000000000000000;
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
pub const TIME_T_MAX: ::time_t = 0b01111111111111111111111111111111;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
pub const TIME_T_MAX: ::time_t = 0b0111111111111111111111111111111111111111111111111111111111111111;
///The maximum value of a time64_t
pub const TIME64_MAX: ::c_longlong = 0b0111111111111111111111111111111111111111111111111111111111111111;
///The maximum value of a ktime_t
pub const KTIME_MAX: ::c_longlong = 9_223_372_036_854_775_807;
///The maximum number of seconds in a ktime_t
pub const KTIME_SEC_MAX: ::c_longlong = 9_223_372_036;
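// Editorial sanity check: KTIME_SEC_MAX is KTIME_MAX expressed in whole
// seconds, i.e. 9_223_372_036_854_775_807 ns / 1_000_000_000 = 9_223_372_036 s
// (truncated).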
#[cfg(test)]
mod tests {
use super::*;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
#[test]
fn test_time_t_max_64() {
assert_eq!(9223372036854775807, TIME64_MAX);
}
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
#[test]
fn test_time_t_max_32() {
assert_eq!(2147483647, TIME64_MAX);
}
#[test]
fn test_time64_max() {
assert_eq!(9223372036854775807, TIME64_MAX);
}
#[test]
fn test_timeval_to_msec_sec() {
let mut val = ::timeval::from_seconds(4);
val.tv_usec += USEC_PER_SEC / 2;
assert_eq!(4500, val.to_milliseconds());
assert_eq!(4, val.to_seconds());
}
#[test]
fn test_timespec_to_msec_sec() {
let mut spec = ::timespec::from_seconds(4);
spec.tv_nsec += NSEC_PER_SEC / 2;
assert_eq!(4500, spec.to_milliseconds());
assert_eq!(4, spec.to_seconds());
}
#[test]
fn test_per_sec_accuracy() {
assert_eq!(NSEC_PER_MSEC, NSEC_PER_USEC * USEC_PER_MSEC);
assert_eq!(NSEC_PER_SEC, NSEC_PER_MSEC * MSEC_PER_SEC);
}
#[test]
fn test_timeval_utility_functions() {
let mut val: timeval = timeval::new();
assert_eq!(0, val.tv_sec);
val = timeval::from_seconds(100);
assert_eq!(100, val.tv_sec);
val.clear();
assert_eq!(0, val.tv_sec);
}
#[test]
fn test_timespec_utility_functions() {
let mut spec: timespec = timespec::new();
assert_eq!(0, spec.tv_sec);
spec = timespec::from_seconds(164);
assert_eq!(164, spec.tv_sec);
spec.clear();
assert_eq!(0, spec.tv_sec);
}
} | ///The period of time this timer should run for (Need to verify)
pub it_interval: timeval, | random_line_split |
time.rs | //!Constants and structures from time classes
//!
//! This includes include/uapi/linux/time.h, include/linux/time.h, and include/linux/time64.h
///A structure that contains the number of seconds and nanoseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct | {
///The number of seconds contained in this timespec
pub tv_sec: ::time_t,
///The number of nanoseconds contained in this timespec
pub tv_nsec: ::c_long
}
impl timespec {
///Creates a new timespec with both values defaulting to zero
pub fn new() -> timespec {
timespec { tv_sec: 0, tv_nsec: 0 }
}
///Creates a new timespec from the specified number of seconds
pub fn from_seconds(seconds: i64) -> timespec {
timespec { tv_sec: seconds, tv_nsec: 0 }
}
///Gets a representation of this timespec as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_nsec as i64 / NSEC_PER_MSEC)
}
///Gets a representation of this timespec as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timespec, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_nsec = 0;
}
}
///A structure that contains the number of seconds and microseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timeval {
///The number of seconds contained in this timeval
pub tv_sec: ::time_t,
///The number of microseconds contained in this timeval
pub tv_usec: ::suseconds_t
}
impl timeval {
///Creates a new timeval with both values defaulting to zero
pub fn new() -> timeval {
timeval { tv_sec: 0, tv_usec: 0 }
}
///Creates a new timeval from the specified number of seconds
pub fn from_seconds(seconds: ::time_t) -> timeval {
timeval { tv_sec: seconds, tv_usec: 0 }
}
///Gets a representation of this timeval as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_usec as i64 / USEC_PER_MSEC)
}
///Gets a representation of this timeval as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timeval, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_usec = 0;
}
}
///A structure containing information on the time-based location of a timezone
///
///Please note that this does not include the name or country code, only the minutes west of Greenwich and the type of DST correction
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timezone {
///The number of minutes west of Greenwich
pub tz_minuteswest: ::c_int,
///The type of Daylight Saving Time correction
pub tz_dsttime: ::c_int
}
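// Illustrative values (editorial, not from the original header): US Eastern
// Standard Time (UTC-5) would have tz_minuteswest = 300, while Central
// European Time (UTC+1) would have tz_minuteswest = -60.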
//Names of the interval timers
///An interval timer that decrements in real time
///
///On expiration, a SIGALRM is delivered
pub const ITIMER_REAL: ::c_int = 0;
///An interval timer that decrements only when the process is executing.
///
///On expiration, a SIGVTALRM is delivered
pub const ITIMER_VIRTUAL: ::c_int = 1;
///Decrements both while the process is executing and while the system is executing on behalf of the process
///
///This is usually used to profile kernel-space and user-space concurrently.
///
///If coupled with ITIMER_VIRTUAL, you can separate the two values - What is left when ITIMER_VIRTUAL's value is removed is kernel time
pub const ITIMER_PROF: ::c_int = 2;
///An interval timer based on a `timespec`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerspec {
///The period of time this timer should run for (Need to verify)
pub it_interval: timespec,
///The amount of time left until expiration (Need to verify)
pub it_value: timespec
}
///An interval timer based on a `timeval`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerval {
///The period of time this timer should run for (Need to verify)
pub it_interval: timeval,
///The amount of time left until expiration (Need to verify)
pub it_value: timeval
}
///A system-wide clock that measures time from the "real world"
///
///This clock **is** affected by discontinuous jumps in system time, NTP, and user changes
pub const CLOCK_REALTIME: ::clockid_t = 0;
///A clock that measures monotonic time since an unspecified starting point
///
///Unless you manage to break your system, this unspecified point is usually when your computer powers on.
///
///This is not affected by user changes, but is by `adjtime` and NTP.
pub const CLOCK_MONOTONIC: ::clockid_t = 1;
///A high-resolution per-process timer from the processor.
pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 2;
///A (high-resolution?) thread-specific timer from the processor
pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 3;
///A hardware-based version of `CLOCK_MONOTONIC` that is not subject to changes
pub const CLOCK_MONOTONIC_RAW: ::clockid_t = 4;
///A faster but less precise version of `CLOCK_REALTIME`, measuring time in the "real world"
pub const CLOCK_REALTIME_COARSE: ::clockid_t = 5;
///A faster but less precise version of `CLOCK_MONOTONIC`, measuring time since an unspecified starting point
pub const CLOCK_MONOTONIC_COARSE: ::clockid_t = 6;
///Identical to `CLOCK_MONOTONIC`, but includes any time that the system is suspended.
pub const CLOCK_BOOTTIME: ::clockid_t = 7;
///Identical to `CLOCK_REALTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_REALTIME_ALARM: ::clockid_t = 8;
///Identical to `CLOCK_BOOTTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_BOOTTIME_ALARM: ::clockid_t = 9;
///A clock used for SGI systems. Need to investigate
pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
///A clock that shows International Atomic Time
pub const CLOCK_TAI: ::clockid_t = 11;
///The maximum clock ID that the system is allowed to have
pub const MAX_CLOCKS: ::clockid_t = 16; //Resolves to c_int. Please let me know if this should be c_int on its own
///A mask for supported clocks
///
///Needs to be investigated
pub const CLOCKS_MASK: ::clockid_t = CLOCK_REALTIME | CLOCK_MONOTONIC;
///A shorthand variant of CLOCK_MONOTONIC.
///
///This isn't used in the kernel. Is it left over from an old change that was reverted?
pub const CLOCKS_MONO: ::clockid_t = CLOCK_MONOTONIC;
///A flag indicating time is absolute
pub const TIMER_ABSTIME: ::c_int = 0x01;
///The type used for 64-bit time
pub type time64_t = i64;
///The number of milliseconds in a second
pub const MSEC_PER_SEC: ::c_long = 1000;
///The number of microseconds in a millisecond
pub const USEC_PER_MSEC: ::c_long = 1000;
///The number of nanoseconds in a microsecond
pub const NSEC_PER_USEC: ::c_long = 1000;
///The number of nanoseconds in a millisecond
pub const NSEC_PER_MSEC: ::c_long = 1000000;
///The number of microseconds in a second
pub const USEC_PER_SEC: ::c_long = 1000000;
///The number of nanoseconds in a second
pub const NSEC_PER_SEC: ::c_long = 1000000000;
///The number of femtoseconds in a second
pub const FSEC_PER_SEC: ::c_longlong = 1000000000000000;
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
pub const TIME_T_MAX: ::time_t = 0b01111111111111111111111111111111;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
pub const TIME_T_MAX: ::time_t = 0b0111111111111111111111111111111111111111111111111111111111111111;
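// Editorial decoding of the binary literals above: the 32-bit value is
// 2^31 - 1 = 2_147_483_647 (i32::MAX) and the 64-bit value is
// 2^63 - 1 = 9_223_372_036_854_775_807 (i64::MAX).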
///The maximum value of a time64_t
pub const TIME64_MAX: ::c_longlong = 0b0111111111111111111111111111111111111111111111111111111111111111;
///The maximum value of a ktime_t
pub const KTIME_MAX: ::c_longlong = 9_223_372_036_854_775_807;
///The maximum number of seconds in a ktime_t
pub const KTIME_SEC_MAX: ::c_longlong = 9_223_372_036;
#[cfg(test)]
mod tests {
use super::*;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
#[test]
fn test_time_t_max_64() {
assert_eq!(9223372036854775807, TIME64_MAX);
}
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
#[test]
fn test_time_t_max_32() {
assert_eq!(2147483647, TIME64_MAX);
}
#[test]
fn test_time64_max() {
assert_eq!(9223372036854775807, TIME64_MAX);
}
#[test]
fn test_timeval_to_msec_sec() {
let mut val = ::timeval::from_seconds(4);
val.tv_usec += USEC_PER_SEC / 2;
assert_eq!(4500, val.to_milliseconds());
assert_eq!(4, val.to_seconds());
}
#[test]
fn test_timespec_to_msec_sec() {
let mut spec = ::timespec::from_seconds(4);
spec.tv_nsec += NSEC_PER_SEC / 2;
assert_eq!(4500, spec.to_milliseconds());
assert_eq!(4, spec.to_seconds());
}
#[test]
fn test_per_sec_accuracy() {
assert_eq!(NSEC_PER_MSEC, NSEC_PER_USEC * USEC_PER_MSEC);
assert_eq!(NSEC_PER_SEC, NSEC_PER_MSEC * MSEC_PER_SEC);
}
#[test]
fn test_timeval_utility_functions() {
let mut val: timeval = timeval::new();
assert_eq!(0, val.tv_sec);
val = timeval::from_seconds(100);
assert_eq!(100, val.tv_sec);
val.clear();
assert_eq!(0, val.tv_sec);
}
#[test]
fn test_timespec_utility_functions() {
let mut spec: timespec = timespec::new();
assert_eq!(0, spec.tv_sec);
spec = timespec::from_seconds(164);
assert_eq!(164, spec.tv_sec);
spec.clear();
assert_eq!(0, spec.tv_sec);
}
} | timespec | identifier_name |
tls-server.rs | // SPDX-FileCopyrightText: Copyright (c) 2017-2023 slowtec GmbH <[email protected]>
// SPDX-License-Identifier: MIT OR Apache-2.0
// load_certs() and partially load_keys() functions were copied from an example of the tokio tls library, available at:
// https://github.com/tokio-rs/tls/blob/master/tokio-rustls/examples/server/src/main.rs
//! TCP server example
use std::{
collections::HashMap,
fs::File,
io::{self, BufReader},
net::SocketAddr,
path::Path,
sync::{Arc, Mutex},
time::Duration,
};
use futures::future;
use pkcs8::der::Decode;
use rustls_pemfile::{certs, pkcs8_private_keys};
use tokio::net::{TcpListener, TcpStream};
use tokio_modbus::{prelude::*, server::tcp::Server};
use tokio_rustls::rustls::{self, Certificate, OwnedTrustAnchor, PrivateKey};
use tokio_rustls::{TlsAcceptor, TlsConnector};
use webpki::TrustAnchor;
fn load_certs(path: &Path) -> io::Result<Vec<Certificate>> {
certs(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
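// A minimal usage sketch (editorial; the paths are hypothetical):
//
// let certs = load_certs(Path::new("./pki/server.pem"))?;
// // Plain PKCS#8 key:
// let keys = load_keys(Path::new("./pki/server.key"), None)?;
// // Password-protected PKCS#8 key:
// let keys = load_keys(Path::new("./pki/server.enc.key"), Some("passphrase"))?;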
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
fn call(&self, req: Self::Request) -> Self::Future {
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(®_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(®_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
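// Worked example for the helpers above (editorial): starting from the demo
// holding registers {0: 10, 1: 20}, register_write(&mut regs, 1, &[7777])
// updates address 1 in place, register_read(&regs, 0, 2) then returns
// Ok(vec![10, 7777]), and register_read(&regs, 0, 3) fails at address 2
// with an ErrorKind::AddrNotAvailable error.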
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn | (socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time to start up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
});
root_cert_store.add_trust_anchors(trust_anchors);
let domain = "localhost";
let cert_path = Path::new("./pki/client.pem");
let key_path = Path::new("./pki/client.key");
let certs = load_certs(cert_path).unwrap();
let mut keys = load_keys(key_path, None).unwrap();
let config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_cert_store)
.with_client_auth_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))
.unwrap();
let connector = TlsConnector::from(Arc::new(config));
let stream = TcpStream::connect(&socket_addr).await.unwrap();
stream.set_nodelay(true).unwrap();
let domain = rustls::ServerName::try_from(domain)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid dnsname"))
.unwrap();
let transport = connector.connect(domain, stream).await.unwrap();
// Tokio modbus transport layer setup
let mut ctx = tcp::attach(transport);
println!("CLIENT: Reading 2 input registers...");
let response = ctx.read_input_registers(0x00, 2).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [1234, 5678]);
println!("CLIENT: Writing 2 holding registers...");
ctx.write_multiple_registers(0x01, &[7777, 8888])
.await
.unwrap();
// Read back a block including the two registers we wrote.
println!("CLIENT: Reading 4 holding registers...");
let response = ctx.read_holding_registers(0x00, 4).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [10, 7777, 8888, 40]);
// Now we try to read with an invalid register address.
// This should return a Modbus exception response with the code
// IllegalDataAddress.
println!("CLIENT: Reading nonexisting holding register address... (should return IllegalDataAddress)");
let response = ctx.read_holding_registers(0x100, 1).await;
println!("CLIENT: The result is '{response:?}'");
assert!(response.is_err());
// TODO: How can Modbus client identify Modbus exception responses? E.g. here we expect IllegalDataAddress
// Question here: https://github.com/slowtec/tokio-modbus/issues/169
println!("CLIENT: Done.")
},
tokio::time::sleep(Duration::from_secs(5))
);
}
| server_context | identifier_name |
tls-server.rs | // SPDX-FileCopyrightText: Copyright (c) 2017-2023 slowtec GmbH <[email protected]>
// SPDX-License-Identifier: MIT OR Apache-2.0
// load_certs() and partially load_keys() functions were copied from an example of the tokio tls library, available at:
// https://github.com/tokio-rs/tls/blob/master/tokio-rustls/examples/server/src/main.rs
//! TCP server example
use std::{
collections::HashMap,
fs::File,
io::{self, BufReader},
net::SocketAddr,
path::Path,
sync::{Arc, Mutex},
time::Duration,
};
use futures::future;
use pkcs8::der::Decode;
use rustls_pemfile::{certs, pkcs8_private_keys};
use tokio::net::{TcpListener, TcpStream};
use tokio_modbus::{prelude::*, server::tcp::Server};
use tokio_rustls::rustls::{self, Certificate, OwnedTrustAnchor, PrivateKey};
use tokio_rustls::{TlsAcceptor, TlsConnector};
use webpki::TrustAnchor;
fn load_certs(path: &Path) -> io::Result<Vec<Certificate>> {
certs(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
| Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(®_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
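// Editorial example: with the demo input registers {0: 1234, 1: 5678},
// register_read(&regs, 0, 2) yields Ok(vec![1234, 5678]), while
// register_read(&regs, 0, 3) hits the error branch at address 2 and returns
// an ErrorKind::AddrNotAvailable error.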
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(®_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn server_context(socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
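// Design note (editorial): on_connected returns Ok(None) when the TLS
// handshake fails, which lets the server skip that connection and keep
// accepting new ones instead of tearing down the whole accept loop with an
// error.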
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time to start up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
});
root_cert_store.add_trust_anchors(trust_anchors);
let domain = "localhost";
let cert_path = Path::new("./pki/client.pem");
let key_path = Path::new("./pki/client.key");
let certs = load_certs(cert_path).unwrap();
let mut keys = load_keys(key_path, None).unwrap();
let config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_cert_store)
.with_client_auth_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))
.unwrap();
let connector = TlsConnector::from(Arc::new(config));
let stream = TcpStream::connect(&socket_addr).await.unwrap();
stream.set_nodelay(true).unwrap();
let domain = rustls::ServerName::try_from(domain)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid dnsname"))
.unwrap();
let transport = connector.connect(domain, stream).await.unwrap();
// Tokio modbus transport layer setup
let mut ctx = tcp::attach(transport);
println!("CLIENT: Reading 2 input registers...");
let response = ctx.read_input_registers(0x00, 2).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [1234, 5678]);
println!("CLIENT: Writing 2 holding registers...");
ctx.write_multiple_registers(0x01, &[7777, 8888])
.await
.unwrap();
// Read back a block including the two registers we wrote.
println!("CLIENT: Reading 4 holding registers...");
let response = ctx.read_holding_registers(0x00, 4).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [10, 7777, 8888, 40]);
// Now we try to read with an invalid register address.
// This should return a Modbus exception response with the code
// IllegalDataAddress.
println!("CLIENT: Reading nonexisting holding register address... (should return IllegalDataAddress)");
let response = ctx.read_holding_registers(0x100, 1).await;
println!("CLIENT: The result is '{response:?}'");
assert!(response.is_err());
// TODO: How can Modbus client identify Modbus exception responses? E.g. here we expect IllegalDataAddress
// Question here: https://github.com/slowtec/tokio-modbus/issues/169
println!("CLIENT: Done.")
},
tokio::time::sleep(Duration::from_secs(5))
);
} | fn call(&self, req: Self::Request) -> Self::Future {
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) { | random_line_split |
tls-server.rs | // SPDX-FileCopyrightText: Copyright (c) 2017-2023 slowtec GmbH <[email protected]>
// SPDX-License-Identifier: MIT OR Apache-2.0
// load_certs() and partially load_keys() functions were copied from an example of the tokio tls library, available at:
// https://github.com/tokio-rs/tls/blob/master/tokio-rustls/examples/server/src/main.rs
//! TCP server example
use std::{
collections::HashMap,
fs::File,
io::{self, BufReader},
net::SocketAddr,
path::Path,
sync::{Arc, Mutex},
time::Duration,
};
use futures::future;
use pkcs8::der::Decode;
use rustls_pemfile::{certs, pkcs8_private_keys};
use tokio::net::{TcpListener, TcpStream};
use tokio_modbus::{prelude::*, server::tcp::Server};
use tokio_rustls::rustls::{self, Certificate, OwnedTrustAnchor, PrivateKey};
use tokio_rustls::{TlsAcceptor, TlsConnector};
use webpki::TrustAnchor;
fn load_certs(path: &Path) -> io::Result<Vec<Certificate>> {
certs(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
fn call(&self, req: Self::Request) -> Self::Future | Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
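// Design note (editorial): the register maps live behind Arc<Mutex<..>> so
// the service can be shared across connections while mutating one common
// register bank. A hypothetical caller could inspect the demo data like so:
//
// let service = ExampleService::new();
// assert_eq!(service.holding_registers.lock().unwrap().get(&0), Some(&10));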
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(®_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(®_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn server_context(socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time to start up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
});
root_cert_store.add_trust_anchors(trust_anchors);
let domain = "localhost";
let cert_path = Path::new("./pki/client.pem");
let key_path = Path::new("./pki/client.key");
let certs = load_certs(cert_path).unwrap();
let mut keys = load_keys(key_path, None).unwrap();
let config = rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_cert_store)
.with_client_auth_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))
.unwrap();
let connector = TlsConnector::from(Arc::new(config));
let stream = TcpStream::connect(&socket_addr).await.unwrap();
stream.set_nodelay(true).unwrap();
let domain = rustls::ServerName::try_from(domain)
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid dnsname"))
.unwrap();
let transport = connector.connect(domain, stream).await.unwrap();
// Tokio modbus transport layer setup
let mut ctx = tcp::attach(transport);
println!("CLIENT: Reading 2 input registers...");
let response = ctx.read_input_registers(0x00, 2).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [1234, 5678]);
println!("CLIENT: Writing 2 holding registers...");
ctx.write_multiple_registers(0x01, &[7777, 8888])
.await
.unwrap();
// Read back a block including the two registers we wrote.
println!("CLIENT: Reading 4 holding registers...");
let response = ctx.read_holding_registers(0x00, 4).await.unwrap();
println!("CLIENT: The result is '{response:?}'");
assert_eq!(response, [10, 7777, 8888, 40]);
// Now we try to read with an invalid register address.
// This should return a Modbus exception response with the code
// IllegalDataAddress.
println!("CLIENT: Reading nonexisting holding register address... (should return IllegalDataAddress)");
let response = ctx.read_holding_registers(0x100, 1).await;
println!("CLIENT: The result is '{response:?}'");
assert!(response.is_err());
// TODO: How can Modbus client identify Modbus exception responses? E.g. here we expect IllegalDataAddress
// Question here: https://github.com/slowtec/tokio-modbus/issues/169
println!("CLIENT: Done.")
},
tokio::time::sleep(Duration::from_secs(5))
);
}
| {
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))), | identifier_body |
packer.rs | // Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Error;
use crate::{Slate, SlateVersion, Slatepack, SlatepackArmor};
use ed25519_dalek::PublicKey as DalekPublicKey;
use ed25519_dalek::SecretKey as DalekSecretKey;
use crate::slatepack::slatepack::SlatePurpose;
#[derive(Clone, Debug)]
/// Arguments, mostly for encrypting/decrypting a slatepack
pub struct Slatepacker {
/// Sender address, None for wrapped
pub sender: Option<DalekPublicKey>,
/// Recipient addresses, None for wrapped
pub recipient: Option<DalekPublicKey>,
/// The content purpose. It customizes the serializer/deserializer for us.
pub content: SlatePurpose,
/// Slate data.
pub slate: Slate,
}
impl Slatepacker {
/// Wrap a slate with the packer. The slate is expected to be full.
pub fn wrap_slate(slate: Slate) -> Self {
Self {
sender: None,
recipient: None,
content: SlatePurpose::FullSlate,
slate,
}
}
/// Pack everything into the armored slatepack
pub fn encrypt_to_send(
slate: Slate,
slate_version: SlateVersion,
content: SlatePurpose,
sender: DalekPublicKey,
recipient: Option<DalekPublicKey>, // Encrypted only if recipient is some
secret: &DalekSecretKey,
use_test_rng: bool,
) -> Result<String, Error> {
let pack = Slatepack {
sender: Some(sender),
recipient,
content,
slate,
};
let (slate_bin, encrypted) = pack.to_binary(slate_version, secret, use_test_rng)?;
SlatepackArmor::encode(&slate_bin, encrypted)
}
/// Decode an armored slatepack, decrypting it if needed
pub fn decrypt_slatepack(data: &[u8], dec_key: &DalekSecretKey) -> Result<Self, Error> {
let (slate_bytes, encrypted) = SlatepackArmor::decode(data)?;
let slatepack = Slatepack::from_binary(&slate_bytes, encrypted, dec_key)?;
let Slatepack {
sender,
recipient,
content,
slate,
} = slatepack;
Ok(Self {
sender,
recipient,
content,
slate,
})
}
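// A minimal round-trip sketch (editorial; the key names are hypothetical and
// the test below is the complete, runnable version):
//
// let armored = Slatepacker::encrypt_to_send(
// slate, SlateVersion::SP, SlatePurpose::FullSlate,
// sender_pk, Some(recipient_pk), &sender_sk, false)?;
// let packer = Slatepacker::decrypt_slatepack(armored.as_bytes(), &recipient_sk)?;
// let slate = packer.to_result_slate();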
/// Get the content purpose from this slatepack
pub fn get_content(&self) -> SlatePurpose {
self.content.clone()
}
/// Get Sender info. It is needed to send the response back
pub fn get_sender(&self) -> Option<DalekPublicKey> |
/// Get Recipient info. It is needed to send the response back
pub fn get_recipient(&self) -> Option<DalekPublicKey> {
self.recipient.clone()
}
/// Convert this slatepack back to the resulting slate. Since the slatepack contains only the change set,
/// recovering the data requires the original slate to merge with.
pub fn to_result_slate(self) -> Slate {
self.slate
}
}
#[test]
fn slatepack_io_test() {
use crate::grin_core::core::KernelFeatures;
use crate::grin_core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::grin_core::global;
use crate::grin_keychain::BlindingFactor;
use crate::grin_keychain::ExtKeychain;
use crate::grin_util as util;
use crate::grin_util::secp::pedersen::{Commitment, RangeProof};
use crate::grin_util::secp::Signature;
use crate::grin_util::secp::{PublicKey, Secp256k1, SecretKey};
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate::{PaymentInfo, VersionCompatInfo};
use crate::ParticipantData;
use uuid::Uuid;
use x25519_dalek::PublicKey as xDalekPublicKey;
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let bytes_16: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let bytes_32: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32,
];
let bytes_32_2: [u8; 32] = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33,
];
let bytes_33: [u8; 33] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
];
let bytes_64: [u8; 64] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let sk = SecretKey::from_slice(&bytes_32).unwrap();
let secp = Secp256k1::new();
let dalek_sk = DalekSecretKey::from_bytes(&bytes_32).unwrap();
let dalek_pk = DalekPublicKey::from(&dalek_sk);
let dalek_sk2 = DalekSecretKey::from_bytes(&bytes_32_2).unwrap();
let dalek_pk2 = DalekPublicKey::from(&dalek_sk2);
// Let's test out Dalek to xDalek algebra.
let dalek_xpk = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk).unwrap();
let dalek_xpk2 = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk2).unwrap();
let dalek_xsk = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk);
let dalek_xsk2 = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk2);
let builded_xpk = xDalekPublicKey::from(&dalek_xsk);
let builded_xpk2 = xDalekPublicKey::from(&dalek_xsk2);
assert_eq!(dalek_xpk.as_bytes(), builded_xpk.as_bytes());
assert_eq!(dalek_xpk2.as_bytes(), builded_xpk2.as_bytes());
// check if Diffie-Hellman works...
let shared_secret1 = dalek_xsk.diffie_hellman(&dalek_xpk2);
let shared_secret2 = dalek_xsk2.diffie_hellman(&dalek_xpk);
assert_eq!(shared_secret1.as_bytes(), shared_secret2.as_bytes());
// Note: the slate data is fake. Just some random numbers; it will not pass any kind of validation.
let mut slate_enc = Slate {
compact_slate: true, // Slatepack works only for compact models.
num_participants: 2,
id: Uuid::from_bytes(bytes_16),
tx: Transaction::empty()
.with_offset(BlindingFactor::from_slice(&bytes_32))
.with_input(Input::new(OutputFeatures::Plain, Commitment(bytes_33)))
.with_output(Output::new(OutputFeatures::Plain, Commitment(bytes_33), RangeProof::zero()))
.with_kernel(TxKernel::with_features(KernelFeatures::Plain { fee: 321 })),
offset: BlindingFactor::from_slice(&bytes_32),
amount: 30000000000000000,
fee: 321,
height: 67,
lock_height: 0,
ttl_cutoff_height: Some(54),
participant_data: vec![
ParticipantData {
id: 0,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: None,
message: Some("message 1 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
},
ParticipantData {
id: 1,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
message: Some("message 2 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
}
],
version_info: VersionCompatInfo {
version: 3,
block_header_version: 1,
},
payment_proof: Some(PaymentInfo {
sender_address: ProvableAddress::from_str("a5ib4b2l5snzdgxzpdzouwxwvn4c3setpp5t5j2tr37n3uy3665qwnqd").unwrap(),
receiver_address: ProvableAddress::from_str("a5ib4b2l5snzdgxzpdzouwxwvn4c3setpp5t5j2tr37n3uy3665qwnqd").unwrap(),
receiver_signature: Some( util::to_hex(&bytes_64) ),
}),
};
// updating kernel excess
slate_enc.tx.body.kernels[0].excess = slate_enc.calc_excess::<ExtKeychain>(None).unwrap();
let slate_enc_str = format!("{:?}", slate_enc);
println!("start encrypted slate = {}", slate_enc_str);
// Not encoded, just want to review the data...
let slatepack_string_encrypted = Slatepacker::encrypt_to_send(
slate_enc.clone(),
SlateVersion::SP,
SlatePurpose::FullSlate,
dalek_pk.clone(),
Some(dalek_pk2.clone()), // sending to self, should be fine...
&dalek_sk,
true,
)
.unwrap();
println!("slatepack encrypted = {}", slatepack_string_encrypted);
// Not encoded, just want to review the data...
let slatepack_string_binary = Slatepacker::encrypt_to_send(
slate_enc.clone(),
SlateVersion::SP,
SlatePurpose::FullSlate,
dalek_pk.clone(),
None, // No recipient, should trigger non encrypted mode.
&dalek_sk,
true,
)
.unwrap();
println!("slatepack binary = {}", slatepack_string_binary);
assert!(slatepack_string_encrypted.len() > slatepack_string_binary.len());
// Testing if we can open it from a backup
let slatepack =
Slatepacker::decrypt_slatepack(slatepack_string_encrypted.as_bytes(), &dalek_sk).unwrap();
let res_slate = slatepack.to_result_slate();
let slate2_str = format!("{:?}", res_slate);
println!("res_slate = {:?}", slate2_str);
assert_eq!(slate_enc_str, slate2_str);
// Testing if another party can open it
let slatepack =
Slatepacker::decrypt_slatepack(slatepack_string_encrypted.as_bytes(), &dalek_sk2).unwrap();
let res_slate = slatepack.to_result_slate();
let slate2_str = format!("{:?}", res_slate);
println!("res_slate2 = {:?}", slate2_str);
assert_eq!(slate_enc_str, slate2_str);
// Testing if can decode form the binary
let slatepack = Slatepacker::decrypt_slatepack(
slatepack_string_binary.as_bytes(),
&DalekSecretKey::from_bytes(&[1; 32]).unwrap(),
)
.unwrap();
let res_slate = slatepack.to_result_slate();
let slate3_str = format!("{:?}", res_slate);
println!("slate3_str = {:?}", slate3_str);
assert_eq!(slate_enc_str, slate3_str);
}
| {
self.sender.clone()
} | identifier_body |
// packer.rs
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Error;
use crate::{Slate, SlateVersion, Slatepack, SlatepackArmor};
use ed25519_dalek::PublicKey as DalekPublicKey;
use ed25519_dalek::SecretKey as DalekSecretKey;
use crate::slatepack::slatepack::SlatePurpose;
#[derive(Clone, Debug)]
/// Arguments, mostly for encrypting/decrypting a slatepack
pub struct Slatepacker {
/// Sender address, None for wrapped
pub sender: Option<DalekPublicKey>,
/// Recipient address, None for wrapped
pub recipient: Option<DalekPublicKey>,
/// The content purpose. It customizes the serializer/deserializer for us.
pub content: SlatePurpose,
/// Slate data.
pub slate: Slate,
}
impl Slatepacker {
/// Wrap a slate with the packer. The slate is expected to be full.
pub fn wrap_slate(slate: Slate) -> Self {
Self {
sender: None,
recipient: None,
content: SlatePurpose::FullSlate,
slate,
}
}
/// Pack everything into the armored slatepack
pub fn encrypt_to_send(
slate: Slate,
slate_version: SlateVersion,
content: SlatePurpose,
sender: DalekPublicKey,
recipient: Option<DalekPublicKey>, // Encrypted only if recipient is some
secret: &DalekSecretKey,
use_test_rng: bool,
) -> Result<String, Error> {
let pack = Slatepack {
sender: Some(sender),
recipient,
content,
slate,
};
let (slate_bin, encrypted) = pack.to_binary(slate_version, secret, use_test_rng)?;
SlatepackArmor::encode(&slate_bin, encrypted)
}
/// Decode an armored slatepack, decrypting it if it was encrypted.
pub fn decrypt_slatepack(data: &[u8], dec_key: &DalekSecretKey) -> Result<Self, Error> {
let (slate_bytes, encrypted) = SlatepackArmor::decode(data)?;
let slatepack = Slatepack::from_binary(&slate_bytes, encrypted, dec_key)?;
let Slatepack {
sender,
recipient,
content,
slate,
} = slatepack;
Ok(Self {
sender,
recipient,
content,
slate,
})
}
/// Get the content purpose of this slatepack
pub fn get_content(&self) -> SlatePurpose {
self.content.clone()
}
/// Get Sender info. It is needed to send the response back
pub fn get_sender(&self) -> Option<DalekPublicKey> {
self.sender.clone()
}
/// Get Recipient info
pub fn get_recipient(&self) -> Option<DalekPublicKey> {
self.recipient.clone()
}
/// Consume this slatepack and return the resulting slate. Since a slatepack contains only the change set,
/// merging with the original slate may be required to recover the full data.
pub fn to_result_slate(self) -> Slate {
self.slate
}
}
#[test]
fn slatepack_io_test() {
use crate::grin_core::core::KernelFeatures;
use crate::grin_core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::grin_core::global;
use crate::grin_keychain::BlindingFactor;
use crate::grin_keychain::ExtKeychain;
use crate::grin_util as util;
use crate::grin_util::secp::pedersen::{Commitment, RangeProof};
use crate::grin_util::secp::Signature;
use crate::grin_util::secp::{PublicKey, Secp256k1, SecretKey};
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate::{PaymentInfo, VersionCompatInfo};
use crate::ParticipantData;
use uuid::Uuid;
use x25519_dalek::PublicKey as xDalekPublicKey;
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let bytes_16: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let bytes_32: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32,
];
let bytes_32_2: [u8; 32] = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33,
];
let bytes_33: [u8; 33] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
];
let bytes_64: [u8; 64] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let sk = SecretKey::from_slice(&bytes_32).unwrap();
let secp = Secp256k1::new();
let dalek_sk = DalekSecretKey::from_bytes(&bytes_32).unwrap();
let dalek_pk = DalekPublicKey::from(&dalek_sk);
let dalek_sk2 = DalekSecretKey::from_bytes(&bytes_32_2).unwrap();
let dalek_pk2 = DalekPublicKey::from(&dalek_sk2);
// Let's test out the Dalek to xDalek (x25519) algebra.
let dalek_xpk = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk).unwrap();
let dalek_xpk2 = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk2).unwrap();
let dalek_xsk = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk);
let dalek_xsk2 = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk2);
let builded_xpk = xDalekPublicKey::from(&dalek_xsk);
let builded_xpk2 = xDalekPublicKey::from(&dalek_xsk2);
assert_eq!(dalek_xpk.as_bytes(), builded_xpk.as_bytes());
assert_eq!(dalek_xpk2.as_bytes(), builded_xpk2.as_bytes());
// check that Diffie-Hellman works...
let shared_secret1 = dalek_xsk.diffie_hellman(&dalek_xpk2);
let shared_secret2 = dalek_xsk2.diffie_hellman(&dalek_xpk);
assert_eq!(shared_secret1.as_bytes(), shared_secret2.as_bytes());
// Note: the slate data is fake, just some random numbers; it will not pass validation of any kind
let mut slate_enc = Slate {
compact_slate: true, // Slatepack works only for compact models.
num_participants: 2,
id: Uuid::from_bytes(bytes_16),
tx: Transaction::empty()
.with_offset(BlindingFactor::from_slice(&bytes_32) )
.with_input( Input::new( OutputFeatures::Plain, Commitment(bytes_33)) )
.with_output( Output::new(OutputFeatures::Plain, Commitment(bytes_33), RangeProof::zero()))
.with_kernel( TxKernel::with_features(KernelFeatures::Plain { fee: 321 }) ),
offset: BlindingFactor::from_slice(&bytes_32),
amount: 30000000000000000,
fee: 321,
height: 67,
lock_height: 0,
ttl_cutoff_height: Some(54),
participant_data: vec![
ParticipantData {
id: 0,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: None,
message: Some("message 1 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
},
ParticipantData {
id: 1,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
message: Some("message 2 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b64385ed7ed89acf96f1532b31ac8309e611583b1ecf37090e79700fae3683cf682c0043b3029").unwrap()).unwrap()),
}
],
version_info: VersionCompatInfo {
version: 3,
block_header_version: 1,
},
payment_proof: Some(PaymentInfo {
sender_address: ProvableAddress::from_str("a5ib4b2l5snzdgxzpdzouwxwvn4c3setpp5t5j2tr37n3uy3665qwnqd").unwrap(),
receiver_address: ProvableAddress::from_str("a5ib4b2l5snzdgxzpdzouwxwvn4c3setpp5t5j2tr37n3uy3665qwnqd").unwrap(),
receiver_signature: Some( util::to_hex(&bytes_64) ),
}),
};
// updating kernel excess
slate_enc.tx.body.kernels[0].excess = slate_enc.calc_excess::<ExtKeychain>(None).unwrap();
let slate_enc_str = format!("{:?}", slate_enc);
println!("start encrypted slate = {}", slate_enc_str);
// Not encoded, just want to review the data...
let slatepack_string_encrypted = Slatepacker::encrypt_to_send(
slate_enc.clone(),
SlateVersion::SP,
SlatePurpose::FullSlate,
dalek_pk.clone(),
Some(dalek_pk2.clone()), // sending to self, should be fine...
&dalek_sk,
true,
)
.unwrap();
println!("slatepack encrypted = {}", slatepack_string_encrypted);
// Not encoded, just want to review the data...
let slatepack_string_binary = Slatepacker::encrypt_to_send(
slate_enc.clone(),
SlateVersion::SP,
SlatePurpose::FullSlate,
dalek_pk.clone(),
None, // No recipient, should trigger non encrypted mode.
&dalek_sk,
true,
)
.unwrap();
println!("slatepack binary = {}", slatepack_string_binary);
assert!(slatepack_string_encrypted.len() > slatepack_string_binary.len());
// Testing if it can be opened from a backup
let slatepack =
Slatepacker::decrypt_slatepack(slatepack_string_encrypted.as_bytes(), &dalek_sk).unwrap();
let res_slate = slatepack.to_result_slate();
let slate2_str = format!("{:?}", res_slate);
println!("res_slate = {:?}", slate2_str);
assert_eq!(slate_enc_str, slate2_str);
// Testing if another party can open it
let slatepack =
Slatepacker::decrypt_slatepack(slatepack_string_encrypted.as_bytes(), &dalek_sk2).unwrap();
let res_slate = slatepack.to_result_slate();
let slate2_str = format!("{:?}", res_slate);
println!("res_slate2 = {:?}", slate2_str);
assert_eq!(slate_enc_str, slate2_str);
// Testing if it can be decoded from the binary
let slatepack = Slatepacker::decrypt_slatepack(
slatepack_string_binary.as_bytes(),
&DalekSecretKey::from_bytes(&[1; 32]).unwrap(),
)
.unwrap();
let res_slate = slatepack.to_result_slate();
let slate3_str = format!("{:?}", res_slate);
println!("slate3_str = {:?}", slate3_str);
assert_eq!(slate_enc_str, slate3_str);
}
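// Editor's sketch (not part of the original file): a minimal happy-path round
// trip through the API above. It assumes a fully populated `slate` and valid
// ed25519 keys; the helper name `roundtrip_slatepack` is hypothetical and
// error handling is collapsed to `?`.
#[allow(dead_code)]
fn roundtrip_slatepack(
    slate: Slate,
    sender_pk: DalekPublicKey,
    recipient_pk: DalekPublicKey,
    sender_sk: &DalekSecretKey,
    recipient_sk: &DalekSecretKey,
) -> Result<Slate, Error> {
    // Armor and encrypt the slate for the recipient. Passing `None` instead of
    // `Some(recipient_pk)` would produce an unencrypted binary slatepack.
    let armored = Slatepacker::encrypt_to_send(
        slate,
        SlateVersion::SP,
        SlatePurpose::FullSlate,
        sender_pk,
        Some(recipient_pk),
        sender_sk,
        false, // use_test_rng: use real randomness outside of tests
    )?;
    // The recipient decodes (and decrypts) the armored pack and recovers the slate.
    let packed = Slatepacker::decrypt_slatepack(armored.as_bytes(), recipient_sk)?;
    Ok(packed.to_result_slate())
}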
// core.rs
// Copyright 2020 - Nym Technologies SA <[email protected]>
// SPDX-License-Identifier: Apache-2.0
use crate::allowed_hosts::{HostsStore, OutboundRequestFilter};
use crate::connection::Connection;
use crate::websocket;
use crate::websocket::TSWebsocketStream;
use futures::channel::mpsc;
use futures::stream::{SplitSink, SplitStream};
use futures::{SinkExt, StreamExt};
use log::*;
use nymsphinx::addressing::clients::Recipient;
use nymsphinx::receiver::ReconstructedMessage;
use proxy_helpers::connection_controller::{Controller, ControllerCommand, ControllerSender};
use socks5_requests::{ConnectionId, Request, Response};
use std::path::PathBuf;
use std::sync::atomic::{AtomicUsize, Ordering};
use tokio_tungstenite::tungstenite::protocol::Message;
use websocket::WebsocketConnectionError;
use websocket_requests::{requests::ClientRequest, responses::ServerResponse};
// Since it's an atomic, it's safe to be kept static and shared across threads
static ACTIVE_PROXIES: AtomicUsize = AtomicUsize::new(0);
pub struct ServiceProvider {
listening_address: String,
outbound_request_filter: OutboundRequestFilter,
open_proxy: bool,
}
impl ServiceProvider {
pub fn new(listening_address: String, open_proxy: bool) -> ServiceProvider {
let allowed_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("allowed.list"),
);
let unknown_hosts = HostsStore::new(
HostsStore::default_base_dir(),
PathBuf::from("unknown.list"),
);
let outbound_request_filter = OutboundRequestFilter::new(allowed_hosts, unknown_hosts);
ServiceProvider {
listening_address,
outbound_request_filter,
open_proxy,
}
}
/// Listens for any messages from `mix_reader` that should be written back to the mix network
/// via the `websocket_writer`.
async fn mixnet_response_listener(
mut websocket_writer: SplitSink<TSWebsocketStream, Message>,
mut mix_reader: mpsc::UnboundedReceiver<(Response, Recipient)>,
) {
// TODO: wire SURBs in here once they're available
while let Some((response, return_address)) = mix_reader.next().await {
// make a 'request' to the native-websocket client
let response_message = ClientRequest::Send {
recipient: return_address,
message: response.into_bytes(),
with_reply_surb: false,
};
let message = Message::Binary(response_message.serialize());
websocket_writer.send(message).await.unwrap();
}
}
async fn read_websocket_message(
websocket_reader: &mut SplitStream<TSWebsocketStream>,
) -> Option<ReconstructedMessage> {
while let Some(msg) = websocket_reader.next().await {
let data = msg
.expect("we failed to read from the websocket!")
.into_data();
// try to recover the actual message from the mix network...
let deserialized_message = match ServerResponse::deserialize(&data) {
Ok(deserialized) => deserialized,
Err(err) => {
error!(
"Failed to deserialize received websocket message! - {}",
err
);
continue;
}
};
let received = match deserialized_message {
ServerResponse::Received(received) => received,
ServerResponse::Error(err) => {
panic!("received error from native client! - {}", err)
}
_ => unimplemented!("probably should never be reached?"),
};
return Some(received);
}
None
}
async fn start_proxy(
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
controller_sender: ControllerSender,
mix_input_sender: mpsc::UnboundedSender<(Response, Recipient)>,
) {
let mut conn = match Connection::new(conn_id, remote_addr.clone(), return_address).await {
Ok(conn) => conn,
Err(err) => {
error!(
"error while connecting to {:?}! - {:?}",
remote_addr.clone(),
err
);
// inform the remote that the connection is closed before it was even established
mix_input_sender
.unbounded_send((Response::new(conn_id, Vec::new(), true), return_address))
.unwrap();
return;
}
};
// Connect implies it's a fresh connection - register it with our controller
let (mix_sender, mix_receiver) = mpsc::unbounded();
controller_sender
.unbounded_send(ControllerCommand::Insert(conn_id, mix_sender))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_add(1, Ordering::SeqCst);
info!(
"Starting proxy for {} (currently there are {} proxies being handled)",
remote_addr,
old_count + 1
);
// run the proxy on the connection
conn.run_proxy(mix_receiver, mix_input_sender).await;
// proxy is done - remove the access channel from the controller
controller_sender
.unbounded_send(ControllerCommand::Remove(conn_id))
.unwrap();
let old_count = ACTIVE_PROXIES.fetch_sub(1, Ordering::SeqCst);
info!(
"Proxy for {} is finished (currently there are {} proxies being handled)",
remote_addr,
old_count - 1
);
}
fn handle_proxy_connect(
&mut self,
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
conn_id: ConnectionId,
remote_addr: String,
return_address: Recipient,
) {
if !self.open_proxy && !self.outbound_request_filter.check(&remote_addr) {
log::info!("Domain {:?} failed filter check", remote_addr);
return;
}
let controller_sender_clone = controller_sender.clone();
let mix_input_sender_clone = mix_input_sender.clone();
// and start the proxy for this connection
tokio::spawn(async move {
Self::start_proxy(
conn_id,
remote_addr,
return_address,
controller_sender_clone,
mix_input_sender_clone,
)
.await
});
}
fn handle_proxy_send(
&self,
controller_sender: &mut ControllerSender,
conn_id: ConnectionId,
data: Vec<u8>,
closed: bool,
) {
controller_sender
.unbounded_send(ControllerCommand::Send(conn_id, data, closed))
.unwrap()
}
fn handle_proxy_request(
&mut self,
raw_request: &[u8],
controller_sender: &mut ControllerSender,
mix_input_sender: &mpsc::UnboundedSender<(Response, Recipient)>,
) {
// try to treat each received mix message as a service provider request
let deserialized_request = match Request::try_from_bytes(raw_request) {
Ok(request) => request,
Err(err) => {
error!("Failed to deserialized received request! - {}", err);
return;
}
};
match deserialized_request {
Request::Connect {
conn_id,
remote_addr,
return_address,
} => self.handle_proxy_connect(
controller_sender,
mix_input_sender,
conn_id,
remote_addr,
return_address,
),
Request::Send(conn_id, data, closed) => {
self.handle_proxy_send(controller_sender, conn_id, data, closed)
}
}
}
/// Start all subsystems
pub async fn run(&mut self) {
let websocket_stream = self.connect_websocket(&self.listening_address).await;
// split the websocket so that we could read and write from separate threads
let (websocket_writer, mut websocket_reader) = websocket_stream.split();
// channels responsible for managing messages that are to be sent to the mix network. The receiver is
// going to be used by `mixnet_response_listener`
let (mix_input_sender, mix_input_receiver) = mpsc::unbounded::<(Response, Recipient)>();
// controller for managing all active connections
let (mut active_connections_controller, mut controller_sender) = Controller::new();
tokio::spawn(async move {
active_connections_controller.run().await;
});
// start the listener for mix messages
tokio::spawn(async move {
Self::mixnet_response_listener(websocket_writer, mix_input_receiver).await;
});
println!("\nAll systems go. Press CTRL-C to stop the server.");
// for each incoming message from the websocket... (which in 99.99% of cases is going to be a mix message)
loop {
let received = match Self::read_websocket_message(&mut websocket_reader).await {
Some(msg) => msg,
None => {
error!("The websocket stream has finished!");
return;
}
};
let raw_message = received.message;
// TODO: here be potential SURB (i.e. received.reply_SURB)
self.handle_proxy_request(&raw_message, &mut controller_sender, &mix_input_sender)
}
}
// Make the websocket connection so we can receive incoming Mixnet messages.
async fn connect_websocket(&self, uri: &str) -> TSWebsocketStream {
let ws_stream = match websocket::Connection::new(uri).connect().await {
Ok(ws_stream) => {
info!("* connected to local websocket server at {}", uri);
ws_stream
}
Err(WebsocketConnectionError::ConnectionNotEstablished) => {
panic!("Error: websocket connection attempt failed, is the Nym client running?")
}
};
ws_stream
}
}
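// Editor's sketch (not part of the original file): a hypothetical async entry
// point showing how `ServiceProvider` is meant to be driven. The websocket URI
// is a placeholder for a locally running Nym native client; `open_proxy =
// false` keeps the outbound request filter active.
#[allow(dead_code)]
async fn run_provider_sketch() {
    let mut provider = ServiceProvider::new("ws://127.0.0.1:1977".to_string(), false);
    // Blocks until the underlying websocket stream ends.
    provider.run().await;
}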
// config.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use attr::HasAttrs;
use feature_gate::{feature_err, EXPLAIN_STMT_ATTR_SYNTAX, Features, get_features, GateIssue};
use {fold, attr};
use ast;
use codemap::Spanned;
use edition::Edition;
use parse::{token, ParseSess};
use ptr::P;
use util::small_vector::SmallVector;
/// A folder that strips out items that do not belong in the current configuration.
pub struct StripUnconfigured<'a> {
pub should_test: bool,
pub sess: &'a ParseSess,
pub features: Option<&'a Features>,
}
// `cfg_attr`-process the crate's attributes and compute the crate's features.
pub fn features(mut krate: ast::Crate, sess: &ParseSess, should_test: bool, edition: Edition)
-> (ast::Crate, Features) {
let features;
{
let mut strip_unconfigured = StripUnconfigured {
should_test,
sess,
features: None,
};
let unconfigured_attrs = krate.attrs.clone();
let err_count = sess.span_diagnostic.err_count();
if let Some(attrs) = strip_unconfigured.configure(krate.attrs) {
krate.attrs = attrs;
} else { // the entire crate is unconfigured
krate.attrs = Vec::new();
krate.module.items = Vec::new();
return (krate, Features::new());
}
features = get_features(&sess.span_diagnostic, &krate.attrs, edition);
// Avoid reconfiguring malformed `cfg_attr`s
if err_count == sess.span_diagnostic.err_count() {
strip_unconfigured.features = Some(&features);
strip_unconfigured.configure(unconfigured_attrs);
}
}
(krate, features)
}
macro_rules! configure {
($this:ident, $node:ident) => {
match $this.configure($node) {
Some(node) => node,
None => return Default::default(),
}
}
}
impl<'a> StripUnconfigured<'a> {
pub fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
let node = self.process_cfg_attrs(node);
if self.in_cfg(node.attrs()) { Some(node) } else { None }
}
pub fn process_cfg_attrs<T: HasAttrs>(&mut self, node: T) -> T {
node.map_attrs(|attrs| {
attrs.into_iter().filter_map(|attr| self.process_cfg_attr(attr)).collect()
})
}
fn process_cfg_attr(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
if !attr.check_name("cfg_attr") {
return Some(attr);
}
let (cfg, path, tokens, span) = match attr.parse(self.sess, |parser| {
parser.expect(&token::OpenDelim(token::Paren))?;
let cfg = parser.parse_meta_item()?;
parser.expect(&token::Comma)?;
let lo = parser.span.lo();
let (path, tokens) = parser.parse_path_and_tokens()?;
parser.expect(&token::CloseDelim(token::Paren))?;
Ok((cfg, path, tokens, parser.prev_span.with_lo(lo)))
}) {
Ok(result) => result,
Err(mut e) => {
e.emit();
return None;
}
};
if attr::cfg_matches(&cfg, self.sess, self.features) {
self.process_cfg_attr(ast::Attribute {
id: attr::mk_attr_id(),
style: attr.style,
path,
tokens,
is_sugared_doc: false,
span,
})
} else {
None
}
}
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
}
}
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check_name("cfg_attr") {
"cfg_attr"
} else {
continue;
};
let msg = format!("#[{}] cannot be applied on a generic parameter", offending_attr);
self.sess.span_diagnostic.span_err(attr.span, &msg);
}
}
}
impl<'a> fold::Folder for StripUnconfigured<'a> {
fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
let foreign_mod = self.configure_foreign_mod(foreign_mod);
fold::noop_fold_foreign_mod(foreign_mod, self)
}
fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
let item = self.configure_item_kind(item);
fold::noop_fold_item_kind(item, self)
}
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
let mut expr = self.configure_expr(expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
P(fold::noop_fold_expr(expr, self))
}
fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let mut expr = configure!(self, expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
Some(P(fold::noop_fold_expr(expr, self)))
}
fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
match self.configure_stmt(stmt) {
Some(stmt) => fold::noop_fold_stmt(stmt, self),
None => return SmallVector::new(),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
fold::noop_fold_item(configure!(self, item), self)
}
fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> {
fold::noop_fold_impl_item(configure!(self, item), self)
}
fn fold_trait_item(&mut self, item: ast::TraitItem) -> SmallVector<ast::TraitItem> {
fold::noop_fold_trait_item(configure!(self, item), self)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// Don't configure interpolated AST (c.f. #34171).
// Interpolated AST will get configured once the surrounding tokens are parsed.
mac
}
fn fold_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
fold::noop_fold_pat(self.configure_pat(pattern), self)
}
}
fn is_cfg(attr: &ast::Attribute) -> bool {
attr.check_name("cfg")
}
pub fn is_test_or_bench(attr: &ast::Attribute) -> bool {
attr.check_name("test") || attr.check_name("bench")
}
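// Editor's sketch (not part of the original file): how a driver might apply the
// folder once features are known. `fold_crate` is assumed to be supplied by the
// `Folder` trait implemented above; `sess`, `features`, and `krate` are assumed
// to come from an earlier parse step.
#[allow(dead_code)]
fn strip_crate<'a>(krate: ast::Crate, sess: &'a ParseSess, features: &'a Features) -> ast::Crate {
    let mut strip = StripUnconfigured {
        should_test: false,
        sess,
        features: Some(features),
    };
    fold::Folder::fold_crate(&mut strip, krate)
}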
config.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use attr::HasAttrs;
use feature_gate::{feature_err, EXPLAIN_STMT_ATTR_SYNTAX, Features, get_features, GateIssue};
use {fold, attr};
use ast;
use codemap::Spanned;
use edition::Edition;
use parse::{token, ParseSess};
use ptr::P;
use util::small_vector::SmallVector;
/// A folder that strips out items that do not belong in the current configuration.
pub struct StripUnconfigured<'a> {
pub should_test: bool,
pub sess: &'a ParseSess,
pub features: Option<&'a Features>,
}
// `cfg_attr`-process the crate's attributes and compute the crate's features.
pub fn features(mut krate: ast::Crate, sess: &ParseSess, should_test: bool, edition: Edition)
-> (ast::Crate, Features) {
let features;
{
let mut strip_unconfigured = StripUnconfigured {
should_test,
sess,
features: None,
};
let unconfigured_attrs = krate.attrs.clone();
let err_count = sess.span_diagnostic.err_count();
if let Some(attrs) = strip_unconfigured.configure(krate.attrs) {
krate.attrs = attrs;
} else { // the entire crate is unconfigured
krate.attrs = Vec::new();
krate.module.items = Vec::new();
return (krate, Features::new());
}
features = get_features(&sess.span_diagnostic, &krate.attrs, edition);
// Avoid reconfiguring malformed `cfg_attr`s
if err_count == sess.span_diagnostic.err_count() {
strip_unconfigured.features = Some(&features);
strip_unconfigured.configure(unconfigured_attrs);
}
}
(krate, features)
}
macro_rules! configure {
($this:ident, $node:ident) => {
match $this.configure($node) {
Some(node) => node,
None => return Default::default(),
}
}
}
impl<'a> StripUnconfigured<'a> {
pub fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
let node = self.process_cfg_attrs(node);
if self.in_cfg(node.attrs()) { Some(node) } else { None }
}
pub fn process_cfg_attrs<T: HasAttrs>(&mut self, node: T) -> T {
node.map_attrs(|attrs| {
attrs.into_iter().filter_map(|attr| self.process_cfg_attr(attr)).collect()
})
}
fn process_cfg_attr(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
if!attr.check_name("cfg_attr") {
return Some(attr);
}
let (cfg, path, tokens, span) = match attr.parse(self.sess, |parser| {
parser.expect(&token::OpenDelim(token::Paren))?;
let cfg = parser.parse_meta_item()?;
parser.expect(&token::Comma)?;
let lo = parser.span.lo();
let (path, tokens) = parser.parse_path_and_tokens()?;
parser.expect(&token::CloseDelim(token::Paren))?;
Ok((cfg, path, tokens, parser.prev_span.with_lo(lo)))
}) {
Ok(result) => result,
Err(mut e) => {
e.emit();
return None;
}
};
if attr::cfg_matches(&cfg, self.sess, self.features) {
self.process_cfg_attr(ast::Attribute {
id: attr::mk_attr_id(),
style: attr.style,
path,
tokens,
is_sugared_doc: false,
span,
})
} else {
None
}
}
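// Worked example: `#[cfg_attr(feature = "serde", derive(Serialize))]`
// becomes `#[derive(Serialize)]` when the cfg predicate matches, and is
// dropped entirely otherwise; the recursive call above also handles nested
// forms such as `#[cfg_attr(a, cfg_attr(b, c))]`.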
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
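// e.g. an item annotated `#[cfg(unix)]` is kept only when `unix` is in the
// session's cfg set; a malformed `#[cfg(a, b)]` is reported but treated as
// configured so that compilation can proceed.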
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
}
}
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
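// e.g. `fn f<#[cfg(unix)] T>(x: T) {}` is rejected outright rather than
// silently dropping the parameter on other targets.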
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check_name("cfg_attr") {
"cfg_attr"
} else {
continue;
};
let msg = format!("#[{}] cannot be applied on a generic parameter", offending_attr);
self.sess.span_diagnostic.span_err(attr.span, &msg);
}
}
}
impl<'a> fold::Folder for StripUnconfigured<'a> {
fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
let foreign_mod = self.configure_foreign_mod(foreign_mod);
fold::noop_fold_foreign_mod(foreign_mod, self)
}
fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
let item = self.configure_item_kind(item);
fold::noop_fold_item_kind(item, self)
}
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
let mut expr = self.configure_expr(expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
P(fold::noop_fold_expr(expr, self))
}
fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let mut expr = configure!(self, expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
Some(P(fold::noop_fold_expr(expr, self)))
}
fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
match self.configure_stmt(stmt) {
Some(stmt) => fold::noop_fold_stmt(stmt, self),
None => return SmallVector::new(),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
fold::noop_fold_item(configure!(self, item), self)
}
fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> {
fold::noop_fold_impl_item(configure!(self, item), self)
}
fn | (&mut self, item: ast::TraitItem) -> SmallVector<ast::TraitItem> {
fold::noop_fold_trait_item(configure!(self, item), self)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// Don't configure interpolated AST (c.f. #34171).
// Interpolated AST will get configured once the surrounding tokens are parsed.
mac
}
fn fold_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
fold::noop_fold_pat(self.configure_pat(pattern), self)
}
}
fn is_cfg(attr: &ast::Attribute) -> bool {
attr.check_name("cfg")
}
pub fn is_test_or_bench(attr: &ast::Attribute) -> bool {
attr.check_name("test") || attr.check_name("bench")
}
| fold_trait_item | identifier_name |
config.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use attr::HasAttrs;
use feature_gate::{feature_err, EXPLAIN_STMT_ATTR_SYNTAX, Features, get_features, GateIssue};
use {fold, attr};
use ast;
use codemap::Spanned;
use edition::Edition;
use parse::{token, ParseSess};
use ptr::P;
use util::small_vector::SmallVector;
/// A folder that strips out items that do not belong in the current configuration.
pub struct StripUnconfigured<'a> {
pub should_test: bool,
pub sess: &'a ParseSess,
pub features: Option<&'a Features>,
}
// `cfg_attr`-process the crate's attributes and compute the crate's features.
pub fn features(mut krate: ast::Crate, sess: &ParseSess, should_test: bool, edition: Edition)
-> (ast::Crate, Features) {
let features;
{
let mut strip_unconfigured = StripUnconfigured {
should_test,
sess,
features: None,
};
let unconfigured_attrs = krate.attrs.clone();
let err_count = sess.span_diagnostic.err_count();
if let Some(attrs) = strip_unconfigured.configure(krate.attrs) {
krate.attrs = attrs;
} else { // the entire crate is unconfigured
krate.attrs = Vec::new();
krate.module.items = Vec::new();
return (krate, Features::new());
}
features = get_features(&sess.span_diagnostic, &krate.attrs, edition);
// Avoid reconfiguring malformed `cfg_attr`s
if err_count == sess.span_diagnostic.err_count() {
strip_unconfigured.features = Some(&features);
strip_unconfigured.configure(unconfigured_attrs);
}
}
(krate, features)
}
macro_rules! configure {
($this:ident, $node:ident) => {
match $this.configure($node) {
Some(node) => node,
None => return Default::default(),
}
}
}
impl<'a> StripUnconfigured<'a> {
pub fn configure<T: HasAttrs>(&mut self, node: T) -> Option<T> {
let node = self.process_cfg_attrs(node);
if self.in_cfg(node.attrs()) { Some(node) } else { None }
}
pub fn process_cfg_attrs<T: HasAttrs>(&mut self, node: T) -> T {
node.map_attrs(|attrs| {
attrs.into_iter().filter_map(|attr| self.process_cfg_attr(attr)).collect()
})
}
fn process_cfg_attr(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
if !attr.check_name("cfg_attr") {
return Some(attr);
}
let (cfg, path, tokens, span) = match attr.parse(self.sess, |parser| {
parser.expect(&token::OpenDelim(token::Paren))?;
let cfg = parser.parse_meta_item()?;
parser.expect(&token::Comma)?;
let lo = parser.span.lo();
let (path, tokens) = parser.parse_path_and_tokens()?;
parser.expect(&token::CloseDelim(token::Paren))?;
Ok((cfg, path, tokens, parser.prev_span.with_lo(lo)))
}) {
Ok(result) => result,
Err(mut e) => {
e.emit();
return None;
}
};
if attr::cfg_matches(&cfg, self.sess, self.features) {
self.process_cfg_attr(ast::Attribute {
id: attr::mk_attr_id(),
style: attr.style,
path,
tokens,
is_sugared_doc: false,
span,
})
} else {
None
}
}
// Determine if a node with the given attributes should be included in this configuration.
pub fn in_cfg(&mut self, attrs: &[ast::Attribute]) -> bool {
attrs.iter().all(|attr| {
// When not compiling with --test we should not compile the #[test] functions
if !self.should_test && is_test_or_bench(attr) {
return false;
}
let mis = if !is_cfg(attr) {
return true;
} else if let Some(mis) = attr.meta_item_list() {
mis
} else {
return true;
};
if mis.len() != 1 {
self.sess.span_diagnostic.span_err(attr.span, "expected 1 cfg-pattern");
return true;
}
if !mis[0].is_meta_item() {
self.sess.span_diagnostic.span_err(mis[0].span, "unexpected literal");
return true;
}
attr::cfg_matches(mis[0].meta_item().unwrap(), self.sess, self.features)
})
}
// Visit attributes on expression and statements (but not attributes on items in blocks).
fn visit_expr_attrs(&mut self, attrs: &[ast::Attribute]) {
// flag the offending attributes
for attr in attrs.iter() {
self.maybe_emit_expr_attr_err(attr);
}
}
/// If attributes are not allowed on expressions, emit an error for `attr`
pub fn maybe_emit_expr_attr_err(&self, attr: &ast::Attribute) {
if !self.features.map(|features| features.stmt_expr_attributes).unwrap_or(true) {
let mut err = feature_err(self.sess,
"stmt_expr_attributes",
attr.span,
GateIssue::Language,
EXPLAIN_STMT_ATTR_SYNTAX);
if attr.is_sugared_doc {
err.help("`///` is for documentation comments. For a plain comment, use `//`.");
}
err.emit();
} |
pub fn configure_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
ast::ForeignMod {
abi: foreign_mod.abi,
items: foreign_mod.items.into_iter().filter_map(|item| self.configure(item)).collect(),
}
}
fn configure_variant_data(&mut self, vdata: ast::VariantData) -> ast::VariantData {
match vdata {
ast::VariantData::Struct(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Struct(fields.collect(), id)
}
ast::VariantData::Tuple(fields, id) => {
let fields = fields.into_iter().filter_map(|field| self.configure(field));
ast::VariantData::Tuple(fields.collect(), id)
}
ast::VariantData::Unit(id) => ast::VariantData::Unit(id)
}
}
pub fn configure_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
match item {
ast::ItemKind::Struct(def, generics) => {
ast::ItemKind::Struct(self.configure_variant_data(def), generics)
}
ast::ItemKind::Union(def, generics) => {
ast::ItemKind::Union(self.configure_variant_data(def), generics)
}
ast::ItemKind::Enum(def, generics) => {
let variants = def.variants.into_iter().filter_map(|v| {
self.configure(v).map(|v| {
Spanned {
node: ast::Variant_ {
ident: v.node.ident,
attrs: v.node.attrs,
data: self.configure_variant_data(v.node.data),
disr_expr: v.node.disr_expr,
},
span: v.span
}
})
});
ast::ItemKind::Enum(ast::EnumDef {
variants: variants.collect(),
}, generics)
}
item => item,
}
}
pub fn configure_expr_kind(&mut self, expr_kind: ast::ExprKind) -> ast::ExprKind {
match expr_kind {
ast::ExprKind::Match(m, arms) => {
let arms = arms.into_iter().filter_map(|a| self.configure(a)).collect();
ast::ExprKind::Match(m, arms)
}
ast::ExprKind::Struct(path, fields, base) => {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
ast::ExprKind::Struct(path, fields, base)
}
_ => expr_kind,
}
}
pub fn configure_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
self.visit_expr_attrs(expr.attrs());
// If an expr is valid to cfg away it will have been removed by the
// outer stmt or expression folder before descending in here.
// Anything else is always required, and thus has to error out
// in case of a cfg attr.
//
// NB: This is intentionally not part of the fold_expr() function
// in order for fold_opt_expr() to be able to avoid this check
if let Some(attr) = expr.attrs().iter().find(|a| is_cfg(a) || is_test_or_bench(a)) {
let msg = "removing an expression is not supported in this position";
self.sess.span_diagnostic.span_err(attr.span, msg);
}
self.process_cfg_attrs(expr)
}
pub fn configure_stmt(&mut self, stmt: ast::Stmt) -> Option<ast::Stmt> {
self.configure(stmt)
}
pub fn configure_struct_expr_field(&mut self, field: ast::Field) -> Option<ast::Field> {
self.configure(field)
}
pub fn configure_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
pattern.map(|mut pattern| {
if let ast::PatKind::Struct(path, fields, etc) = pattern.node {
let fields = fields.into_iter()
.filter_map(|field| {
self.configure(field)
})
.collect();
pattern.node = ast::PatKind::Struct(path, fields, etc);
}
pattern
})
}
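// e.g. `let Foo { #[cfg(unix)] fd, .. } = val;` keeps the `fd` field pattern
// only on unix targets, leaving the rest of the pattern untouched.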
// deny #[cfg] on generic parameters until we decide what to do with it.
// see issue #51279.
pub fn disallow_cfg_on_generic_param(&mut self, param: &ast::GenericParam) {
for attr in param.attrs() {
let offending_attr = if attr.check_name("cfg") {
"cfg"
} else if attr.check_name("cfg_attr") {
"cfg_attr"
} else {
continue;
};
let msg = format!("#[{}] cannot be applied on a generic parameter", offending_attr);
self.sess.span_diagnostic.span_err(attr.span, &msg);
}
}
}
impl<'a> fold::Folder for StripUnconfigured<'a> {
fn fold_foreign_mod(&mut self, foreign_mod: ast::ForeignMod) -> ast::ForeignMod {
let foreign_mod = self.configure_foreign_mod(foreign_mod);
fold::noop_fold_foreign_mod(foreign_mod, self)
}
fn fold_item_kind(&mut self, item: ast::ItemKind) -> ast::ItemKind {
let item = self.configure_item_kind(item);
fold::noop_fold_item_kind(item, self)
}
fn fold_expr(&mut self, expr: P<ast::Expr>) -> P<ast::Expr> {
let mut expr = self.configure_expr(expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
P(fold::noop_fold_expr(expr, self))
}
fn fold_opt_expr(&mut self, expr: P<ast::Expr>) -> Option<P<ast::Expr>> {
let mut expr = configure!(self, expr).into_inner();
expr.node = self.configure_expr_kind(expr.node);
Some(P(fold::noop_fold_expr(expr, self)))
}
fn fold_stmt(&mut self, stmt: ast::Stmt) -> SmallVector<ast::Stmt> {
match self.configure_stmt(stmt) {
Some(stmt) => fold::noop_fold_stmt(stmt, self),
None => return SmallVector::new(),
}
}
fn fold_item(&mut self, item: P<ast::Item>) -> SmallVector<P<ast::Item>> {
fold::noop_fold_item(configure!(self, item), self)
}
fn fold_impl_item(&mut self, item: ast::ImplItem) -> SmallVector<ast::ImplItem> {
fold::noop_fold_impl_item(configure!(self, item), self)
}
fn fold_trait_item(&mut self, item: ast::TraitItem) -> SmallVector<ast::TraitItem> {
fold::noop_fold_trait_item(configure!(self, item), self)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// Don't configure interpolated AST (c.f. #34171).
// Interpolated AST will get configured once the surrounding tokens are parsed.
mac
}
fn fold_pat(&mut self, pattern: P<ast::Pat>) -> P<ast::Pat> {
fold::noop_fold_pat(self.configure_pat(pattern), self)
}
}
fn is_cfg(attr: &ast::Attribute) -> bool {
attr.check_name("cfg")
}
pub fn is_test_or_bench(attr: &ast::Attribute) -> bool {
attr.check_name("test") || attr.check_name("bench")
} | } | random_line_split |
login.rs | use super::Encode;
use bitflags::bitflags;
use byteorder::{LittleEndian, WriteBytesExt};
use bytes::BytesMut;
use std::io::{Cursor, Write};
use std::{borrow::Cow, io};
uint_enum! {
#[repr(u32)]
#[derive(PartialOrd)]
pub enum FeatureLevel {
SqlServerV7 = 0x70000000,
SqlServer2000 = 0x71000000,
SqlServer2000Sp1 = 0x71000001,
SqlServer2005 = 0x72090002,
SqlServer2008 = 0x730A0003,
SqlServer2008R2 = 0x730B0003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn done_row_count_bytes(self) -> u8 |
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
cursor.write_u16::<LittleEndian>(data_offset as u16)?;
// ibSSPI
if i == 10 {
let length = if let Some(ref bytes) = self.integrated_security {
let bak = cursor.position();
cursor.set_position(data_offset as u64);
cursor.write_all(bytes)?;
data_offset += bytes.len();
cursor.set_position(bak);
bytes.len()
} else {
0
};
cursor.write_u16::<LittleEndian>(length as u16)?;
continue;
}
// jump into the data portion of the output
let bak = cursor.position();
cursor.set_position(data_offset as u64);
for codepoint in value.encode_utf16() {
cursor.write_u16::<LittleEndian>(codepoint)?;
}
let new_position = cursor.position() as usize;
// prepare the password in MS-fashion
if i == 2 {
let buffer = cursor.get_mut();
for idx in data_offset..new_position {
let byte = buffer[idx];
buffer[idx] = ((byte << 4) & 0xf0 | (byte >> 4) & 0x0f) ^ 0xA5;
}
}
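// Worked example of the scramble above: 'a' is UTF-16LE 0x61 0x00;
// swapping nibbles gives 0x16 0x00, and XOR with 0xA5 puts 0xB3 0xA5
// on the wire.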
let length = new_position - data_offset;
cursor.set_position(bak);
data_offset += length;
// microsoft being really consistent here... using byte offsets with utf16-lengths
// sounds like premature optimization
cursor.write_u16::<LittleEndian>(length as u16 / 2)?;
}
// cbSSPILong
cursor.write_u32::<LittleEndian>(0)?;
cursor.set_position(data_offset as u64);
// FeatureExt: unsupported for now, simply write a terminator
cursor.write_u8(0xFF)?;
cursor.set_position(0);
cursor.write_u32::<LittleEndian>(cursor.get_ref().len() as u32)?;
dst.extend(cursor.into_inner());
Ok(())
}
}
| {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
} | identifier_body |
login.rs | use super::Encode;
use bitflags::bitflags;
use byteorder::{LittleEndian, WriteBytesExt};
use bytes::BytesMut;
use std::io::{Cursor, Write};
use std::{borrow::Cow, io};
uint_enum! {
#[repr(u32)]
#[derive(PartialOrd)]
pub enum FeatureLevel {
SqlServerV7 = 0x70000000,
SqlServer2000 = 0x71000000,
SqlServer2000Sp1 = 0x71000001,
SqlServer2005 = 0x72090002,
SqlServer2008 = 0x730A0003,
SqlServer2008R2 = 0x730B0003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
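/// Note: the enum is `repr(u32)`, so `as u8` keeps only the low byte
/// (e.g. 0x72090002 becomes 0x02). The low bytes happen to increase with
/// the feature level, which is what this comparison relies on.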
pub fn done_row_count_bytes(self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
cursor.write_u16::<LittleEndian>(data_offset as u16)?;
// ibSSPI
if i == 10 {
let length = if let Some(ref bytes) = self.integrated_security {
let bak = cursor.position();
cursor.set_position(data_offset as u64);
cursor.write_all(bytes)?;
data_offset += bytes.len();
cursor.set_position(bak);
bytes.len()
} else | ;
cursor.write_u16::<LittleEndian>(length as u16)?;
continue;
}
// jump into the data portion of the output
let bak = cursor.position();
cursor.set_position(data_offset as u64);
for codepoint in value.encode_utf16() {
cursor.write_u16::<LittleEndian>(codepoint)?;
}
let new_position = cursor.position() as usize;
// prepare the password in MS-fashion
if i == 2 {
let buffer = cursor.get_mut();
for idx in data_offset..new_position {
let byte = buffer[idx];
buffer[idx] = ((byte << 4) & 0xf0 | (byte >> 4) & 0x0f) ^ 0xA5;
}
}
let length = new_position - data_offset;
cursor.set_position(bak);
data_offset += length;
// microsoft being really consistent here... using byte offsets with utf16-lengths
// sounds like premature optimization
cursor.write_u16::<LittleEndian>(length as u16 / 2)?;
}
// cbSSPILong
cursor.write_u32::<LittleEndian>(0)?;
cursor.set_position(data_offset as u64);
// FeatureExt: unsupported for now, simply write a terminator
cursor.write_u8(0xFF)?;
cursor.set_position(0);
cursor.write_u32::<LittleEndian>(cursor.get_ref().len() as u32)?;
dst.extend(cursor.into_inner());
Ok(())
}
}
| {
0
} | conditional_block |
login.rs | use super::Encode;
use bitflags::bitflags;
use byteorder::{LittleEndian, WriteBytesExt};
use bytes::BytesMut;
use std::io::{Cursor, Write};
use std::{borrow::Cow, io};
uint_enum! {
#[repr(u32)]
#[derive(PartialOrd)]
pub enum FeatureLevel {
SqlServerV7 = 0x70000000,
SqlServer2000 = 0x71000000,
SqlServer2000Sp1 = 0x71000001,
SqlServer2005 = 0x72090002,
SqlServer2008 = 0x730A0003,
SqlServer2008R2 = 0x730B0003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn | (self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
cursor.write_u16::<LittleEndian>(data_offset as u16)?;
// ibSSPI
if i == 10 {
let length = if let Some(ref bytes) = self.integrated_security {
let bak = cursor.position();
cursor.set_position(data_offset as u64);
cursor.write_all(bytes)?;
data_offset += bytes.len();
cursor.set_position(bak);
bytes.len()
} else {
0
};
cursor.write_u16::<LittleEndian>(length as u16)?;
continue;
}
// jump into the data portion of the output
let bak = cursor.position();
cursor.set_position(data_offset as u64);
for codepoint in value.encode_utf16() {
cursor.write_u16::<LittleEndian>(codepoint)?;
}
let new_position = cursor.position() as usize;
// prepare the password in MS-fashion
if i == 2 {
let buffer = cursor.get_mut();
for idx in data_offset..new_position {
let byte = buffer[idx];
buffer[idx] = ((byte << 4) & 0xf0 | (byte >> 4) & 0x0f) ^ 0xA5;
}
}
let length = new_position - data_offset;
cursor.set_position(bak);
data_offset += length;
// microsoft being really consistent here... using byte offsets with utf16-lengths
// sounds like premature optimization
cursor.write_u16::<LittleEndian>(length as u16 / 2)?;
}
// cbSSPILong
cursor.write_u32::<LittleEndian>(0)?;
cursor.set_position(data_offset as u64);
// FeatureExt: unsupported for now, simply write a terminator
cursor.write_u8(0xFF)?;
cursor.set_position(0);
cursor.write_u32::<LittleEndian>(cursor.get_ref().len() as u32)?;
dst.extend(cursor.into_inner());
Ok(())
}
}
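// A minimal sketch (not from the original crate) of how the encoder can be
// exercised; it assumes only the `BytesMut` buffer and `Encode` trait used
// above, and that the crate's error type implements `Debug`.
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn login_packet_starts_with_its_total_length() {
let mut buf = BytesMut::new();
LoginMessage::new().encode(&mut buf).unwrap();
// The first four bytes hold the little-endian length of the whole packet.
let len = u32::from_le_bytes([buf[0], buf[1], buf[2], buf[3]]) as usize;
assert_eq!(len, buf.len());
}
}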
| done_row_count_bytes | identifier_name |
login.rs | use super::Encode;
use bitflags::bitflags;
use byteorder::{LittleEndian, WriteBytesExt};
use bytes::BytesMut;
use std::io::{Cursor, Write};
use std::{borrow::Cow, io};
uint_enum! {
#[repr(u32)]
#[derive(PartialOrd)]
pub enum FeatureLevel {
SqlServerV7 = 0x70000000,
SqlServer2000 = 0x71000000,
SqlServer2000Sp1 = 0x71000001,
SqlServer2005 = 0x72090002,
SqlServer2008 = 0x730A0003,
SqlServer2008R2 = 0x730B0003,
/// 2012, 2014, 2016
SqlServerN = 0x74000004,
}
}
bitflags! {
pub struct LoginOptionFlags1: u8 {
const BIG_ENDIAN = 0b00000001;
/// Charset_EBDDIC, default/bit not set = Charset_ASCII
const CHARSET_EBDDIC = 0b00000010;
/// default float is IEEE_754
const FLOAT_VAX = 0b00000100;
const FLOAT_ND5000 = 0b00001000;
const DUMPLOAD_ON = 0b00010000;
/// Set if the client requires warning messages on execution of the USE SQL
/// statement. If this flag is NOT SET, the server MUST NOT inform the client when the database
/// changes, and therefore the client will be unaware of any accompanying collation changes.
const USE_DB_NOTIFY = 0b00100000;
/// Set if the change to initial database needs to succeed if the connection is to succeed. (false: warn)
const INITIAL_DB_FATAL = 0b01000000;
/// Set if the client requires warning messages on execution of a language change statement.
const LANG_CHANGE_WARN = 0b10000000;
}
}
bitflags! {
pub struct LoginOptionFlags2: u8 {
/// Set if the change to initial language needs to succeed if the connect is to succeed.
const INIT_LANG_FATAL = 0b00000001;
/// Set if the client is the ODBC driver. This causes the server to set ANSI_DEFAULTS=ON,
/// CURSOR_CLOSE_ON_COMMIT, IMPLICIT_TRANSACTIONS=OFF, TEXTSIZE=0x7FFFFFFF (2GB) (TDS 7.2 and earlier)
/// TEXTSIZE to infinite (TDS 7.3), and ROWCOUNT to infinite
/// (2.2.6.4)
const ODBC_DRIVER = 0b00000010;
const TRANS_BOUNDARY = 0b00000100;
const CACHE_CONNECT = 0b00001000;
/// reserved
const USER_TYPE_SERVER = 0b00010000;
/// Distributed Query login
const USER_TYPE_REM_USER = 0b00100000;
/// Replication login
const USER_TYPE_SQL_REPL = 0b00110000;
const INTEGRATED_SECURITY = 0b10000000;
}
}
bitflags! {
pub struct LoginTypeFlags: u8 {
/// use TSQL instead of DFLT
const SQL_TSQL = 0b00000001;
/// Set if the client is the OLEDB driver. This causes the server to set ANSI_DEFAULTS to ON...
const OLEDB_DRIVER = 0b00010000;
const READ_ONLY_INTENT = 0b00100000;
}
}
bitflags! {
pub struct LoginOptionFlags3: u8 {
const REQUEST_CHANGE_PWD = 0b00000001;
/// 1 if XML data type instances are returned as binary XML
const SEND_YUKON_BINARY = 0b00000010;
/// 1 if client is requesting separate process to be spawned as user instance
const SPAWN_USER_INSTANCE = 0b00000100;
/// 0 = The server MUST restrict the collations sent to a specific set of collations.
/// 1 = The server MAY send any collation that fits in the storage space.
const SUPPORT_UNKNOWN_COLL = 0b00001000;
// TODO: fExtension?
}
}
impl FeatureLevel {
pub fn done_row_count_bytes(self) -> u8 {
if self as u8 >= FeatureLevel::SqlServer2005 as u8 {
8
} else {
4
}
}
}
/// the login packet
pub struct LoginMessage<'a> {
/// the highest TDS version the client supports
pub tds_version: FeatureLevel,
/// the requested packet size
pub packet_size: u32,
/// the version of the interface library
pub client_prog_ver: u32,
/// the process id of the client application
pub client_pid: u32,
/// the connection id of the primary server
/// (used when connecting to an "Always UP" backup server)
pub connection_id: u32,
pub option_flags_1: LoginOptionFlags1,
pub option_flags_2: LoginOptionFlags2,
/// flag included in option_flags_2
pub integrated_security: Option<Vec<u8>>,
pub type_flags: LoginTypeFlags,
pub option_flags_3: LoginOptionFlags3,
pub client_timezone: i32,
pub client_lcid: u32,
pub hostname: Cow<'a, str>,
pub username: Cow<'a, str>,
pub password: Cow<'a, str>,
pub app_name: Cow<'a, str>,
pub server_name: Cow<'a, str>,
/// the default database to connect to
pub db_name: Cow<'a, str>,
}
impl<'a> LoginMessage<'a> {
pub fn new() -> LoginMessage<'a> {
LoginMessage {
tds_version: FeatureLevel::SqlServerN,
packet_size: 4096,
client_prog_ver: 0,
client_pid: 0,
connection_id: 0,
option_flags_1: LoginOptionFlags1::USE_DB_NOTIFY | LoginOptionFlags1::INITIAL_DB_FATAL,
option_flags_2: LoginOptionFlags2::INIT_LANG_FATAL | LoginOptionFlags2::ODBC_DRIVER,
integrated_security: None,
type_flags: LoginTypeFlags::empty(),
option_flags_3: LoginOptionFlags3::SUPPORT_UNKNOWN_COLL,
client_timezone: 0, //TODO
client_lcid: 0, // TODO
hostname: "".into(),
username: "".into(),
password: "".into(),
app_name: "".into(),
server_name: "".into(),
db_name: "".into(),
}
}
}
impl<'a> Encode<BytesMut> for LoginMessage<'a> {
fn encode(self, dst: &mut BytesMut) -> crate::Result<()> {
let mut cursor = Cursor::new(Vec::with_capacity(512));
// Space for the length
cursor.write_u32::<LittleEndian>(0)?;
// ignore the specified value for integrated security since we determine that by the struct field
let option_flags2 = if self.integrated_security.is_some() {
self.option_flags_2 | LoginOptionFlags2::INTEGRATED_SECURITY
} else {
self.option_flags_2 & !LoginOptionFlags2::INTEGRATED_SECURITY
};
cursor.write_u32::<LittleEndian>(self.tds_version as u32)?;
cursor.write_u32::<LittleEndian>(self.packet_size)?;
cursor.write_u32::<LittleEndian>(self.client_prog_ver)?;
cursor.write_u32::<LittleEndian>(self.client_pid)?;
cursor.write_u32::<LittleEndian>(self.connection_id)?;
cursor.write_u8(self.option_flags_1.bits())?;
cursor.write_u8(option_flags2.bits())?;
cursor.write_u8(self.type_flags.bits())?;
cursor.write_u8(self.option_flags_3.bits())?;
cursor.write_u32::<LittleEndian>(self.client_timezone as u32)?;
cursor.write_u32::<LittleEndian>(self.client_lcid)?;
// variable length data (OffsetLength)
let var_data = [
&self.hostname,
&self.username,
&self.password,
&self.app_name,
&self.server_name,
&"".into(), // 5. ibExtension
&"".into(), // ibCltIntName
&"".into(), // ibLanguage
&self.db_name,
&"".into(), // 9. ClientId (6 bytes); this is included in var_data so we don't lack the bytes of cbSspiLong (4=2*2) and can insert it at the correct position
&"".into(), // 10. ibSSPI
&"".into(), // ibAtchDBFile
&"".into(), // ibChangePassword
];
let mut data_offset = cursor.position() as usize + var_data.len() * 2 * 2 + 6;
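// Layout note: each var_data entry gets a fixed (u16 offset, u16 UTF-16
// length) pair, hence `len * 2 * 2`; the `+ 6` covers the two extra ClientId
// bytes (6 are written instead of a 4-byte pair) plus the trailing 4-byte
// cbSSPILong.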
for (i, value) in var_data.iter().enumerate() {
// write the client ID (created from the MAC address)
if i == 9 {
cursor.write_u32::<LittleEndian>(0)?; //TODO:
cursor.write_u16::<LittleEndian>(42)?; //TODO: generate real client id
continue;
}
| // ibSSPI
if i == 10 {
let length = if let Some(ref bytes) = self.integrated_security {
let bak = cursor.position();
cursor.set_position(data_offset as u64);
cursor.write_all(bytes)?;
data_offset += bytes.len();
cursor.set_position(bak);
bytes.len()
} else {
0
};
cursor.write_u16::<LittleEndian>(length as u16)?;
continue;
}
// jump into the data portion of the output
let bak = cursor.position();
cursor.set_position(data_offset as u64);
for codepoint in value.encode_utf16() {
cursor.write_u16::<LittleEndian>(codepoint)?;
}
let new_position = cursor.position() as usize;
// prepare the password in MS-fashion
if i == 2 {
let buffer = cursor.get_mut();
for idx in data_offset..new_position {
let byte = buffer[idx];
buffer[idx] = ((byte << 4) & 0xf0 | (byte >> 4) & 0x0f) ^ 0xA5;
}
}
let length = new_position - data_offset;
cursor.set_position(bak);
data_offset += length;
// microsoft being really consistent here... using byte offsets with utf16-lengths
// sounds like premature optimization
cursor.write_u16::<LittleEndian>(length as u16 / 2)?;
}
// cbSSPILong
cursor.write_u32::<LittleEndian>(0)?;
cursor.set_position(data_offset as u64);
// FeatureExt: unsupported for now, simply write a terminator
cursor.write_u8(0xFF)?;
cursor.set_position(0);
cursor.write_u32::<LittleEndian>(cursor.get_ref().len() as u32)?;
dst.extend(cursor.into_inner());
Ok(())
}
} | cursor.write_u16::<LittleEndian>(data_offset as u16)?;
| random_line_split |
vdpa.rs | // Copyright (C) 2021 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
//! Kernel-based vhost-vdpa backend.
use std::fs::{File, OpenOptions};
use std::io::Error as IOError;
use std::os::raw::{c_uchar, c_uint};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use vm_memory::GuestAddressSpace;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::fam::*;
use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref};
use super::vhost_binding::*;
use super::{ioctl_result, Error, Result, VhostKernBackend, VhostKernFeatures};
use crate::vdpa::*;
use crate::{VhostAccess, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbType};
// Implement the FamStruct trait for vhost_vdpa_config
generate_fam_struct_impl!(
vhost_vdpa_config,
c_uchar,
buf,
c_uint,
len,
c_uint::MAX as usize
);
type VhostVdpaConfig = FamStructWrapper<vhost_vdpa_config>;
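// Sketch of the intended use (following vmm-sys-util's FamStructWrapper
// semantics): `VhostVdpaConfig::new(n)` allocates room for an `n`-byte `buf`
// and sets the `len` header field to `n`, so callers only fill in `off`
// before issuing the ioctl, as `get_config`/`set_config` below do.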
/// Handle for running VHOST_VDPA ioctls.
pub struct VhostKernVdpa<AS: GuestAddressSpace> {
fd: File,
mem: AS,
backend_features_acked: u64,
}
impl<AS: GuestAddressSpace> VhostKernVdpa<AS> {
/// Open a handle to a new VHOST-VDPA instance.
pub fn new(path: &str, mem: AS) -> Result<Self> {
Ok(VhostKernVdpa {
fd: OpenOptions::new()
.read(true)
.write(true)
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
.open(path)
.map_err(Error::VhostOpen)?,
mem,
backend_features_acked: 0,
})
}
}
impl<AS: GuestAddressSpace> VhostVdpa for VhostKernVdpa<AS> {
fn get_device_id(&self) -> Result<u32> {
let mut device_id: u32 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) };
ioctl_result(ret, device_id)
}
fn get_status(&self) -> Result<u8> {
let mut status: u8 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) };
ioctl_result(ret, status)
}
fn set_status(&self, status: u8) -> Result<()> {
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) };
ioctl_result(ret, ())
}
fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
let ret = unsafe {
ioctl_with_ptr(
self,
VHOST_VDPA_GET_CONFIG(),
config.as_mut_fam_struct_ptr(),
)
};
buffer.copy_from_slice(config.as_slice());
ioctl_result(ret, ())
}
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
let ret =
unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) };
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
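// Hypothetical usage (illustrative names, not from this crate's tests):
//
// let vdpa = VhostKernVdpa::new("/dev/vhost-vdpa-0", &mem)?;
// vdpa.dma_map(iova, len, host_ptr, false)?; // sends an Update IOTLB message
// /* ... device runs ... */
// vdpa.dma_unmap(iova, len)?; // sends an Invalidate IOTLB message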
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS {
&self.mem
}
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn | (&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
assert_eq!(vdpa.is_valid(&config), true);
config.queue_size = 0;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 31;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 33;
assert_eq!(vdpa.is_valid(&config), false);
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
vdpa.set_owner().unwrap();
vdpa.set_mem_table(&[]).unwrap_err();
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: 0x0,
memory_size: 0x10_0000,
userspace_addr: m.get_host_address(GuestAddress(0x0)).unwrap() as u64,
mmap_offset: 0,
mmap_handle: -1,
};
vdpa.set_mem_table(&[region]).unwrap();
assert!(vdpa.get_device_id().unwrap() > 0);
assert_eq!(vdpa.get_status().unwrap(), 0x0);
vdpa.set_status(0x1).unwrap();
assert_eq!(vdpa.get_status().unwrap(), 0x1);
let mut vec = vec![0u8; 8];
vdpa.get_config(0, &mut vec).unwrap();
vdpa.set_config(0, &vec).unwrap();
let eventfd = EventFd::new(0).unwrap();
// set_log_base() and set_log_fd() are not supported by vhost-vdpa
vdpa.set_log_base(
0x4000,
Some(VhostUserDirtyLogRegion {
mmap_size: 0x1000,
mmap_offset: 0x10,
mmap_handle: 1,
}),
)
.unwrap_err();
vdpa.set_log_base(0x4000, None).unwrap_err();
vdpa.set_log_fd(eventfd.as_raw_fd()).unwrap_err();
let max_queues = vdpa.get_vring_num().unwrap();
vdpa.set_vring_num(0, max_queues + 1).unwrap_err();
vdpa.set_vring_num(0, 32).unwrap();
let config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
vdpa.set_vring_addr(0, &config).unwrap();
vdpa.set_vring_base(0, 1).unwrap();
vdpa.set_vring_call(0, &eventfd).unwrap();
vdpa.set_vring_kick(0, &eventfd).unwrap();
vdpa.set_vring_err(0, &eventfd).unwrap();
vdpa.set_config_call(&eventfd).unwrap();
assert_eq!(vdpa.get_vring_base(0).unwrap(), 1);
vdpa.set_vring_enable(0, true).unwrap();
vdpa.set_vring_enable(0, false).unwrap();
}
#[test]
#[serial]
fn test_vdpa_kern_dma() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let mut vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
let backend_features = vdpa.get_backend_features().unwrap();
assert_ne!(backend_features & (1 << VHOST_BACKEND_F_IOTLB_MSG_V2), 0);
vdpa.set_backend_features(backend_features).unwrap();
vdpa.set_owner().unwrap();
vdpa.dma_map(0xFFFF_0000, 0xFFFF, std::ptr::null::<u8>(), false)
.unwrap_err();
unsafe {
let layout = Layout::from_size_align(0xFFFF, 1).unwrap();
let ptr = alloc(layout);
vdpa.dma_map(0xFFFF_0000, 0xFFFF, ptr, false).unwrap();
vdpa.dma_unmap(0xFFFF_0000, 0xFFFF).unwrap();
dealloc(ptr, layout);
};
}
}
| get_backend_features_acked | identifier_name |
vdpa.rs | // Copyright (C) 2021 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
//! Kernel-based vhost-vdpa backend.
use std::fs::{File, OpenOptions};
use std::io::Error as IOError;
use std::os::raw::{c_uchar, c_uint};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use vm_memory::GuestAddressSpace;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::fam::*;
use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref};
use super::vhost_binding::*;
use super::{ioctl_result, Error, Result, VhostKernBackend, VhostKernFeatures};
use crate::vdpa::*;
use crate::{VhostAccess, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbType};
// Implement the FamStruct trait for vhost_vdpa_config
generate_fam_struct_impl!(
vhost_vdpa_config,
c_uchar,
buf,
c_uint,
len,
c_uint::MAX as usize
);
type VhostVdpaConfig = FamStructWrapper<vhost_vdpa_config>;
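// Illustrative sketch (not part of the original source): how the FAM wrapper
// above is typically driven. `VhostVdpaConfig::new(len)` sizes the flexible
// `buf` array, and the fixed header fields (`off`, `len`) are reached through
// `as_mut_fam_struct()`, mirroring the get_config()/set_config() code below.
// The helper name and its argument are hypothetical.
#[allow(dead_code)]
fn example_alloc_config(config_space_len: usize) -> Option<VhostVdpaConfig> {
    let mut config = VhostVdpaConfig::new(config_space_len).ok()?;
    // Read from the start of the device config space.
    config.as_mut_fam_struct().off = 0;
    debug_assert_eq!(config.as_slice().len(), config_space_len);
    Some(config)
}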
/// Handle for running VHOST_VDPA ioctls.
pub struct VhostKernVdpa<AS: GuestAddressSpace> {
fd: File,
mem: AS,
backend_features_acked: u64,
}
impl<AS: GuestAddressSpace> VhostKernVdpa<AS> {
/// Open a handle to a new VHOST-VDPA instance.
pub fn new(path: &str, mem: AS) -> Result<Self> {
Ok(VhostKernVdpa {
fd: OpenOptions::new()
.read(true)
.write(true)
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
.open(path)
.map_err(Error::VhostOpen)?,
mem,
backend_features_acked: 0,
})
}
}
impl<AS: GuestAddressSpace> VhostVdpa for VhostKernVdpa<AS> {
fn get_device_id(&self) -> Result<u32> {
let mut device_id: u32 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) };
ioctl_result(ret, device_id)
}
fn get_status(&self) -> Result<u8> {
let mut status: u8 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) };
ioctl_result(ret, status)
}
fn set_status(&self, status: u8) -> Result<()> {
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) };
ioctl_result(ret, ())
}
fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
let ret = unsafe {
ioctl_with_ptr(
self,
VHOST_VDPA_GET_CONFIG(),
config.as_mut_fam_struct_ptr(),
)
};
buffer.copy_from_slice(config.as_slice());
ioctl_result(ret, ())
}
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
        // Use the IOVA-range ioctl here: VHOST_VDPA_GET_VRING_NUM queries the vring
        // size, not the IOVA range (VHOST_VDPA_GET_IOVA_RANGE is assumed to be
        // defined in vhost_binding alongside the other VHOST_VDPA_* ioctls).
        let ret =
            unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) };
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
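// Minimal usage sketch (illustrative, not part of the original source): map a
// pinned host buffer read-only at a caller-chosen IOVA, then drop the mapping
// again. Assumes an opened device whose backend features were negotiated as in
// the tests below; the helper name and parameters are hypothetical.
#[allow(dead_code)]
fn example_dma_round_trip<AS: GuestAddressSpace>(
    vdpa: &VhostKernVdpa<AS>,
    buf: &[u8],
    iova: u64,
) -> Result<()> {
    vdpa.dma_map(iova, buf.len() as u64, buf.as_ptr(), true)?;
    vdpa.dma_unmap(iova, buf.len() as u64)
}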
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS {
&self.mem
}
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn get_backend_features_acked(&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
assert_eq!(vdpa.is_valid(&config), true);
config.queue_size = 0;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 31;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 33;
assert_eq!(vdpa.is_valid(&config), false);
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
vdpa.set_owner().unwrap();
vdpa.set_mem_table(&[]).unwrap_err();
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: 0x0,
memory_size: 0x10_0000,
userspace_addr: m.get_host_address(GuestAddress(0x0)).unwrap() as u64,
mmap_offset: 0,
mmap_handle: -1,
};
vdpa.set_mem_table(&[region]).unwrap();
assert!(vdpa.get_device_id().unwrap() > 0);
assert_eq!(vdpa.get_status().unwrap(), 0x0);
vdpa.set_status(0x1).unwrap();
assert_eq!(vdpa.get_status().unwrap(), 0x1);
let mut vec = vec![0u8; 8];
vdpa.get_config(0, &mut vec).unwrap();
vdpa.set_config(0, &vec).unwrap();
let eventfd = EventFd::new(0).unwrap();
// set_log_base() and set_log_fd() are not supported by vhost-vdpa
vdpa.set_log_base(
0x4000,
Some(VhostUserDirtyLogRegion {
mmap_size: 0x1000,
mmap_offset: 0x10,
mmap_handle: 1,
}),
)
.unwrap_err();
vdpa.set_log_base(0x4000, None).unwrap_err();
vdpa.set_log_fd(eventfd.as_raw_fd()).unwrap_err();
let max_queues = vdpa.get_vring_num().unwrap();
vdpa.set_vring_num(0, max_queues + 1).unwrap_err();
vdpa.set_vring_num(0, 32).unwrap(); | let config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
vdpa.set_vring_addr(0, &config).unwrap();
vdpa.set_vring_base(0, 1).unwrap();
vdpa.set_vring_call(0, &eventfd).unwrap();
vdpa.set_vring_kick(0, &eventfd).unwrap();
vdpa.set_vring_err(0, &eventfd).unwrap();
vdpa.set_config_call(&eventfd).unwrap();
assert_eq!(vdpa.get_vring_base(0).unwrap(), 1);
vdpa.set_vring_enable(0, true).unwrap();
vdpa.set_vring_enable(0, false).unwrap();
}
#[test]
#[serial]
fn test_vdpa_kern_dma() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let mut vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
let backend_features = vdpa.get_backend_features().unwrap();
assert_ne!(backend_features & (1 << VHOST_BACKEND_F_IOTLB_MSG_V2), 0);
vdpa.set_backend_features(backend_features).unwrap();
vdpa.set_owner().unwrap();
vdpa.dma_map(0xFFFF_0000, 0xFFFF, std::ptr::null::<u8>(), false)
.unwrap_err();
unsafe {
let layout = Layout::from_size_align(0xFFFF, 1).unwrap();
let ptr = alloc(layout);
vdpa.dma_map(0xFFFF_0000, 0xFFFF, ptr, false).unwrap();
vdpa.dma_unmap(0xFFFF_0000, 0xFFFF).unwrap();
dealloc(ptr, layout);
};
}
} | random_line_split |
|
vdpa.rs | // Copyright (C) 2021 Red Hat, Inc. All rights reserved.
// SPDX-License-Identifier: Apache-2.0 or BSD-3-Clause
//! Kernel-based vhost-vdpa backend.
use std::fs::{File, OpenOptions};
use std::io::Error as IOError;
use std::os::raw::{c_uchar, c_uint};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use vm_memory::GuestAddressSpace;
use vmm_sys_util::eventfd::EventFd;
use vmm_sys_util::fam::*;
use vmm_sys_util::ioctl::{ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref};
use super::vhost_binding::*;
use super::{ioctl_result, Error, Result, VhostKernBackend, VhostKernFeatures};
use crate::vdpa::*;
use crate::{VhostAccess, VhostIotlbBackend, VhostIotlbMsg, VhostIotlbType};
// Implement the FamStruct trait for vhost_vdpa_config
generate_fam_struct_impl!(
vhost_vdpa_config,
c_uchar,
buf,
c_uint,
len,
c_uint::MAX as usize
);
type VhostVdpaConfig = FamStructWrapper<vhost_vdpa_config>;
/// Handle for running VHOST_VDPA ioctls.
pub struct VhostKernVdpa<AS: GuestAddressSpace> {
fd: File,
mem: AS,
backend_features_acked: u64,
}
impl<AS: GuestAddressSpace> VhostKernVdpa<AS> {
/// Open a handle to a new VHOST-VDPA instance.
pub fn new(path: &str, mem: AS) -> Result<Self> {
Ok(VhostKernVdpa {
fd: OpenOptions::new()
.read(true)
.write(true)
.custom_flags(libc::O_CLOEXEC | libc::O_NONBLOCK)
.open(path)
.map_err(Error::VhostOpen)?,
mem,
backend_features_acked: 0,
})
}
}
impl<AS: GuestAddressSpace> VhostVdpa for VhostKernVdpa<AS> {
fn get_device_id(&self) -> Result<u32> {
let mut device_id: u32 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_DEVICE_ID(), &mut device_id) };
ioctl_result(ret, device_id)
}
fn get_status(&self) -> Result<u8> {
let mut status: u8 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_STATUS(), &mut status) };
ioctl_result(ret, status)
}
fn set_status(&self, status: u8) -> Result<()> {
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_STATUS(), &status) };
ioctl_result(ret, ())
}
fn get_config(&self, offset: u32, buffer: &mut [u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
let ret = unsafe {
ioctl_with_ptr(
self,
VHOST_VDPA_GET_CONFIG(),
config.as_mut_fam_struct_ptr(),
)
};
buffer.copy_from_slice(config.as_slice());
ioctl_result(ret, ())
}
fn set_config(&self, offset: u32, buffer: &[u8]) -> Result<()> {
let mut config = VhostVdpaConfig::new(buffer.len())
.map_err(|_| Error::IoctlError(IOError::from_raw_os_error(libc::ENOMEM)))?;
config.as_mut_fam_struct().off = offset;
config.as_mut_slice().copy_from_slice(buffer);
let ret =
unsafe { ioctl_with_ptr(self, VHOST_VDPA_SET_CONFIG(), config.as_fam_struct_ptr()) };
ioctl_result(ret, ())
}
fn set_vring_enable(&self, queue_index: usize, enabled: bool) -> Result<()> {
let vring_state = vhost_vring_state {
index: queue_index as u32,
num: enabled as u32,
};
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_VRING_ENABLE(), &vring_state) };
ioctl_result(ret, ())
}
fn get_vring_num(&self) -> Result<u16> {
let mut vring_num: u16 = 0;
let ret = unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_VRING_NUM(), &mut vring_num) };
ioctl_result(ret, vring_num)
}
fn set_config_call(&self, fd: &EventFd) -> Result<()> {
let event_fd: ::std::os::raw::c_int = fd.as_raw_fd();
let ret = unsafe { ioctl_with_ref(self, VHOST_VDPA_SET_CONFIG_CALL(), &event_fd) };
ioctl_result(ret, ())
}
fn get_iova_range(&self) -> Result<VhostVdpaIovaRange> {
let mut low_iova_range = vhost_vdpa_iova_range { first: 0, last: 0 };
        // Use the IOVA-range ioctl here: VHOST_VDPA_GET_VRING_NUM queries the vring
        // size, not the IOVA range (VHOST_VDPA_GET_IOVA_RANGE is assumed to be
        // defined in vhost_binding alongside the other VHOST_VDPA_* ioctls).
        let ret =
            unsafe { ioctl_with_mut_ref(self, VHOST_VDPA_GET_IOVA_RANGE(), &mut low_iova_range) };
let iova_range = VhostVdpaIovaRange {
first: low_iova_range.first,
last: low_iova_range.last,
};
ioctl_result(ret, iova_range)
}
fn dma_map(&self, iova: u64, size: u64, vaddr: *const u8, readonly: bool) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
userspace_addr: vaddr as u64,
perm: match readonly {
true => VhostAccess::ReadOnly,
false => VhostAccess::ReadWrite,
},
msg_type: VhostIotlbType::Update,
};
self.send_iotlb_msg(&iotlb)
}
fn dma_unmap(&self, iova: u64, size: u64) -> Result<()> {
let iotlb = VhostIotlbMsg {
iova,
size,
msg_type: VhostIotlbType::Invalidate,
..Default::default()
};
self.send_iotlb_msg(&iotlb)
}
}
impl<AS: GuestAddressSpace> VhostKernBackend for VhostKernVdpa<AS> {
type AS = AS;
fn mem(&self) -> &Self::AS |
}
impl<AS: GuestAddressSpace> AsRawFd for VhostKernVdpa<AS> {
fn as_raw_fd(&self) -> RawFd {
self.fd.as_raw_fd()
}
}
impl<AS: GuestAddressSpace> VhostKernFeatures for VhostKernVdpa<AS> {
fn get_backend_features_acked(&self) -> u64 {
self.backend_features_acked
}
fn set_backend_features_acked(&mut self, features: u64) {
self.backend_features_acked = features;
}
}
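// Negotiation sketch (illustrative; assumes the `get_backend_features`/
// `set_backend_features` helpers that `VhostKernFeatures` layers on top of the
// accessors above): ack the advertised backend features so that later IOTLB
// messages use the negotiated layout. The helper name is hypothetical.
#[allow(dead_code)]
fn example_negotiate_backend_features<AS: GuestAddressSpace>(
    vdpa: &mut VhostKernVdpa<AS>,
) -> Result<u64> {
    let features = vdpa.get_backend_features()?;
    vdpa.set_backend_features(features)?;
    Ok(vdpa.get_backend_features_acked())
}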
#[cfg(test)]
mod tests {
const VHOST_VDPA_PATH: &str = "/dev/vhost-vdpa-0";
use std::alloc::{alloc, dealloc, Layout};
use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
use vmm_sys_util::eventfd::EventFd;
use super::*;
use crate::{
VhostBackend, VhostUserDirtyLogRegion, VhostUserMemoryRegionInfo, VringConfigData,
};
use serial_test::serial;
use std::io::ErrorKind;
/// macro to skip test if vhost-vdpa device path is not found.
///
/// vDPA simulators are available since Linux 5.7, but the CI may have
/// an older kernel, so for now we skip the test if we don't find
/// the device.
macro_rules! unwrap_not_found {
( $e:expr ) => {
match $e {
Ok(v) => v,
Err(error) => match error {
Error::VhostOpen(ref e) if e.kind() == ErrorKind::NotFound => {
println!("Err: {:?} SKIPPED", e);
return;
}
e => panic!("Err: {:?}", e),
},
}
};
}
#[test]
#[serial]
fn test_vdpa_kern_new_device() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
assert!(vdpa.as_raw_fd() >= 0);
assert!(vdpa.mem().find_region(GuestAddress(0x100)).is_some());
assert!(vdpa.mem().find_region(GuestAddress(0x10_0000)).is_none());
}
#[test]
#[serial]
fn test_vdpa_kern_is_valid() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let mut config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
assert_eq!(vdpa.is_valid(&config), true);
config.queue_size = 0;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 31;
assert_eq!(vdpa.is_valid(&config), false);
config.queue_size = 33;
assert_eq!(vdpa.is_valid(&config), false);
}
#[test]
#[serial]
fn test_vdpa_kern_ioctls() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
vdpa.set_owner().unwrap();
vdpa.set_mem_table(&[]).unwrap_err();
let region = VhostUserMemoryRegionInfo {
guest_phys_addr: 0x0,
memory_size: 0x10_0000,
userspace_addr: m.get_host_address(GuestAddress(0x0)).unwrap() as u64,
mmap_offset: 0,
mmap_handle: -1,
};
vdpa.set_mem_table(&[region]).unwrap();
assert!(vdpa.get_device_id().unwrap() > 0);
assert_eq!(vdpa.get_status().unwrap(), 0x0);
vdpa.set_status(0x1).unwrap();
assert_eq!(vdpa.get_status().unwrap(), 0x1);
let mut vec = vec![0u8; 8];
vdpa.get_config(0, &mut vec).unwrap();
vdpa.set_config(0, &vec).unwrap();
let eventfd = EventFd::new(0).unwrap();
// set_log_base() and set_log_fd() are not supported by vhost-vdpa
vdpa.set_log_base(
0x4000,
Some(VhostUserDirtyLogRegion {
mmap_size: 0x1000,
mmap_offset: 0x10,
mmap_handle: 1,
}),
)
.unwrap_err();
vdpa.set_log_base(0x4000, None).unwrap_err();
vdpa.set_log_fd(eventfd.as_raw_fd()).unwrap_err();
let max_queues = vdpa.get_vring_num().unwrap();
vdpa.set_vring_num(0, max_queues + 1).unwrap_err();
vdpa.set_vring_num(0, 32).unwrap();
let config = VringConfigData {
queue_max_size: 32,
queue_size: 32,
flags: 0,
desc_table_addr: 0x1000,
used_ring_addr: 0x2000,
avail_ring_addr: 0x3000,
log_addr: None,
};
vdpa.set_vring_addr(0, &config).unwrap();
vdpa.set_vring_base(0, 1).unwrap();
vdpa.set_vring_call(0, &eventfd).unwrap();
vdpa.set_vring_kick(0, &eventfd).unwrap();
vdpa.set_vring_err(0, &eventfd).unwrap();
vdpa.set_config_call(&eventfd).unwrap();
assert_eq!(vdpa.get_vring_base(0).unwrap(), 1);
vdpa.set_vring_enable(0, true).unwrap();
vdpa.set_vring_enable(0, false).unwrap();
}
#[test]
#[serial]
fn test_vdpa_kern_dma() {
let m = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0), 0x10_0000)]).unwrap();
let mut vdpa = unwrap_not_found!(VhostKernVdpa::new(VHOST_VDPA_PATH, &m));
let features = vdpa.get_features().unwrap();
// VIRTIO_F_VERSION_1 (bit 32) should be set
assert_ne!(features & (1 << 32), 0);
vdpa.set_features(features).unwrap();
let backend_features = vdpa.get_backend_features().unwrap();
assert_ne!(backend_features & (1 << VHOST_BACKEND_F_IOTLB_MSG_V2), 0);
vdpa.set_backend_features(backend_features).unwrap();
vdpa.set_owner().unwrap();
vdpa.dma_map(0xFFFF_0000, 0xFFFF, std::ptr::null::<u8>(), false)
.unwrap_err();
unsafe {
let layout = Layout::from_size_align(0xFFFF, 1).unwrap();
let ptr = alloc(layout);
vdpa.dma_map(0xFFFF_0000, 0xFFFF, ptr, false).unwrap();
vdpa.dma_unmap(0xFFFF_0000, 0xFFFF).unwrap();
dealloc(ptr, layout);
};
}
}
| {
&self.mem
} | identifier_body |
lib.rs | use core::fmt;
use std::convert::{TryFrom, TryInto};
use std::ops::Deref;
use std::str::FromStr;
use bulletproofs::r1cs::Prover;
use bulletproofs::{BulletproofGens, PedersenGens};
use bulletproofs_gadgets::fixed_deposit_tree::builder::{FixedDepositTree, FixedDepositTreeBuilder};
use bulletproofs_gadgets::poseidon::builder::{Poseidon, PoseidonBuilder};
use bulletproofs_gadgets::poseidon::{PoseidonSbox, Poseidon_hash_2};
use curve25519_dalek::ristretto::CompressedRistretto;
use curve25519_dalek::scalar::Scalar;
use js_sys::{Array, JsString, Uint8Array};
use merlin::Transcript;
use rand::rngs::OsRng;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, this uses `wee_alloc` as the global
// allocator.
//
// If you don't want to use `wee_alloc`, you can safely delete this.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(typescript_type = "Leaves")]
pub type Leaves;
#[wasm_bindgen(typescript_type = "Commitments")]
pub type Commitments;
}
#[wasm_bindgen(typescript_custom_section)]
const LEAVES: &str = "type Leaves = Array<Uint8Array>;";
#[wasm_bindgen(typescript_custom_section)]
const COMMITMENTS: &str = "type Commitments = Array<Uint8Array>;";
/// Returns a Status Code for the operation.
#[wasm_bindgen]
#[derive(Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum OpStatusCode {
Unknown = 0,
/// Invalid hex string length when decoding
InvalidHexLength = 1,
/// Failed to parse hex string
HexParsingFailed = 2,
/// Invalid number of note parts when decoding
InvalidNoteLength = 3,
/// Invalid note prefix
InvalidNotePrefix = 4,
/// Invalid note version
InvalidNoteVersion = 5,
/// Invalid note id when parsing
InvalidNoteId = 6,
/// Invalid note block number when parsing
InvalidNoteBlockNumber = 7,
/// Invalid note secrets
InvalidNoteSecrets = 8,
/// Unable to find merkle tree
MerkleTreeNotFound = 9,
    /// Failed serialization of passed params: the Rust type could not be
    /// converted into a `JsValue`
SerializationFailed = 10,
/// Failed deserialization of JsValue into rust type
DeserializationFailed = 11,
    /// Invalid array length: expected exactly 32 bytes.
InvalidArrayLength = 12,
}
impl From<OpStatusCode> for JsValue {
fn from(e: OpStatusCode) -> Self {
JsValue::from(e as u32)
}
}
const BULLETPROOF_GENS_SIZE: usize = 16_400;
const NOTE_PREFIX: &str = "webb.mix";
#[wasm_bindgen]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NoteVersion {
V1,
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct Note {
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
        if !partial && !full {
            return Err(OpStatusCode::InvalidNoteLength);
        }
        if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => |
};
        if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
        // `hex::decode` failures are parse errors; the `try_into` step enforces
        // the 32-byte length, so the status codes are mapped in that order.
        let r = hex::decode(&note_val[..64])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
        let nullifier = hex::decode(&note_val[64..])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
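// Round-trip sketch (illustrative, not part of the original source): the
// Display and FromStr impls above are inverses, so a serialized note parses
// back to the same secrets. The fixed byte patterns below are arbitrary.
#[allow(dead_code)]
fn example_note_round_trip() {
    let note = Note {
        prefix: NOTE_PREFIX.to_owned(),
        version: NoteVersion::V1,
        token_symbol: "EDG".to_owned(),
        group_id: 0,
        block_number: Some(42),
        r: Scalar::from_bytes_mod_order([1u8; 32]),
        nullifier: Scalar::from_bytes_mod_order([2u8; 32]),
    };
    // "webb.mix-v1-EDG-0-42-<128 hex chars>"
    let encoded = note.to_string();
    let decoded: Note = encoded.parse().expect("well-formed note");
    assert_eq!(decoded.r, note.r);
    assert_eq!(decoded.nullifier, note.nullifier);
}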
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen]
pub struct NoteGenerator {
hasher: Poseidon,
rng: OsRng,
}
#[wasm_bindgen]
impl NoteGenerator {
#[wasm_bindgen(constructor)]
pub fn new(hasher: &PoseidonHasher) -> Self {
Self {
hasher: hasher.inner.clone(),
rng: OsRng::default(),
}
}
pub fn generate(&mut self, token_symbol: JsString, group_id: u32) -> Note {
let r = Scalar::random(&mut self.rng);
let nullifier = Scalar::random(&mut self.rng);
Note {
prefix: NOTE_PREFIX.to_string(),
version: NoteVersion::V1,
token_symbol: token_symbol.into(),
block_number: None,
group_id,
r,
nullifier,
}
}
pub fn leaf_of(&self, note: &Note) -> Uint8Array {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
ScalarWrapper(leaf).into()
}
pub fn nullifier_hash_of(&self, note: &Note) -> Uint8Array {
let hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
ScalarWrapper(hash).into()
}
}
#[wasm_bindgen]
pub struct MerkleTree {
inner: FixedDepositTree,
hasher: Poseidon,
}
#[wasm_bindgen]
impl MerkleTree {
#[wasm_bindgen(constructor)]
pub fn new(depth: u8, hasher: &PoseidonHasher) -> Self {
let tree = FixedDepositTreeBuilder::new()
.hash_params(hasher.inner.clone())
.depth(depth as usize)
.build();
Self {
inner: tree,
hasher: hasher.inner.clone(),
}
}
pub fn add_leaf_at_index(&mut self, leaf: Uint8Array, index: u64) -> Result<(), JsValue> {
let idx = Scalar::from(index);
let leaf = ScalarWrapper::try_from(leaf)?;
self.inner.tree.update(idx, *leaf);
Ok(())
}
pub fn add_leaves(&mut self, leaves: Leaves, target_root: Option<Uint8Array>) -> Result<(), JsValue> {
let xs = Array::from(&leaves)
.to_vec()
.into_iter()
.map(|v| Uint8Array::new_with_byte_offset_and_length(&v, 0, 32))
.map(ScalarWrapper::try_from)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.map(|v| v.to_bytes())
.collect();
let root = target_root
.map(ScalarWrapper::try_from)
.transpose()?
.map(|v| v.to_bytes());
self.inner.tree.add_leaves(xs, root);
Ok(())
}
pub fn root(&self) -> Uint8Array {
let root = self.inner.tree.root;
Uint8Array::from(root.to_bytes().to_vec().as_slice())
}
pub fn create_zk_proof(
&mut self,
root: Uint8Array,
recipient: Uint8Array,
relayer: Uint8Array,
note: &Note,
) -> Result<ZkProof, JsValue> {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
let root = ScalarWrapper::try_from(root)?;
let recipient = ScalarWrapper::try_from(recipient)?;
let relayer = ScalarWrapper::try_from(relayer)?;
        // Register this leaf's secrets (r, nullifier, nullifier_hash) with the
        // tree so the prover can open the commitment below.
let nullifier_hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
self.inner.add_secrets(leaf, note.r, note.nullifier, nullifier_hash);
let pc_gens = PedersenGens::default();
let bp_gens = self.hasher.bp_gens.clone();
let mut prover_transcript = Transcript::new(b"zk_membership_proof");
let prover = Prover::new(&pc_gens, &mut prover_transcript);
let (proof, (comms, nullifier_hash, leaf_index_comms, proof_comms)) =
self.inner.prove_zk(*root, leaf, *recipient, *relayer, &bp_gens, prover);
let zkproof = ZkProof {
proof: proof.to_bytes(),
comms,
leaf_index_comms,
proof_comms,
nullifier_hash,
};
Ok(zkproof)
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ScalarWrapper(Scalar);
impl Deref for ScalarWrapper {
type Target = Scalar;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TryFrom<Uint8Array> for ScalarWrapper {
type Error = OpStatusCode;
fn try_from(value: Uint8Array) -> Result<Self, Self::Error> {
let bytes: [u8; 32] = value
.to_vec()
.try_into()
.map_err(|_| OpStatusCode::InvalidArrayLength)?;
Ok(Self(Scalar::from_bytes_mod_order(bytes)))
}
}
#[allow(clippy::from_over_into)]
impl Into<Uint8Array> for ScalarWrapper {
fn into(self) -> Uint8Array {
Uint8Array::from(self.0.to_bytes().to_vec().as_slice())
}
}
#[wasm_bindgen(start)]
pub fn wasm_init() -> Result<(), JsValue> {
console_error_panic_hook::set_once();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use lazy_static::lazy_static;
use rand::rngs::OsRng;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
lazy_static! {
static ref HASHER: PoseidonHasher = PoseidonHasher::default();
}
#[wasm_bindgen_test]
fn init_hasher() {
let mut rng = OsRng::default();
let a = Scalar::random(&mut rng);
let b = Scalar::random(&mut rng);
let hash = HASHER.hash(
Uint8Array::from(a.to_bytes().to_vec().as_slice()),
Uint8Array::from(b.to_bytes().to_vec().as_slice()),
);
assert!(hash.is_ok());
let x = Uint8Array::from(Vec::new().as_slice());
let y = Uint8Array::from(Vec::new().as_slice());
let hash = HASHER.hash(x, y);
assert_eq!(hash.err(), Some(OpStatusCode::InvalidArrayLength.into()));
}
#[wasm_bindgen_test]
fn generate_note() {
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
assert_eq!(note.group_id, 0);
assert_eq!(note.token_symbol, "EDG");
}
#[wasm_bindgen_test]
fn zk_proof() {
let mut rng = OsRng::default();
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
        let my_leaf = ScalarWrapper::try_from(ng.leaf_of(&note)).unwrap();
let mut mt = MerkleTree::new(32, &HASHER);
        let mut leaves: Vec<_> = (0..7).map(|_| Scalar::random(&mut rng).to_bytes()).collect(); // distinct random leaves
leaves[3] = my_leaf.to_bytes();
mt.inner.tree.add_leaves(leaves, None);
let recipient = ScalarWrapper(Scalar::zero());
let relayer = ScalarWrapper(Scalar::zero());
        let zk_proof = mt.create_zk_proof(mt.root(), recipient.into(), relayer.into(), &note);
assert!(zk_proof.is_ok());
}
}
| {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
} | conditional_block |
lib.rs | use core::fmt;
use std::convert::{TryFrom, TryInto};
use std::ops::Deref;
use std::str::FromStr;
use bulletproofs::r1cs::Prover;
use bulletproofs::{BulletproofGens, PedersenGens};
use bulletproofs_gadgets::fixed_deposit_tree::builder::{FixedDepositTree, FixedDepositTreeBuilder};
use bulletproofs_gadgets::poseidon::builder::{Poseidon, PoseidonBuilder};
use bulletproofs_gadgets::poseidon::{PoseidonSbox, Poseidon_hash_2};
use curve25519_dalek::ristretto::CompressedRistretto;
use curve25519_dalek::scalar::Scalar;
use js_sys::{Array, JsString, Uint8Array};
use merlin::Transcript;
use rand::rngs::OsRng;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, this uses `wee_alloc` as the global
// allocator.
//
// If you don't want to use `wee_alloc`, you can safely delete this.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(typescript_type = "Leaves")]
pub type Leaves;
#[wasm_bindgen(typescript_type = "Commitments")]
pub type Commitments;
}
#[wasm_bindgen(typescript_custom_section)]
const LEAVES: &str = "type Leaves = Array<Uint8Array>;";
#[wasm_bindgen(typescript_custom_section)]
const COMMITMENTS: &str = "type Commitments = Array<Uint8Array>;";
/// Returns a Status Code for the operation.
#[wasm_bindgen]
#[derive(Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum OpStatusCode {
Unknown = 0,
/// Invalid hex string length when decoding
InvalidHexLength = 1,
/// Failed to parse hex string
HexParsingFailed = 2,
/// Invalid number of note parts when decoding
InvalidNoteLength = 3,
/// Invalid note prefix
InvalidNotePrefix = 4,
/// Invalid note version
InvalidNoteVersion = 5,
/// Invalid note id when parsing
InvalidNoteId = 6,
/// Invalid note block number when parsing
InvalidNoteBlockNumber = 7,
/// Invalid note secrets
InvalidNoteSecrets = 8,
/// Unable to find merkle tree
MerkleTreeNotFound = 9,
    /// Failed serialization of passed params: the Rust type could not be
    /// converted into a `JsValue`
SerializationFailed = 10,
/// Failed deserialization of JsValue into rust type
DeserializationFailed = 11,
    /// Invalid array length: expected exactly 32 bytes.
InvalidArrayLength = 12,
}
impl From<OpStatusCode> for JsValue {
fn from(e: OpStatusCode) -> Self {
JsValue::from(e as u32)
}
}
const BULLETPROOF_GENS_SIZE: usize = 16_400;
const NOTE_PREFIX: &str = "webb.mix";
#[wasm_bindgen]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NoteVersion {
V1,
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct Note {
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
        if !partial && !full {
            return Err(OpStatusCode::InvalidNoteLength);
        }
        if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
        if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
        // `hex::decode` failures are parse errors; the `try_into` step enforces
        // the 32-byte length, so the status codes are mapped in that order.
        let r = hex::decode(&note_val[..64])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
        let nullifier = hex::decode(&note_val[64..])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
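// Transport sketch (illustrative, not part of the original source): the
// getters above expose the proof pieces to JS; on the Rust side the same data
// can be flattened into plain byte vectors for storage or transport.
#[allow(dead_code)]
fn example_flatten_proof(zk: &ZkProof) -> (Vec<u8>, Vec<u8>) {
    (zk.proof().to_vec(), zk.nullifier_hash().to_vec())
}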
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
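// Caching sketch (illustrative, not part of the original source): building
// `BulletproofGens` is expensive, so a host can serialize them once via the
// getter above and restore them later with the setter; malformed bytes fall
// back to freshly built generators instead of erroring.
#[allow(dead_code)]
fn example_cache_bp_gens() {
    let opts = PoseidonHasherOptions::new();
    let cached: Uint8Array = opts.bp_gens();
    let mut restored = PoseidonHasherOptions::new();
    restored.set_bp_gens(cached);
}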
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn | (opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen]
pub struct NoteGenerator {
hasher: Poseidon,
rng: OsRng,
}
#[wasm_bindgen]
impl NoteGenerator {
#[wasm_bindgen(constructor)]
pub fn new(hasher: &PoseidonHasher) -> Self {
Self {
hasher: hasher.inner.clone(),
rng: OsRng::default(),
}
}
pub fn generate(&mut self, token_symbol: JsString, group_id: u32) -> Note {
let r = Scalar::random(&mut self.rng);
let nullifier = Scalar::random(&mut self.rng);
Note {
prefix: NOTE_PREFIX.to_string(),
version: NoteVersion::V1,
token_symbol: token_symbol.into(),
block_number: None,
group_id,
r,
nullifier,
}
}
pub fn leaf_of(&self, note: &Note) -> Uint8Array {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
ScalarWrapper(leaf).into()
}
pub fn nullifier_hash_of(&self, note: &Note) -> Uint8Array {
let hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
ScalarWrapper(hash).into()
}
}
#[wasm_bindgen]
pub struct MerkleTree {
inner: FixedDepositTree,
hasher: Poseidon,
}
#[wasm_bindgen]
impl MerkleTree {
#[wasm_bindgen(constructor)]
pub fn new(depth: u8, hasher: &PoseidonHasher) -> Self {
let tree = FixedDepositTreeBuilder::new()
.hash_params(hasher.inner.clone())
.depth(depth as usize)
.build();
Self {
inner: tree,
hasher: hasher.inner.clone(),
}
}
pub fn add_leaf_at_index(&mut self, leaf: Uint8Array, index: u64) -> Result<(), JsValue> {
let idx = Scalar::from(index);
let leaf = ScalarWrapper::try_from(leaf)?;
self.inner.tree.update(idx, *leaf);
Ok(())
}
pub fn add_leaves(&mut self, leaves: Leaves, target_root: Option<Uint8Array>) -> Result<(), JsValue> {
let xs = Array::from(&leaves)
.to_vec()
.into_iter()
.map(|v| Uint8Array::new_with_byte_offset_and_length(&v, 0, 32))
.map(ScalarWrapper::try_from)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.map(|v| v.to_bytes())
.collect();
let root = target_root
.map(ScalarWrapper::try_from)
.transpose()?
.map(|v| v.to_bytes());
self.inner.tree.add_leaves(xs, root);
Ok(())
}
pub fn root(&self) -> Uint8Array {
let root = self.inner.tree.root;
Uint8Array::from(root.to_bytes().to_vec().as_slice())
}
pub fn create_zk_proof(
&mut self,
root: Uint8Array,
recipient: Uint8Array,
relayer: Uint8Array,
note: &Note,
) -> Result<ZkProof, JsValue> {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
let root = ScalarWrapper::try_from(root)?;
let recipient = ScalarWrapper::try_from(recipient)?;
let relayer = ScalarWrapper::try_from(relayer)?;
        // Register this leaf's secrets (r, nullifier, nullifier_hash) with the
        // tree so the prover can open the commitment below.
let nullifier_hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
self.inner.add_secrets(leaf, note.r, note.nullifier, nullifier_hash);
let pc_gens = PedersenGens::default();
let bp_gens = self.hasher.bp_gens.clone();
let mut prover_transcript = Transcript::new(b"zk_membership_proof");
let prover = Prover::new(&pc_gens, &mut prover_transcript);
let (proof, (comms, nullifier_hash, leaf_index_comms, proof_comms)) =
self.inner.prove_zk(*root, leaf, *recipient, *relayer, &bp_gens, prover);
let zkproof = ZkProof {
proof: proof.to_bytes(),
comms,
leaf_index_comms,
proof_comms,
nullifier_hash,
};
Ok(zkproof)
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ScalarWrapper(Scalar);
impl Deref for ScalarWrapper {
type Target = Scalar;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TryFrom<Uint8Array> for ScalarWrapper {
type Error = OpStatusCode;
fn try_from(value: Uint8Array) -> Result<Self, Self::Error> {
let bytes: [u8; 32] = value
.to_vec()
.try_into()
.map_err(|_| OpStatusCode::InvalidArrayLength)?;
Ok(Self(Scalar::from_bytes_mod_order(bytes)))
}
}
#[allow(clippy::from_over_into)]
impl Into<Uint8Array> for ScalarWrapper {
fn into(self) -> Uint8Array {
Uint8Array::from(self.0.to_bytes().to_vec().as_slice())
}
}
#[wasm_bindgen(start)]
pub fn wasm_init() -> Result<(), JsValue> {
console_error_panic_hook::set_once();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use lazy_static::lazy_static;
use rand::rngs::OsRng;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
lazy_static! {
static ref HASHER: PoseidonHasher = PoseidonHasher::default();
}
#[wasm_bindgen_test]
fn init_hasher() {
let mut rng = OsRng::default();
let a = Scalar::random(&mut rng);
let b = Scalar::random(&mut rng);
let hash = HASHER.hash(
Uint8Array::from(a.to_bytes().to_vec().as_slice()),
Uint8Array::from(b.to_bytes().to_vec().as_slice()),
);
assert!(hash.is_ok());
let x = Uint8Array::from(Vec::new().as_slice());
let y = Uint8Array::from(Vec::new().as_slice());
let hash = HASHER.hash(x, y);
assert_eq!(hash.err(), Some(OpStatusCode::InvalidArrayLength.into()));
}
#[wasm_bindgen_test]
fn generate_note() {
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
assert_eq!(note.group_id, 0);
assert_eq!(note.token_symbol, "EDG");
}
#[wasm_bindgen_test]
fn zk_proof() {
let mut rng = OsRng::default();
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
        let my_leaf = ScalarWrapper::try_from(ng.leaf_of(&note)).unwrap();
let mut mt = MerkleTree::new(32, &HASHER);
        let mut leaves: Vec<_> = (0..7).map(|_| Scalar::random(&mut rng).to_bytes()).collect(); // distinct random leaves
leaves[3] = my_leaf.to_bytes();
mt.inner.tree.add_leaves(leaves, None);
let recipient = ScalarWrapper(Scalar::zero());
let relayer = ScalarWrapper(Scalar::zero());
        let zk_proof = mt.create_zk_proof(mt.root(), recipient.into(), relayer.into(), &note);
assert!(zk_proof.is_ok());
}
}
| with_options | identifier_name |
lib.rs | use core::fmt;
use std::convert::{TryFrom, TryInto};
use std::ops::Deref;
use std::str::FromStr;
use bulletproofs::r1cs::Prover;
use bulletproofs::{BulletproofGens, PedersenGens};
use bulletproofs_gadgets::fixed_deposit_tree::builder::{FixedDepositTree, FixedDepositTreeBuilder};
use bulletproofs_gadgets::poseidon::builder::{Poseidon, PoseidonBuilder};
use bulletproofs_gadgets::poseidon::{PoseidonSbox, Poseidon_hash_2};
use curve25519_dalek::ristretto::CompressedRistretto;
use curve25519_dalek::scalar::Scalar;
use js_sys::{Array, JsString, Uint8Array};
use merlin::Transcript;
use rand::rngs::OsRng;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, this uses `wee_alloc` as the global
// allocator.
//
// If you don't want to use `wee_alloc`, you can safely delete this.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(typescript_type = "Leaves")]
pub type Leaves;
#[wasm_bindgen(typescript_type = "Commitments")]
pub type Commitments;
}
#[wasm_bindgen(typescript_custom_section)]
const LEAVES: &str = "type Leaves = Array<Uint8Array>;";
#[wasm_bindgen(typescript_custom_section)]
const COMMITMENTS: &str = "type Commitments = Array<Uint8Array>;";
/// Returns a Status Code for the operation.
#[wasm_bindgen]
#[derive(Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum OpStatusCode {
Unknown = 0,
/// Invalid hex string length when decoding
InvalidHexLength = 1,
/// Failed to parse hex string
HexParsingFailed = 2,
/// Invalid number of note parts when decoding
InvalidNoteLength = 3,
/// Invalid note prefix
InvalidNotePrefix = 4,
/// Invalid note version
InvalidNoteVersion = 5,
/// Invalid note id when parsing
InvalidNoteId = 6,
/// Invalid note block number when parsing
InvalidNoteBlockNumber = 7,
/// Invalid note secrets
InvalidNoteSecrets = 8,
/// Unable to find merkle tree
MerkleTreeNotFound = 9,
    /// Failed serialization of passed params: the Rust type could not be
    /// converted into a `JsValue`
SerializationFailed = 10,
/// Failed deserialization of JsValue into rust type
DeserializationFailed = 11,
    /// Invalid array length: expected exactly 32 bytes.
InvalidArrayLength = 12,
}
impl From<OpStatusCode> for JsValue {
fn from(e: OpStatusCode) -> Self {
JsValue::from(e as u32)
}
}
const BULLETPROOF_GENS_SIZE: usize = 16_400;
const NOTE_PREFIX: &str = "webb.mix";
#[wasm_bindgen]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NoteVersion {
V1,
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct Note {
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
}
impl fmt::Display for NoteVersion {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
        if !partial && !full {
            return Err(OpStatusCode::InvalidNoteLength);
        }
        if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
        if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
        // `hex::decode` failures are parse errors; the `try_into` step enforces
        // the 32-byte length, so the status codes are mapped in that order.
        let r = hex::decode(&note_val[..64])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
        let nullifier = hex::decode(&note_val[64..])
            .map(|v| v.try_into())
            .map(|r| r.map(Scalar::from_bytes_mod_order))
            .map_err(|_| OpStatusCode::HexParsingFailed)?
            .map_err(|_| OpStatusCode::InvalidHexLength)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
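// Usage sketch, assuming the intended JS-side flow: the getter/setter pair
// above lets callers cache the expensive-to-build generators between runs.
//
// let opts = PoseidonHasherOptions::new();
// let cached = opts.bp_gens();      // bincode-serialized BulletproofGens
// let mut restored = PoseidonHasherOptions::new();
// restored.set_bp_gens(cached);     // falls back to fresh gens on bad input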
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self |
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen]
pub struct NoteGenerator {
hasher: Poseidon,
rng: OsRng,
}
#[wasm_bindgen]
impl NoteGenerator {
#[wasm_bindgen(constructor)]
pub fn new(hasher: &PoseidonHasher) -> Self {
Self {
hasher: hasher.inner.clone(),
rng: OsRng::default(),
}
}
pub fn generate(&mut self, token_symbol: JsString, group_id: u32) -> Note {
let r = Scalar::random(&mut self.rng);
let nullifier = Scalar::random(&mut self.rng);
Note {
prefix: NOTE_PREFIX.to_string(),
version: NoteVersion::V1,
token_symbol: token_symbol.into(),
block_number: None,
group_id,
r,
nullifier,
}
}
pub fn leaf_of(&self, note: &Note) -> Uint8Array {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
ScalarWrapper(leaf).into()
}
pub fn nullifier_hash_of(&self, note: &Note) -> Uint8Array {
let hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
ScalarWrapper(hash).into()
}
}
#[wasm_bindgen]
pub struct MerkleTree {
inner: FixedDepositTree,
hasher: Poseidon,
}
#[wasm_bindgen]
impl MerkleTree {
#[wasm_bindgen(constructor)]
pub fn new(depth: u8, hasher: &PoseidonHasher) -> Self {
let tree = FixedDepositTreeBuilder::new()
.hash_params(hasher.inner.clone())
.depth(depth as usize)
.build();
Self {
inner: tree,
hasher: hasher.inner.clone(),
}
}
pub fn add_leaf_at_index(&mut self, leaf: Uint8Array, index: u64) -> Result<(), JsValue> {
let idx = Scalar::from(index);
let leaf = ScalarWrapper::try_from(leaf)?;
self.inner.tree.update(idx, *leaf);
Ok(())
}
pub fn add_leaves(&mut self, leaves: Leaves, target_root: Option<Uint8Array>) -> Result<(), JsValue> {
let xs = Array::from(&leaves)
.to_vec()
.into_iter()
.map(|v| Uint8Array::new_with_byte_offset_and_length(&v, 0, 32))
.map(ScalarWrapper::try_from)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.map(|v| v.to_bytes())
.collect();
let root = target_root
.map(ScalarWrapper::try_from)
.transpose()?
.map(|v| v.to_bytes());
self.inner.tree.add_leaves(xs, root);
Ok(())
}
pub fn root(&self) -> Uint8Array {
let root = self.inner.tree.root;
Uint8Array::from(root.to_bytes().to_vec().as_slice())
}
pub fn create_zk_proof(
&mut self,
root: Uint8Array,
recipient: Uint8Array,
relayer: Uint8Array,
note: &Note,
) -> Result<ZkProof, JsValue> {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
let root = ScalarWrapper::try_from(root)?;
let recipient = ScalarWrapper::try_from(recipient)?;
let relayer = ScalarWrapper::try_from(relayer)?;
// add the current leaf we need to prove to the secrets.
let nullifier_hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
self.inner.add_secrets(leaf, note.r, note.nullifier, nullifier_hash);
let pc_gens = PedersenGens::default();
let bp_gens = self.hasher.bp_gens.clone();
let mut prover_transcript = Transcript::new(b"zk_membership_proof");
let prover = Prover::new(&pc_gens, &mut prover_transcript);
let (proof, (comms, nullifier_hash, leaf_index_comms, proof_comms)) =
self.inner.prove_zk(*root, leaf, *recipient, *relayer, &bp_gens, prover);
let zkproof = ZkProof {
proof: proof.to_bytes(),
comms,
leaf_index_comms,
proof_comms,
nullifier_hash,
};
Ok(zkproof)
}
}
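// End-to-end usage sketch mirroring the `zk_proof` test below; `recipient`
// and `relayer` are assumed to be caller-supplied 32-byte values.
//
// let hasher = PoseidonHasher::default();
// let mut ng = NoteGenerator::new(&hasher);
// let note = ng.generate(JsString::from("EDG"), 0);
// let mut mt = MerkleTree::new(32, &hasher);
// mt.add_leaf_at_index(ng.leaf_of(&note), 0)?;
// let proof = mt.create_zk_proof(mt.root(), recipient, relayer, &note)?;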
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ScalarWrapper(Scalar);
impl Deref for ScalarWrapper {
type Target = Scalar;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TryFrom<Uint8Array> for ScalarWrapper {
type Error = OpStatusCode;
fn try_from(value: Uint8Array) -> Result<Self, Self::Error> {
let bytes: [u8; 32] = value
.to_vec()
.try_into()
.map_err(|_| OpStatusCode::InvalidArrayLength)?;
Ok(Self(Scalar::from_bytes_mod_order(bytes)))
}
}
#[allow(clippy::from_over_into)]
impl Into<Uint8Array> for ScalarWrapper {
fn into(self) -> Uint8Array {
Uint8Array::from(self.0.to_bytes().to_vec().as_slice())
}
}
#[wasm_bindgen(start)]
pub fn wasm_init() -> Result<(), JsValue> {
console_error_panic_hook::set_once();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use lazy_static::lazy_static;
use rand::rngs::OsRng;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
lazy_static! {
static ref HASHER: PoseidonHasher = PoseidonHasher::default();
}
#[wasm_bindgen_test]
fn init_hasher() {
let mut rng = OsRng::default();
let a = Scalar::random(&mut rng);
let b = Scalar::random(&mut rng);
let hash = HASHER.hash(
Uint8Array::from(a.to_bytes().to_vec().as_slice()),
Uint8Array::from(b.to_bytes().to_vec().as_slice()),
);
assert!(hash.is_ok());
let x = Uint8Array::from(Vec::new().as_slice());
let y = Uint8Array::from(Vec::new().as_slice());
let hash = HASHER.hash(x, y);
assert_eq!(hash.err(), Some(OpStatusCode::InvalidArrayLength.into()));
}
#[wasm_bindgen_test]
fn generate_note() {
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
assert_eq!(note.group_id, 0);
assert_eq!(note.token_symbol, "EDG");
}
#[wasm_bindgen_test]
fn zk_proof() {
let mut rng = OsRng::default();
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
let my_leaf = ScalarWrapper::try_from(ng.leaf_of(&note)).unwrap();
let mut mt = MerkleTree::new(32, &HASHER);
let mut leaves: Vec<_> = vec![Scalar::random(&mut rng); 7].iter().map(Scalar::to_bytes).collect();
leaves[3] = my_leaf.to_bytes();
mt.inner.tree.add_leaves(leaves, None);
let recipient = ScalarWrapper(Scalar::zero());
let relayer = ScalarWrapper(Scalar::zero());
let zk_proof = mt.create_zk_proof(mt.root(), recipient.into(), relayer.into(), &note);
assert!(zk_proof.is_ok());
}
}
| {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
} | identifier_body |
lib.rs | use core::fmt;
use std::convert::{TryFrom, TryInto};
use std::ops::Deref;
use std::str::FromStr;
use bulletproofs::r1cs::Prover;
use bulletproofs::{BulletproofGens, PedersenGens};
use bulletproofs_gadgets::fixed_deposit_tree::builder::{FixedDepositTree, FixedDepositTreeBuilder};
use bulletproofs_gadgets::poseidon::builder::{Poseidon, PoseidonBuilder};
use bulletproofs_gadgets::poseidon::{PoseidonSbox, Poseidon_hash_2};
use curve25519_dalek::ristretto::CompressedRistretto;
use curve25519_dalek::scalar::Scalar;
use js_sys::{Array, JsString, Uint8Array};
use merlin::Transcript;
use rand::rngs::OsRng;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, this uses `wee_alloc` as the global
// allocator.
//
// If you don't want to use `wee_alloc`, you can safely delete this.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[wasm_bindgen]
extern "C" {
#[wasm_bindgen(typescript_type = "Leaves")]
pub type Leaves;
#[wasm_bindgen(typescript_type = "Commitments")]
pub type Commitments;
}
#[wasm_bindgen(typescript_custom_section)]
const LEAVES: &str = "type Leaves = Array<Uint8Array>;";
#[wasm_bindgen(typescript_custom_section)]
const COMMITMENTS: &str = "type Commitments = Array<Uint8Array>;";
/// Returns a Status Code for the operation.
#[wasm_bindgen]
#[derive(Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum OpStatusCode {
Unknown = 0,
/// Invalid hex string length when decoding
InvalidHexLength = 1,
/// Failed to parse hex string
HexParsingFailed = 2,
/// Invalid number of note parts when decoding
InvalidNoteLength = 3,
/// Invalid note prefix
InvalidNotePrefix = 4,
/// Invalid note version
InvalidNoteVersion = 5,
/// Invalid note id when parsing
InvalidNoteId = 6,
/// Invalid note block number when parsing
InvalidNoteBlockNumber = 7,
/// Invalid note secrets
InvalidNoteSecrets = 8,
/// Unable to find merkle tree
MerkleTreeNotFound = 9,
/// Failed serialization of passed params
/// Error for failing to parse rust type into JsValue
SerializationFailed = 10,
/// Failed deserialization of JsValue into rust type
DeserializationFailed = 11,
/// Invalid Array of 32 bytes.
InvalidArrayLength = 12,
}
impl From<OpStatusCode> for JsValue {
fn from(e: OpStatusCode) -> Self {
JsValue::from(e as u32)
}
}
const BULLETPROOF_GENS_SIZE: usize = 16_400;
const NOTE_PREFIX: &str = "webb.mix";
#[wasm_bindgen]
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NoteVersion {
V1,
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct Note {
#[wasm_bindgen(skip)]
pub prefix: String,
pub version: NoteVersion,
#[wasm_bindgen(skip)]
pub token_symbol: String,
pub group_id: u32,
pub block_number: Option<u32>,
#[wasm_bindgen(skip)]
pub r: Scalar,
#[wasm_bindgen(skip)]
pub nullifier: Scalar,
}
#[wasm_bindgen]
pub struct ZkProof {
#[wasm_bindgen(skip)]
pub comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub nullifier_hash: Scalar,
#[wasm_bindgen(skip)]
pub proof: Vec<u8>,
#[wasm_bindgen(skip)]
pub leaf_index_comms: Vec<CompressedRistretto>,
#[wasm_bindgen(skip)]
pub proof_comms: Vec<CompressedRistretto>,
} | fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
NoteVersion::V1 => write!(f, "v1"),
}
}
}
impl FromStr for NoteVersion {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"v1" => Ok(NoteVersion::V1),
_ => Err(OpStatusCode::InvalidNoteVersion),
}
}
}
impl fmt::Display for Note {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_r = hex::encode(&self.r.to_bytes());
let encoded_nullifier = hex::encode(&self.nullifier.to_bytes());
let mut parts = vec![
self.prefix.clone(),
self.version.to_string(),
self.token_symbol.clone(),
format!("{}", self.group_id),
];
if let Some(bn) = self.block_number {
parts.push(format!("{}", bn));
}
parts.push(format!("{}{}", encoded_r, encoded_nullifier));
let note = parts.join("-");
write!(f, "{}", note)
}
}
impl FromStr for Note {
type Err = OpStatusCode;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let parts: Vec<&str> = s.split('-').collect();
let partial = parts.len() == 5;
let full = parts.len() == 6;
if !partial && !full {
return Err(OpStatusCode::InvalidNoteLength);
}
if parts[0] != NOTE_PREFIX {
return Err(OpStatusCode::InvalidNotePrefix);
}
let version: NoteVersion = parts[1].parse()?;
let token_symbol = parts[2].to_owned();
let group_id = parts[3].parse().map_err(|_| OpStatusCode::InvalidNoteId)?;
let (block_number, note_val) = match partial {
true => (None, parts[4]),
false => {
let bn = parts[4].parse().map_err(|_| OpStatusCode::InvalidNoteBlockNumber)?;
(Some(bn), parts[5])
}
};
if note_val.len() != 128 {
return Err(OpStatusCode::InvalidNoteSecrets);
}
let r = hex::decode(&note_val[..64])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
let nullifier = hex::decode(&note_val[64..])
.map(|v| v.try_into())
.map(|r| r.map(Scalar::from_bytes_mod_order))
.map_err(|_| OpStatusCode::InvalidHexLength)?
.map_err(|_| OpStatusCode::HexParsingFailed)?;
Ok(Note {
prefix: NOTE_PREFIX.to_owned(),
version,
token_symbol,
group_id,
block_number,
r,
nullifier,
})
}
}
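// Error-path sketch, assuming the parser above: each early return maps to a
// distinct status code, so a bad prefix is rejected before the secrets are
// ever decoded.
#[cfg(test)]
mod note_error_sketch {
    use super::*;

    #[test]
    fn bad_prefix_is_rejected() {
        let s = format!("not.webb-v1-EDG-0-{}", "0".repeat(128));
        assert_eq!(s.parse::<Note>().err(), Some(OpStatusCode::InvalidNotePrefix));
    }
}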
#[wasm_bindgen]
impl Note {
pub fn deserialize(value: JsString) -> Result<Note, JsValue> {
let note: String = value.into();
note.parse().map_err(Into::into)
}
pub fn serialize(&self) -> JsString {
let note = self.to_string();
note.into()
}
#[wasm_bindgen(getter)]
pub fn token_symbol(&self) -> JsString {
self.token_symbol.clone().into()
}
}
#[wasm_bindgen]
impl ZkProof {
#[wasm_bindgen(getter)]
pub fn proof(&self) -> Uint8Array {
Uint8Array::from(self.proof.as_slice())
}
#[wasm_bindgen(getter)]
pub fn comms(&self) -> Commitments {
let list: Array = self
.comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn leaf_index_comms(&self) -> Commitments {
let list: Array = self
.leaf_index_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn proof_comms(&self) -> Commitments {
let list: Array = self
.proof_comms
.clone()
.into_iter()
.map(|v| Uint8Array::from(v.as_bytes().to_vec().as_slice()))
.collect();
let js = JsValue::from(list);
Commitments::from(js)
}
#[wasm_bindgen(getter)]
pub fn nullifier_hash(&self) -> Uint8Array {
ScalarWrapper(self.nullifier_hash).into()
}
}
#[wasm_bindgen]
pub struct PoseidonHasherOptions {
/// The size of the permutation, in field elements.
width: usize,
/// Number of full SBox rounds in beginning
pub full_rounds_beginning: Option<usize>,
/// Number of full SBox rounds in end
pub full_rounds_end: Option<usize>,
/// Number of partial rounds
pub partial_rounds: Option<usize>,
/// The desired (classical) security level, in bits.
pub security_bits: Option<usize>,
/// Bulletproof generators for proving/verifying (serialized)
#[wasm_bindgen(skip)]
pub bp_gens: Option<BulletproofGens>,
}
impl Default for PoseidonHasherOptions {
fn default() -> Self {
Self {
width: 6,
full_rounds_beginning: None,
full_rounds_end: None,
partial_rounds: None,
security_bits: None,
bp_gens: None,
}
}
}
#[wasm_bindgen]
impl PoseidonHasherOptions {
#[wasm_bindgen(constructor)]
pub fn new() -> Self {
Self::default()
}
#[wasm_bindgen(setter)]
pub fn set_bp_gens(&mut self, value: Uint8Array) {
let bp_gens =
bincode::deserialize(&value.to_vec()).unwrap_or_else(|_| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
self.bp_gens = Some(bp_gens);
}
#[wasm_bindgen(getter)]
pub fn bp_gens(&self) -> Uint8Array {
let val = self
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let serialized = bincode::serialize(&val).unwrap_or_else(|_| Vec::new());
Uint8Array::from(serialized.as_slice())
}
}
#[wasm_bindgen]
#[derive(Clone)]
pub struct PoseidonHasher {
inner: Poseidon,
}
#[wasm_bindgen]
impl PoseidonHasher {
pub fn default() -> Self {
Self::with_options(Default::default())
}
#[wasm_bindgen(constructor)]
pub fn with_options(opts: PoseidonHasherOptions) -> Self {
let pc_gens = PedersenGens::default();
let bp_gens = opts
.bp_gens
.clone()
.unwrap_or_else(|| BulletproofGens::new(BULLETPROOF_GENS_SIZE, 1));
let inner = PoseidonBuilder::new(opts.width)
.sbox(PoseidonSbox::Exponentiation3)
.bulletproof_gens(bp_gens)
.pedersen_gens(pc_gens)
.build();
Self { inner }
}
pub fn hash(&self, left: Uint8Array, right: Uint8Array) -> Result<Uint8Array, JsValue> {
let xl = ScalarWrapper::try_from(left)?;
let xr = ScalarWrapper::try_from(right)?;
let hash = Poseidon_hash_2(*xl, *xr, &self.inner);
Ok(ScalarWrapper(hash).into())
}
}
#[wasm_bindgen]
pub struct NoteGenerator {
hasher: Poseidon,
rng: OsRng,
}
#[wasm_bindgen]
impl NoteGenerator {
#[wasm_bindgen(constructor)]
pub fn new(hasher: &PoseidonHasher) -> Self {
Self {
hasher: hasher.inner.clone(),
rng: OsRng::default(),
}
}
pub fn generate(&mut self, token_symbol: JsString, group_id: u32) -> Note {
let r = Scalar::random(&mut self.rng);
let nullifier = Scalar::random(&mut self.rng);
Note {
prefix: NOTE_PREFIX.to_string(),
version: NoteVersion::V1,
token_symbol: token_symbol.into(),
block_number: None,
group_id,
r,
nullifier,
}
}
pub fn leaf_of(&self, note: &Note) -> Uint8Array {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
ScalarWrapper(leaf).into()
}
pub fn nullifier_hash_of(&self, note: &Note) -> Uint8Array {
let hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
ScalarWrapper(hash).into()
}
}
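// Determinism sketch, assuming the hasher wiring above: the nullifier hash is
// `Poseidon_hash_2(nullifier, nullifier)`, a pure function of the note, so
// recomputing it always yields the same bytes.
//
// let mut ng = NoteGenerator::new(&hasher);
// let note = ng.generate(JsString::from("EDG"), 0);
// let h1 = ng.nullifier_hash_of(&note);
// let h2 = ng.nullifier_hash_of(&note);
// assert_eq!(h1.to_vec(), h2.to_vec());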
#[wasm_bindgen]
pub struct MerkleTree {
inner: FixedDepositTree,
hasher: Poseidon,
}
#[wasm_bindgen]
impl MerkleTree {
#[wasm_bindgen(constructor)]
pub fn new(depth: u8, hasher: &PoseidonHasher) -> Self {
let tree = FixedDepositTreeBuilder::new()
.hash_params(hasher.inner.clone())
.depth(depth as usize)
.build();
Self {
inner: tree,
hasher: hasher.inner.clone(),
}
}
pub fn add_leaf_at_index(&mut self, leaf: Uint8Array, index: u64) -> Result<(), JsValue> {
let idx = Scalar::from(index);
let leaf = ScalarWrapper::try_from(leaf)?;
self.inner.tree.update(idx, *leaf);
Ok(())
}
pub fn add_leaves(&mut self, leaves: Leaves, target_root: Option<Uint8Array>) -> Result<(), JsValue> {
let xs = Array::from(&leaves)
.to_vec()
.into_iter()
.map(|v| Uint8Array::new_with_byte_offset_and_length(&v, 0, 32))
.map(ScalarWrapper::try_from)
.collect::<Result<Vec<_>, _>>()?
.into_iter()
.map(|v| v.to_bytes())
.collect();
let root = target_root
.map(ScalarWrapper::try_from)
.transpose()?
.map(|v| v.to_bytes());
self.inner.tree.add_leaves(xs, root);
Ok(())
}
pub fn root(&self) -> Uint8Array {
let root = self.inner.tree.root;
Uint8Array::from(root.to_bytes().to_vec().as_slice())
}
pub fn create_zk_proof(
&mut self,
root: Uint8Array,
recipient: Uint8Array,
relayer: Uint8Array,
note: &Note,
) -> Result<ZkProof, JsValue> {
let leaf = Poseidon_hash_2(note.r, note.nullifier, &self.hasher);
let root = ScalarWrapper::try_from(root)?;
let recipient = ScalarWrapper::try_from(recipient)?;
let relayer = ScalarWrapper::try_from(relayer)?;
// add the current leaf we need to prove to the secrets.
let nullifier_hash = Poseidon_hash_2(note.nullifier, note.nullifier, &self.hasher);
self.inner.add_secrets(leaf, note.r, note.nullifier, nullifier_hash);
let pc_gens = PedersenGens::default();
let bp_gens = self.hasher.bp_gens.clone();
let mut prover_transcript = Transcript::new(b"zk_membership_proof");
let prover = Prover::new(&pc_gens, &mut prover_transcript);
let (proof, (comms, nullifier_hash, leaf_index_comms, proof_comms)) =
self.inner.prove_zk(*root, leaf, *recipient, *relayer, &bp_gens, prover);
let zkproof = ZkProof {
proof: proof.to_bytes(),
comms,
leaf_index_comms,
proof_comms,
nullifier_hash,
};
Ok(zkproof)
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
struct ScalarWrapper(Scalar);
impl Deref for ScalarWrapper {
type Target = Scalar;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl TryFrom<Uint8Array> for ScalarWrapper {
type Error = OpStatusCode;
fn try_from(value: Uint8Array) -> Result<Self, Self::Error> {
let bytes: [u8; 32] = value
.to_vec()
.try_into()
.map_err(|_| OpStatusCode::InvalidArrayLength)?;
Ok(Self(Scalar::from_bytes_mod_order(bytes)))
}
}
#[allow(clippy::from_over_into)]
impl Into<Uint8Array> for ScalarWrapper {
fn into(self) -> Uint8Array {
Uint8Array::from(self.0.to_bytes().to_vec().as_slice())
}
}
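// Round-trip sketch, assuming canonical 32-byte input: `TryFrom<Uint8Array>`
// and `Into<Uint8Array>` invert each other.
//
// let bytes = Uint8Array::from([0u8; 32].as_slice());
// let scalar = ScalarWrapper::try_from(bytes)?; // rejects any len != 32
// let back: Uint8Array = scalar.into();
// assert_eq!(back.to_vec(), vec![0u8; 32]);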
#[wasm_bindgen(start)]
pub fn wasm_init() -> Result<(), JsValue> {
console_error_panic_hook::set_once();
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use lazy_static::lazy_static;
use rand::rngs::OsRng;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
lazy_static! {
static ref HASHER: PoseidonHasher = PoseidonHasher::default();
}
#[wasm_bindgen_test]
fn init_hasher() {
let mut rng = OsRng::default();
let a = Scalar::random(&mut rng);
let b = Scalar::random(&mut rng);
let hash = HASHER.hash(
Uint8Array::from(a.to_bytes().to_vec().as_slice()),
Uint8Array::from(b.to_bytes().to_vec().as_slice()),
);
assert!(hash.is_ok());
let x = Uint8Array::from(Vec::new().as_slice());
let y = Uint8Array::from(Vec::new().as_slice());
let hash = HASHER.hash(x, y);
assert_eq!(hash.err(), Some(OpStatusCode::InvalidArrayLength.into()));
}
#[wasm_bindgen_test]
fn generate_note() {
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
assert_eq!(note.group_id, 0);
assert_eq!(note.token_symbol, "EDG");
}
#[wasm_bindgen_test]
fn zk_proof() {
let mut rng = OsRng::default();
let mut ng = NoteGenerator::new(&HASHER);
let note = ng.generate(JsString::from("EDG"), 0);
let my_leaf = ScalarWrapper::try_from(ng.leaf_of(&note)).unwrap();
let mut mt = MerkleTree::new(32, &HASHER);
let mut leaves: Vec<_> = vec![Scalar::random(&mut rng); 7].iter().map(Scalar::to_bytes).collect();
leaves[3] = my_leaf.to_bytes();
mt.inner.tree.add_leaves(leaves, None);
let recipient = ScalarWrapper(Scalar::zero());
let relayer = ScalarWrapper(Scalar::zero());
let zk_proof = mt.create_zk_proof(mt.root(), recipient.into(), relayer.into(), &note);
assert!(zk_proof.is_ok());
}
} |
impl fmt::Display for NoteVersion { | random_line_split |
aggr.rs | use crate::lang::{Closure, Argument, ExecutionContext, Value, ColumnType, RowsReader, Row, JobJoinHandle};
use crate::lang::stream::{Readable, ValueSender};
use crate::lang::errors::{CrushResult, argument_error, mandate, error};
use crate::lib::command_util::{find_field, find_field_from_str};
use crate::lang::printer::Printer;
use crossbeam::{Receiver, bounded, unbounded, Sender};
use crate::util::thread::{handle, build};
struct Aggregation {
idx: usize,
name: String,
command: Closure,
}
pub struct Config {
table_idx: usize,
aggregations: Vec<Aggregation>,
}
pub fn parse(input_type: &[ColumnType], argument: Vec<Argument>) -> CrushResult<Config> {
let mut table = None;
let mut aggregations = Vec::new();
let mut next_idx = input_type.len();
for a in &argument {
match (a.name.as_deref(), &a.value) {
(Some("column"), Value::Field(name)) => {
table = Some(find_field(name.as_ref(), input_type)?);
}
(Some(name), Value::Closure(command)) => {
aggregations.push(
Aggregation {
command: command.clone(),
name: name.to_string(),
idx: find_field_from_str(name, input_type)
.unwrap_or_else(|| {next_idx += 1; next_idx - 1})
}
)
}
_ => return argument_error("Bad argument"),
}
}
Ok(Config {
table_idx: mandate(table, "Missing table spec")?,
aggregations,
})
/*
if argument.len() < 2 {
return Err(argument_error("Expected at least two paramaters"));
}
let (table_idx, aggregations) = match (argument.len() % 2, argument[0].name.is_none(), &argument[0].value) {
(0, false, _) => (guess_table(input_type)?, &argument[..]),
(1, true, Value::Field(f)) => (find_field(&f, input_type)?, &argument[1..]),
_ => return Err(argument_error("Could not find table to aggregate")),
};
match &input_type[table_idx].cell_type {
ValueType::Rows(sub_type) |
ValueType::Output(sub_type) => {
let output_definition = aggregations
.chunks(2)
.into_iter()
.map(|args| {
let spec = &args[0];
let clos = &args[1];
match (&spec.name, &spec.value, &clos.value) {
(Some(name), Value::Field(f), Value::Closure(c)) =>
Ok((
name.to_string(),
find_field(&f, sub_type)?,
c.clone()
)),
_ => Err(error("Invalid aggragation spec")),
}
})
.collect::<JobResult<Vec<(String, usize, Closure)>>>()?;
Ok(Config {
table_idx,
output_definition,
})
}
_ => {
Err(argument_error("No table to aggregate on found"))
}
}
*/
}
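// Invocation sketch — an assumption inferred from the match arms above, not
// from documentation: `column` names the table-valued column, and every other
// named closure becomes one `Aggregation`, reusing an existing column index
// when the name matches and appending a fresh output column otherwise.
//
// aggr column=%nested_table sum={|rows| ...} avg={|rows| ...}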
/*
pub fn guess_table(input_type: &[ColumnType]) -> JobResult<usize> {
let tables: Vec<usize> = input_type
.iter()
.enumerate()
.flat_map(|(idx, t)| {
match &t.cell_type {
ValueType::Output(_) | ValueType::Rows(_) => Some(idx),
_ => None,
}
}).collect();
if tables.len() == 1 {
Ok(tables[0])
} else {
Err(argument_error(format!("Could not guess tables to join, expected one table, found {}", tables.len()).as_str()))
}
}
*/
fn create_writer(
uninitialized_output: ValueSender,
mut output_names: Vec<Option<String>>,
writer_input: Receiver<Row>) ->
JobJoinHandle {
handle(build("aggr-writer".to_string()).spawn(
move || {
let output = match writer_input.recv() {
Ok(row) => {
let tmp = uninitialized_output.initialize(
row.cells
.iter()
.enumerate()
.map(|(idx, cell)| ColumnType { name: output_names[idx].take(), cell_type: cell.value_type() })
.collect()
)?;
tmp.send(row);
tmp
}
Err(_) => return Err(error("No output")),
};
loop {
match writer_input.recv() {
Ok(row) => {
output.send(row);
}
Err(_) => break,
}
}
Ok(())
}))
}
pub fn create_collector(
rest_input: InputStream,
uninitialized_inputs: Vec<ValueReceiver>,
writer_output: Sender<Row>) -> JobJoinHandle {
handle(build("aggr-collector".to_string()).spawn(
move || {
match rest_input.recv() {
Ok(mut partial_row) => {
for ui in uninitialized_inputs {
let i = ui.initialize_stream()?;
match i.recv() {
Ok(mut r) => {
partial_row.cells.push(std::mem::replace(&mut r.cells[0], Value::Integer(0)));
}
Err(_) => return Err(error("Missing value")),
}
}
writer_output.send(partial_row);
}
Err(_) => {}
}
Ok(())
}))
}
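// Topology sketch, assuming the threads wired up in this file: `pump_table`
// fans each nested table out to one aggregator closure per output column,
// this collector zips the single-cell results back onto the outer row, and a
// single writer thread drains everything to the job output.
//
// rows ──> aggregator #1 ─┐
// rows ──> aggregator #2 ─┼─> collector ──> writer ──> output
// rest of outer row ──────┘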
pub fn pump_table(
job_output: &mut impl Readable,
outputs: Vec<OutputStream>,
output_definition: &Vec<(String, usize, Closure)>) -> JobResult<()> {
let stream_to_column_mapping = output_definition.iter().map(|(_, off, _)| *off).collect::<Vec<usize>>();
loop {
match job_output.read() {
Ok(mut inner_row) => {
for stream_idx in 0..stream_to_column_mapping.len() {
outputs[stream_idx].send(Row { cells: vec![inner_row.cells.replace(stream_to_column_mapping[stream_idx], Value::Integer(0))] })?;
}
}
Err(_) => break,
}
}
Ok(())
}
fn create_aggregator(
name: &str,
idx: usize,
c: &Closure,
input_type: &[ColumnType],
uninitialized_inputs: &mut Vec<ValueReceiver>,
outputs: &mut Vec<OutputStream>,
env: &Env,
printer: &Printer) -> JobResult<JobJoinHandle> {
let (first_output, first_input) = streams(vec![
ColumnType::named(name, input_type[idx].value_type.clone())
]);
let (last_output, last_input) = streams();
outputs.push(first_output);
uninitialized_inputs.push(last_input);
let local_printer = printer.clone();
let local_env = env.clone();
let cc = c.clone();
Ok(handle(build("aggr-aggregator".to_string()).spawn(
move || {
cc.spawn_and_execute(CompileContext {
input: first_input,
output: last_output,
arguments: vec![],
env: local_env,
printer: local_printer,
});
Ok(())
})))
}
fn handle_row(
row: Row,
config: &Config,
job_output: &mut impl Readable,
printer: &Printer,
env: &Env,
input: &InputStream,
writer_output: &Sender<Row>) -> JobResult<()> {
let mut outputs: Vec<OutputStream> = Vec::new();
let mut uninitialized_inputs: Vec<ValueReceiver> = Vec::new();
let mut aggregator_handles: Vec<JobJoinHandle> = Vec::new();
let (uninit_rest_output, uninit_rest_input) = streams();
let mut rest_output_type = input.get_type().clone();
rest_output_type.remove(config.table_idx);
let rest_output = uninit_rest_output.initialize(rest_output_type)?;
let rest_input = uninit_rest_input.initialize()?;
for (name, idx, c) in config.output_definition.iter() {
aggregator_handles.push(create_aggregator(
name.as_str(),
*idx,
c,
job_output.get_type(),
&mut uninitialized_inputs,
&mut outputs,
env,
printer)?);
}
let collector_handle = create_collector(
rest_input,
uninitialized_inputs,
writer_output.clone());
rest_output.send(row)?;
drop(rest_output);
pump_table(job_output, outputs, &config.output_definition)?;
for h in aggregator_handles {
h.join(printer);
}
collector_handle.join(printer);
Ok(())
}
pub fn | (config: Config, printer: &Printer, env: &Env, mut input: impl Readable, uninitialized_output: ValueSender) -> JobResult<()> {
let (writer_output, writer_input) = bounded::<Row>(16);
let mut output_names = input.get_type().iter().map(|t| t.name.clone()).collect::<Vec<Option<String>>>();
output_names.remove(config.table_idx);
for (name, _, _) in &config.output_definition {
output_names.push(Some(name.clone()));
}
let writer_handle = create_writer(uninitialized_output, output_names, writer_input);
loop {
match input.recv() {
Ok(mut row) => {
let table_cell = row.cells.remove(config.table_idx);
match table_cell {
Value::Output(mut job_output) =>
handle_row(row, &config, &mut job_output.stream, printer, env, &input, &writer_output)?,
Value::Rows(mut rows) =>
handle_row(row, &config, &mut RowsReader::new(rows), printer, env, &input, &writer_output)?,
_ => {
printer.job_error(error("Wrong column type"));
break;
}
}
}
Err(_) => { break; }
}
}
drop(writer_output);
writer_handle.join(printer);
Ok(())
}
fn perform_on(arguments: Vec<Argument>, input: &Readable, sender: ValueSender) -> CrushResult<()> {
let config = parse(input.types(), arguments)?;
Ok(())
}
pub fn perform(context: ExecutionContext) -> CrushResult<()> {
match context.input.recv()? {
Value::Stream(s) => {
perform_on(context.arguments, &s.stream, context.output)
}
Value::Rows(r) => {
perform_on(context.arguments, &r.reader(), context.output)
}
_ => argument_error("Expected a struct"),
}
}
| run | identifier_name |
aggr.rs | use crate::lang::{Closure, Argument, ExecutionContext, Value, ColumnType, RowsReader, Row, JobJoinHandle};
use crate::lang::stream::{Readable, ValueSender};
use crate::lang::errors::{CrushResult, argument_error, mandate, error};
use crate::lib::command_util::{find_field, find_field_from_str};
use crate::lang::printer::Printer;
use crossbeam::{Receiver, bounded, unbounded, Sender};
use crate::util::thread::{handle, build};
struct Aggregation {
idx: usize,
name: String,
command: Closure,
}
pub struct Config {
table_idx: usize,
aggregations: Vec<Aggregation>,
}
pub fn parse(input_type: &[ColumnType], argument: Vec<Argument>) -> CrushResult<Config> {
let mut table = None;
let mut aggregations = Vec::new();
let mut next_idx = input_type.len();
for a in &argument {
match (a.name.as_deref(), &a.value) {
(Some("column"), Value::Field(name)) => {
table = Some(find_field(name.as_ref(), input_type)?);
}
(Some(name), Value::Closure(command)) => {
aggregations.push(
Aggregation {
command: command.clone(),
name: name.to_string(),
idx: find_field_from_str(name, input_type)
.unwrap_or_else(|| {next_idx += 1; next_idx - 1})
}
)
}
_ => return argument_error("Bad argument"),
}
}
Ok(Config {
table_idx: mandate(table, "Missing table spec")?,
aggregations,
})
/*
if argument.len() < 2 {
return Err(argument_error("Expected at least two paramaters"));
}
let (table_idx, aggregations) = match (argument.len() % 2, argument[0].name.is_none(), &argument[0].value) {
(0, false, _) => (guess_table(input_type)?, &argument[..]),
(1, true, Value::Field(f)) => (find_field(&f, input_type)?, &argument[1..]),
_ => return Err(argument_error("Could not find table to aggregate")),
};
match &input_type[table_idx].cell_type {
ValueType::Rows(sub_type) |
ValueType::Output(sub_type) => {
let output_definition = aggregations
.chunks(2)
.into_iter()
.map(|args| {
let spec = &args[0];
let clos = &args[1];
match (&spec.name, &spec.value, &clos.value) {
(Some(name), Value::Field(f), Value::Closure(c)) =>
Ok((
name.to_string(),
find_field(&f, sub_type)?,
c.clone()
)),
_ => Err(error("Invalid aggragation spec")),
}
})
.collect::<JobResult<Vec<(String, usize, Closure)>>>()?;
Ok(Config {
table_idx,
output_definition,
})
}
_ => {
Err(argument_error("No table to aggregate on found"))
}
}
*/
}
/*
pub fn guess_table(input_type: &[ColumnType]) -> JobResult<usize> {
let tables: Vec<usize> = input_type
.iter()
.enumerate()
.flat_map(|(idx, t)| {
match &t.cell_type {
ValueType::Output(_) | ValueType::Rows(_) => Some(idx),
_ => None,
}
}).collect();
if tables.len() == 1 {
Ok(tables[0])
} else {
Err(argument_error(format!("Could not guess tables to join, expected one table, found {}", tables.len()).as_str()))
}
}
*/
fn create_writer(
uninitialized_output: ValueSender,
mut output_names: Vec<Option<String>>,
writer_input: Receiver<Row>) ->
JobJoinHandle {
handle(build("aggr-writer".to_string()).spawn(
move || {
let output = match writer_input.recv() {
Ok(row) => {
let tmp = uninitialized_output.initialize(
row.cells
.iter()
.enumerate()
.map(|(idx, cell)| ColumnType { name: output_names[idx].take(), cell_type: cell.value_type() })
.collect()
)?;
tmp.send(row);
tmp
}
Err(_) => return Err(error("No output")),
};
loop {
match writer_input.recv() {
Ok(row) => {
output.send(row);
}
Err(_) => break,
}
}
Ok(())
}))
}
pub fn create_collector(
rest_input: InputStream,
uninitialized_inputs: Vec<ValueReceiver>,
writer_output: Sender<Row>) -> JobJoinHandle {
handle(build("aggr-collector".to_string()).spawn(
move || {
match rest_input.recv() {
Ok(mut partial_row) => {
for ui in uninitialized_inputs {
let i = ui.initialize_stream()?;
match i.recv() {
Ok(mut r) => {
partial_row.cells.push(std::mem::replace(&mut r.cells[0], Value::Integer(0)));
}
Err(_) => return Err(error("Missing value")),
}
}
writer_output.send(partial_row);
}
Err(_) => {}
} | }))
}
pub fn pump_table(
job_output: &mut impl Readable,
outputs: Vec<OutputStream>,
output_definition: &Vec<(String, usize, Closure)>) -> JobResult<()> {
let stream_to_column_mapping = output_definition.iter().map(|(_, off, _)| *off).collect::<Vec<usize>>();
loop {
match job_output.read() {
Ok(mut inner_row) => {
for stream_idx in 0..stream_to_column_mapping.len() {
outputs[stream_idx].send(Row { cells: vec![inner_row.cells.replace(stream_to_column_mapping[stream_idx], Value::Integer(0))] })?;
}
}
Err(_) => break,
}
}
Ok(())
}
fn create_aggregator(
name: &str,
idx: usize,
c: &Closure,
input_type: &[ColumnType],
uninitialized_inputs: &mut Vec<ValueReceiver>,
outputs: &mut Vec<OutputStream>,
env: &Env,
printer: &Printer) -> JobResult<JobJoinHandle> {
let (first_output, first_input) = streams(vec![
ColumnType::named(name, input_type[idx].value_type.clone())
]);
let (last_output, last_input) = streams();
outputs.push(first_output);
uninitialized_inputs.push(last_input);
let local_printer = printer.clone();
let local_env = env.clone();
let cc = c.clone();
Ok(handle(build("aggr-aggregator".to_string()).spawn(
move || {
cc.spawn_and_execute(CompileContext {
input: first_input,
output: last_output,
arguments: vec![],
env: local_env,
printer: local_printer,
});
Ok(())
})))
}
fn handle_row(
row: Row,
config: &Config,
job_output: &mut impl Readable,
printer: &Printer,
env: &Env,
input: &InputStream,
writer_output: &Sender<Row>) -> JobResult<()> {
let mut outputs: Vec<OutputStream> = Vec::new();
let mut uninitialized_inputs: Vec<ValueReceiver> = Vec::new();
let mut aggregator_handles: Vec<JobJoinHandle> = Vec::new();
let (uninit_rest_output, uninit_rest_input) = streams();
let mut rest_output_type = input.get_type().clone();
rest_output_type.remove(config.table_idx);
let rest_output = uninit_rest_output.initialize(rest_output_type)?;
let rest_input = uninit_rest_input.initialize()?;
for (name, idx, c) in config.output_definition.iter() {
aggregator_handles.push(create_aggregator(
name.as_str(),
*idx,
c,
job_output.get_type(),
&mut uninitialized_inputs,
&mut outputs,
env,
printer)?);
}
let collector_handle = create_collector(
rest_input,
uninitialized_inputs,
writer_output.clone());
rest_output.send(row)?;
drop(rest_output);
pump_table(job_output, outputs, &config.output_definition)?;
for h in aggregator_handles {
h.join(printer);
}
collector_handle.join(printer);
Ok(())
}
pub fn run(config: Config, printer: &Printer, env: &Env, mut input: impl Readable, uninitialized_output: ValueSender) -> JobResult<()> {
let (writer_output, writer_input) = bounded::<Row>(16);
let mut output_names = input.get_type().iter().map(|t| t.name.clone()).collect::<Vec<Option<String>>>();
output_names.remove(config.table_idx);
for (name, _, _) in &config.output_definition {
output_names.push(Some(name.clone()));
}
let writer_handle = create_writer(uninitialized_output, output_names, writer_input);
loop {
match input.recv() {
Ok(mut row) => {
let table_cell = row.cells.remove(config.table_idx);
match table_cell {
Value::Output(mut job_output) =>
handle_row(row, &config, &mut job_output.stream, printer, env, &input, &writer_output)?,
Value::Rows(mut rows) =>
handle_row(row, &config, &mut RowsReader::new(rows), printer, env, &input, &writer_output)?,
_ => {
printer.job_error(error("Wrong column type"));
break;
}
}
}
Err(_) => { break; }
}
}
drop(writer_output);
writer_handle.join(printer);
Ok(())
}
fn perform_on(arguments: Vec<Argument>, input: &Readable, sender: ValueSender) -> CrushResult<()> {
let config = parse(input.types(), arguments)?;
Ok(())
}
pub fn perform(context: ExecutionContext) -> CrushResult<()> {
match context.input.recv()? {
Value::Stream(s) => {
perform_on(context.arguments, &s.stream, context.output)
}
Value::Rows(r) => {
perform_on(context.arguments, &r.reader(), context.output)
}
_ => argument_error("Expected a struct"),
}
} | Ok(()) | random_line_split |
bpf.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#![allow(non_snake_case)]
pub use libc::sock_filter;
use syscalls::Errno;
use syscalls::Sysno;
use crate::fd::Fd;
// See: /include/uapi/linux/bpf_common.h
// Instruction classes
pub const BPF_LD: u16 = 0x00;
pub const BPF_ST: u16 = 0x02;
pub const BPF_JMP: u16 = 0x05;
pub const BPF_RET: u16 = 0x06;
// ld/ldx fields
pub const BPF_W: u16 = 0x00;
pub const BPF_ABS: u16 = 0x20;
pub const BPF_MEM: u16 = 0x60;
pub const BPF_JEQ: u16 = 0x10;
pub const BPF_JGT: u16 = 0x20;
pub const BPF_JGE: u16 = 0x30;
pub const BPF_K: u16 = 0x00;
/// Maximum number of instructions.
pub const BPF_MAXINSNS: usize = 4096;
/// Defined in `/include/uapi/linux/seccomp.h`.
const SECCOMP_SET_MODE_FILTER: u32 = 1;
/// Offset of `seccomp_data::nr` in bytes.
const SECCOMP_DATA_OFFSET_NR: u32 = 0;
/// Offset of `seccomp_data::arch` in bytes.
const SECCOMP_DATA_OFFSET_ARCH: u32 = 4;
/// Offset of `seccomp_data::instruction_pointer` in bytes.
const SECCOMP_DATA_OFFSET_IP: u32 = 8;
/// Offset of `seccomp_data::args` in bytes.
#[allow(unused)]
const SECCOMP_DATA_OFFSET_ARGS: u32 = 16;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP + 4;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP + 4;
// These are defined in `/include/uapi/linux/elf-em.h`.
const EM_386: u32 = 3;
const EM_MIPS: u32 = 8;
const EM_PPC: u32 = 20;
const EM_PPC64: u32 = 21;
const EM_ARM: u32 = 40;
const EM_X86_64: u32 = 62;
const EM_AARCH64: u32 = 183;
// These are defined in `/include/uapi/linux/audit.h`.
const __AUDIT_ARCH_64BIT: u32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS |
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
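// Usage sketch, assuming a user-space supervisor that is not part of this
// file: the returned fd is the seccomp_unotify(2) "listener", serviced with
// SECCOMP_IOCTL_NOTIF_RECV/_SEND elsewhere (`supervise` is hypothetical).
//
// let fd = filter.load_and_listen()?;
// std::thread::spawn(move || supervise(fd));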
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_THREAD);
/// Returns from the seccomp filter, causing a `SIGSYS` to be sent to the calling
/// thread skipping over the syscall without executing it. Unlike [`DENY`], this
/// signal can be caught.
#[allow(unused)]
pub const TRAP: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_TRAP);
/// Returns from the seccomp filter, causing `PTRACE_EVENT_SECCOMP` to be
/// generated for this syscall (if `PTRACE_O_TRACESECCOMP` is enabled). If no
/// tracer is present, the syscall will not be executed and returns a `ENOSYS`
/// instead.
///
/// `data` is made available to the tracer via `PTRACE_GETEVENTMSG`.
#[allow(unused)]
pub fn TRACE(data: u16) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_TRACE | (data as u32 & libc::SECCOMP_RET_DATA),
)
}
/// Returns from the seccomp filter, returning the given error instead of
/// executing the syscall.
#[allow(unused)]
pub fn ERRNO(err: Errno) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_ERRNO | (err.into_raw() as u32 & libc::SECCOMP_RET_DATA),
)
}
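// Example sketch, assuming the x86-64 ABI: fail a single syscall with EPERM
// instead of killing the thread, and let everything else through.
//
// let filter = seccomp_bpf![
//     VALIDATE_ARCH(AUDIT_ARCH_X86_64),
//     LOAD_SYSCALL_NR,
//     SYSCALL(Sysno::ptrace, ERRNO(Errno::EPERM)),
//     ALLOW,
// ];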
macro_rules! instruction {
(
$(
$(#[$attrs:meta])*
$vis:vis fn $name:ident($($args:tt)*) {
$($instruction:expr;)*
}
)*
) => {
$(
$vis fn $name($($args)*) -> impl ByteCode {
move |filter: &mut Filter| {
$(
$instruction.into_bpf(filter);
)*
}
}
)*
};
}
instruction! {
/// Checks that architecture matches our target architecture. If it does not
/// match, kills the current process. This should be the first step for every
/// seccomp filter to ensure we're working with the syscall table we're
/// expecting. Each architecture has a slightly different syscall table and
/// we need to make sure the syscall numbers we're using are the right ones
/// for the architecture.
pub fn VALIDATE_ARCH(target_arch: u32) {
// Load `seccomp_data.arch`
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_ARCH);
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, target_arch, 1, 0);
BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_PROCESS);
}
pub fn LOAD_SYSCALL_IP() {
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_LO);
// M[0] = lo
BPF_STMT(BPF_ST, 0);
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_HI);
// M[1] = hi
BPF_STMT(BPF_ST, 1);
}
/// Checks if `seccomp_data.nr` matches the given syscall. If so, then jumps
/// to `action`.
///
/// # Example
/// ```no_compile
/// SYSCALL(Sysno::socket, DENY);
/// ```
pub fn SYSCALL(nr: Sysno, action: sock_filter) {
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, nr as i32 as u32, 0, 1);
action;
}
fn IP_RANGE64(blo: u32, bhi: u32, elo: u32, ehi: u32, action: sock_filter) {
// Most of the complexity below is caused by seccomp-bpf only being able
// to operate on `u32` values. We also can't reuse `JGE64` and `JLE64`
// because the jump offsets would be incorrect.
// STEP1: if !(begin > arg) goto NOMATCH;
// if (begin_hi > arg.hi) goto STEP2;
BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, bhi, 4 /* goto STEP2 */, 0);
// if (begin_hi != arg.hi) goto NOMATCH;
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, bhi, 0, 9 /* goto NOMATCH */);
// Load M[0] to operate on the low bits of the IP.
BPF_STMT(BPF_LD + BPF_MEM, 0);
// if (begin_lo >= arg.lo) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, blo, 0, 7 /* goto NOMATCH */);
// Load M[1] because the next instruction expects the high bits of the
// IP.
BPF_STMT(BPF_LD + BPF_MEM, 1);
// STEP2: if !(arg > end) goto NOMATCH;
// if (end_hi < arg.hi) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, ehi, 0, 4 /* goto MATCH */);
// if (end_hi != arg.hi) goto NOMATCH;
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ehi, 0, 5 /* goto NOMATCH */);
BPF_STMT(BPF_LD + BPF_MEM, 0);
// if (end_lo < arg.lo) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, elo, 2 /* goto NOMATCH */, 0);
BPF_STMT(BPF_LD + BPF_MEM, 1);
// MATCH: Take the action.
action;
// NOMATCH: Load M[1] again after we loaded M[0].
BPF_STMT(BPF_LD + BPF_MEM, 1);
}
}
/// Checks if the instruction pointer is between a certain range. If so, executes
/// `action`. Otherwise, fall through.
///
/// Note that if `ip == end`, this will not match. That is, the interval is
/// half-open: `end` itself is excluded.
///
/// Precondition: The instruction pointer must be loaded with [`LOAD_SYSCALL_IP`]
/// first.
pub fn IP_RANGE(begin: u64, end: u64, action: sock_filter) -> impl ByteCode {
let begin_lo = begin as u32;
let begin_hi = (begin >> 32) as u32;
let end_lo = end as u32;
let end_hi = (end >> 32) as u32;
IP_RANGE64(begin_lo, begin_hi, end_lo, end_hi, action)
}
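// Usage sketch; `text_begin`/`text_end` are assumed to come from parsing
// /proc/self/maps or similar. Only syscalls issued from that one mapping are
// allowed, everything else kills the thread.
//
// let filter = seccomp_bpf![
//     VALIDATE_ARCH(AUDIT_ARCH_X86_64),
//     LOAD_SYSCALL_IP(),
//     IP_RANGE(text_begin, text_end, ALLOW),
//     DENY,
// ];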
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke() {
let filter = seccomp_bpf![
VALIDATE_ARCH(AUDIT_ARCH_X86_64),
LOAD_SYSCALL_NR,
SYSCALL(Sysno::openat, DENY),
SYSCALL(Sysno::close, DENY),
SYSCALL(Sysno::write, DENY),
SYSCALL(Sysno::read, DENY),
ALLOW,
];
assert_eq!(filter.len(), 13);
}
}
| {
return Err(Errno::EINVAL);
} | conditional_block |
bpf.rs | /*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#![allow(non_snake_case)]
pub use libc::sock_filter;
use syscalls::Errno;
use syscalls::Sysno;
use crate::fd::Fd;
// See: /include/uapi/linux/bpf_common.h
// Instruction classes
pub const BPF_LD: u16 = 0x00;
pub const BPF_ST: u16 = 0x02;
pub const BPF_JMP: u16 = 0x05;
pub const BPF_RET: u16 = 0x06;
// ld/ldx fields
pub const BPF_W: u16 = 0x00;
pub const BPF_ABS: u16 = 0x20;
pub const BPF_MEM: u16 = 0x60;
pub const BPF_JEQ: u16 = 0x10;
pub const BPF_JGT: u16 = 0x20;
pub const BPF_JGE: u16 = 0x30;
pub const BPF_K: u16 = 0x00;
/// Maximum number of instructions.
pub const BPF_MAXINSNS: usize = 4096;
/// Defined in `/include/uapi/linux/seccomp.h`.
const SECCOMP_SET_MODE_FILTER: u32 = 1;
/// Offset of `seccomp_data::nr` in bytes.
const SECCOMP_DATA_OFFSET_NR: u32 = 0;
/// Offset of `seccomp_data::arch` in bytes.
const SECCOMP_DATA_OFFSET_ARCH: u32 = 4;
/// Offset of `seccomp_data::instruction_pointer` in bytes.
const SECCOMP_DATA_OFFSET_IP: u32 = 8;
/// Offset of `seccomp_data::args` in bytes.
#[allow(unused)]
const SECCOMP_DATA_OFFSET_ARGS: u32 = 16;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP + 4;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP + 4;
// These are defined in `/include/uapi/linux/elf-em.h`.
const EM_386: u32 = 3;
const EM_MIPS: u32 = 8;
const EM_PPC: u32 = 20;
const EM_PPC64: u32 = 21;
const EM_ARM: u32 = 40;
const EM_X86_64: u32 = 62;
const EM_AARCH64: u32 = 183;
// These are defined in `/include/uapi/linux/audit.h`.
const __AUDIT_ARCH_64BIT: u32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn | (&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
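// Precondition sketch, from seccomp(2): loading requires either CAP_SYS_ADMIN
// or the no_new_privs bit, which callers typically set first.
//
// unsafe { libc::prctl(libc::PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) };
// filter.load()?;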
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
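// Composition sketch, assuming `SYSCALL`/`ALLOW` from later in this file:
// closures implement `ByteCode` too, so reusable fragments compose without
// defining new types.
//
// let allow_exit = |f: &mut Filter| {
//     SYSCALL(Sysno::exit_group, ALLOW).into_bpf(f);
//     SYSCALL(Sysno::exit, ALLOW).into_bpf(f);
// };
// let mut filter = Filter::new();
// allow_exit.into_bpf(&mut filter);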
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_THREAD);
/// Returns from the seccomp filter, causing a `SIGSYS` to be sent to the calling
/// thread skipping over the syscall without executing it. Unlike [`DENY`], this
/// signal can be caught.
#[allow(unused)]
pub const TRAP: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_TRAP);
/// Returns from the seccomp filter, causing `PTRACE_EVENT_SECCOMP` to be
/// generated for this syscall (if `PTRACE_O_TRACESECCOMP` is enabled). If no
/// tracer is present, the syscall is not executed and `ENOSYS` is returned
/// instead.
///
/// `data` is made available to the tracer via `PTRACE_GETEVENTMSG`.
#[allow(unused)]
pub fn TRACE(data: u16) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_TRACE | (data as u32 & libc::SECCOMP_RET_DATA),
)
}
/// Returns from the seccomp filter, returning the given error instead of
/// executing the syscall.
#[allow(unused)]
pub fn ERRNO(err: Errno) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_ERRNO | (err.into_raw() as u32 & libc::SECCOMP_RET_DATA),
)
}
macro_rules! instruction {
(
$(
$(#[$attrs:meta])*
$vis:vis fn $name:ident($($args:tt)*) {
$($instruction:expr;)*
}
)*
) => {
$(
$vis fn $name($($args)*) -> impl ByteCode {
move |filter: &mut Filter| {
$(
$instruction.into_bpf(filter);
)*
}
}
)*
};
}
instruction! {
    /// Checks that the architecture matches our target architecture. If it does not
/// match, kills the current process. This should be the first step for every
/// seccomp filter to ensure we're working with the syscall table we're
/// expecting. Each architecture has a slightly different syscall table and
/// we need to make sure the syscall numbers we're using are the right ones
/// for the architecture.
pub fn VALIDATE_ARCH(target_arch: u32) {
// Load `seccomp_data.arch`
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_ARCH);
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, target_arch, 1, 0);
BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_PROCESS);
}
pub fn LOAD_SYSCALL_IP() {
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_LO);
// M[0] = lo
BPF_STMT(BPF_ST, 0);
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_HI);
// M[1] = hi
BPF_STMT(BPF_ST, 1);
}
/// Checks if `seccomp_data.nr` matches the given syscall. If so, then jumps
/// to `action`.
///
/// # Example
/// ```no_compile
/// SYSCALL(Sysno::socket, DENY);
/// ```
pub fn SYSCALL(nr: Sysno, action: sock_filter) {
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, nr as i32 as u32, 0, 1);
action;
}
fn IP_RANGE64(blo: u32, bhi: u32, elo: u32, ehi: u32, action: sock_filter) {
// Most of the complexity below is caused by seccomp-bpf only being able
// to operate on `u32` values. We also can't reuse `JGE64` and `JLE64`
// because the jump offsets would be incorrect.
        // STEP1: if !(begin > arg) goto NOMATCH;
        // if (begin_hi > arg.hi) goto STEP2;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, bhi, 4 /* goto STEP2 */, 0);
        // if (begin_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, bhi, 0, 9 /* goto NOMATCH */);
        // Load M[0] to operate on the low bits of the IP.
        BPF_STMT(BPF_LD + BPF_MEM, 0);
        // if (begin_lo >= arg.lo) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, blo, 0, 7 /* goto NOMATCH */);
        // Load M[1] because the next instruction expects the high bits of the
        // IP.
        BPF_STMT(BPF_LD + BPF_MEM, 1);
        // STEP2: if !(arg > end) goto NOMATCH;
        // if (end_hi < arg.hi) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, ehi, 0, 4 /* goto MATCH */);
        // if (end_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ehi, 0, 5 /* goto NOMATCH */);
BPF_STMT(BPF_LD + BPF_MEM, 0);
// if (end_lo < arg.lo) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, elo, 2 /* goto NOMATCH */, 0);
BPF_STMT(BPF_LD + BPF_MEM, 1);
// MATCH: Take the action.
action;
// NOMATCH: Load M[1] again after we loaded M[0].
BPF_STMT(BPF_LD + BPF_MEM, 1);
}
}
/// Checks if the instruction pointer is within a certain range. If so, executes
/// `action`. Otherwise, falls through.
///
/// Note that if `ip == end`, this will not match. That is, the range excludes
/// `end` itself.
///
/// Precondition: The instruction pointer must be loaded with [`LOAD_SYSCALL_IP`]
/// first.
pub fn IP_RANGE(begin: u64, end: u64, action: sock_filter) -> impl ByteCode {
let begin_lo = begin as u32;
let begin_hi = (begin >> 32) as u32;
let end_lo = end as u32;
let end_hi = (end >> 32) as u32;
IP_RANGE64(begin_lo, begin_hi, end_lo, end_hi, action)
}
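// A usage sketch with hypothetical addresses (not from the original file):
// allow syscalls issued from one mapped code region and kill everything else.
//
//     let mut filter = Filter::new();
//     VALIDATE_ARCH(AUDIT_ARCH_X86_64).into_bpf(&mut filter);
//     LOAD_SYSCALL_IP().into_bpf(&mut filter);
//     IP_RANGE(0x7f00_0000_0000, 0x7f00_0000_4000, ALLOW).into_bpf(&mut filter);
//     DENY.into_bpf(&mut filter);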
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke() {
let filter = seccomp_bpf![
VALIDATE_ARCH(AUDIT_ARCH_X86_64),
LOAD_SYSCALL_NR,
SYSCALL(Sysno::openat, DENY),
SYSCALL(Sysno::close, DENY),
SYSCALL(Sysno::write, DENY),
SYSCALL(Sysno::read, DENY),
ALLOW,
];
assert_eq!(filter.len(), 13);
}
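    // An extra check, not in the original tests: a filter that fails a
    // syscall with `EPERM` via `ERRNO` instead of killing the thread.
    // Expected length: VALIDATE_ARCH (3) + LOAD_SYSCALL_NR (1) +
    // SYSCALL (2) + ALLOW (1) = 7 instructions.
    #[test]
    fn errno_action() {
        let filter = seccomp_bpf![
            VALIDATE_ARCH(AUDIT_ARCH_X86_64),
            LOAD_SYSCALL_NR,
            SYSCALL(Sysno::getpid, ERRNO(Errno::EPERM)),
            ALLOW,
        ];
        assert_eq!(filter.len(), 7);
    }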
}
// bpf.rs
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#![allow(non_snake_case)]
pub use libc::sock_filter;
use syscalls::Errno;
use syscalls::Sysno;
use crate::fd::Fd;
// See: /include/uapi/linux/bpf_common.h
// Instruction classes
pub const BPF_LD: u16 = 0x00;
pub const BPF_ST: u16 = 0x02;
pub const BPF_JMP: u16 = 0x05;
pub const BPF_RET: u16 = 0x06;
// ld/ldx fields
pub const BPF_W: u16 = 0x00;
pub const BPF_ABS: u16 = 0x20;
pub const BPF_MEM: u16 = 0x60;
pub const BPF_JEQ: u16 = 0x10;
pub const BPF_JGT: u16 = 0x20;
pub const BPF_JGE: u16 = 0x30;
pub const BPF_K: u16 = 0x00;
/// Maximum number of instructions.
pub const BPF_MAXINSNS: usize = 4096;
/// Defined in `/include/uapi/linux/seccomp.h`.
const SECCOMP_SET_MODE_FILTER: u32 = 1;
/// Offset of `seccomp_data::nr` in bytes.
const SECCOMP_DATA_OFFSET_NR: u32 = 0;
/// Offset of `seccomp_data::arch` in bytes.
const SECCOMP_DATA_OFFSET_ARCH: u32 = 4;
/// Offset of `seccomp_data::instruction_pointer` in bytes.
const SECCOMP_DATA_OFFSET_IP: u32 = 8;
/// Offset of `seccomp_data::args` in bytes.
#[allow(unused)]
const SECCOMP_DATA_OFFSET_ARGS: u32 = 16;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP + 4;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP + 4;
// These are defined in `/include/uapi/linux/elf-em.h`.
const EM_386: u32 = 3;
const EM_MIPS: u32 = 8;
const EM_PPC: u32 = 20;
const EM_PPC64: u32 = 21;
const EM_ARM: u32 = 40;
const EM_X86_64: u32 = 62;
const EM_AARCH64: u32 = 183;
// These are defined in `/include/uapi/linux/audit.h`.
const __AUDIT_ARCH_64BIT: u32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
    fn into_bpf(self, filter: &mut Filter) {
        filter.push(self)
    }
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
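// Hand-expanded for reference, `seccomp_bpf![a, b]` becomes roughly:
//
//     {
//         let mut filter = Filter::new();
//         a.into_bpf(&mut filter);
//         b.into_bpf(&mut filter);
//         filter
//     }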
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_THREAD);
/// Returns from the seccomp filter, causing a `SIGSYS` to be sent to the calling
/// thread skipping over the syscall without executing it. Unlike [`DENY`], this
/// signal can be caught.
#[allow(unused)]
pub const TRAP: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_TRAP);
/// Returns from the seccomp filter, causing `PTRACE_EVENT_SECCOMP` to be
/// generated for this syscall (if `PTRACE_O_TRACESECCOMP` is enabled). If no
/// tracer is present, the syscall is not executed and `ENOSYS` is returned
/// instead.
///
/// `data` is made available to the tracer via `PTRACE_GETEVENTMSG`.
#[allow(unused)]
pub fn TRACE(data: u16) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_TRACE | (data as u32 & libc::SECCOMP_RET_DATA),
)
}
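// Only the low 16 bits of the return value carry `data` (`SECCOMP_RET_DATA`
// is `0x0000ffff`), so a tracer can use distinct values to tell rules apart.
// A sketch with hypothetical rules (not from the original file):
//
//     SYSCALL(Sysno::mmap, TRACE(1)).into_bpf(&mut filter);
//     SYSCALL(Sysno::mprotect, TRACE(2)).into_bpf(&mut filter);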
/// Returns from the seccomp filter, returning the given error instead of
/// executing the syscall.
#[allow(unused)]
pub fn ERRNO(err: Errno) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_ERRNO | (err.into_raw() as u32 & libc::SECCOMP_RET_DATA),
)
}
macro_rules! instruction {
(
$(
$(#[$attrs:meta])*
$vis:vis fn $name:ident($($args:tt)*) {
$($instruction:expr;)*
}
)*
) => {
$(
$vis fn $name($($args)*) -> impl ByteCode {
move |filter: &mut Filter| {
$(
$instruction.into_bpf(filter);
)*
}
}
)*
};
}
instruction! {
    /// Checks that the architecture matches our target architecture. If it does not
/// match, kills the current process. This should be the first step for every
/// seccomp filter to ensure we're working with the syscall table we're
/// expecting. Each architecture has a slightly different syscall table and
/// we need to make sure the syscall numbers we're using are the right ones
/// for the architecture.
pub fn VALIDATE_ARCH(target_arch: u32) {
// Load `seccomp_data.arch`
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_ARCH);
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, target_arch, 1, 0);
BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_PROCESS);
}
pub fn LOAD_SYSCALL_IP() {
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_LO);
// M[0] = lo
BPF_STMT(BPF_ST, 0);
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_HI);
// M[1] = hi
BPF_STMT(BPF_ST, 1);
}
/// Checks if `seccomp_data.nr` matches the given syscall. If so, then jumps
/// to `action`.
///
/// # Example
/// ```no_compile
/// SYSCALL(Sysno::socket, DENY);
/// ```
pub fn SYSCALL(nr: Sysno, action: sock_filter) {
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, nr as i32 as u32, 0, 1);
action;
}
fn IP_RANGE64(blo: u32, bhi: u32, elo: u32, ehi: u32, action: sock_filter) {
// Most of the complexity below is caused by seccomp-bpf only being able
// to operate on `u32` values. We also can't reuse `JGE64` and `JLE64`
// because the jump offsets would be incorrect.
        // STEP1: if !(begin > arg) goto NOMATCH;
        // if (begin_hi > arg.hi) goto STEP2;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, bhi, 4 /* goto STEP2 */, 0);
        // if (begin_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, bhi, 0, 9 /* goto NOMATCH */);
        // Load M[0] to operate on the low bits of the IP.
        BPF_STMT(BPF_LD + BPF_MEM, 0);
        // if (begin_lo >= arg.lo) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, blo, 0, 7 /* goto NOMATCH */);
        // Load M[1] because the next instruction expects the high bits of the
        // IP.
        BPF_STMT(BPF_LD + BPF_MEM, 1);
        // STEP2: if !(arg > end) goto NOMATCH;
        // if (end_hi < arg.hi) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, ehi, 0, 4 /* goto MATCH */);
        // if (end_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ehi, 0, 5 /* goto NOMATCH */);
BPF_STMT(BPF_LD + BPF_MEM, 0);
// if (end_lo < arg.lo) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, elo, 2 /* goto NOMATCH */, 0);
BPF_STMT(BPF_LD + BPF_MEM, 1);
// MATCH: Take the action.
action;
// NOMATCH: Load M[1] again after we loaded M[0].
BPF_STMT(BPF_LD + BPF_MEM, 1);
}
}
/// Checks if the instruction pointer is within a certain range. If so, executes
/// `action`. Otherwise, falls through.
///
/// Note that if `ip == end`, this will not match. That is, the range excludes
/// `end` itself.
///
/// Precondition: The instruction pointer must be loaded with [`LOAD_SYSCALL_IP`]
/// first.
pub fn IP_RANGE(begin: u64, end: u64, action: sock_filter) -> impl ByteCode {
let begin_lo = begin as u32;
let begin_hi = (begin >> 32) as u32;
let end_lo = end as u32;
let end_hi = (end >> 32) as u32;
IP_RANGE64(begin_lo, begin_hi, end_lo, end_hi, action)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke() {
let filter = seccomp_bpf![
VALIDATE_ARCH(AUDIT_ARCH_X86_64),
LOAD_SYSCALL_NR,
SYSCALL(Sysno::openat, DENY),
SYSCALL(Sysno::close, DENY),
SYSCALL(Sysno::write, DENY),
SYSCALL(Sysno::read, DENY),
ALLOW,
];
assert_eq!(filter.len(), 13);
}
}
// bpf.rs
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#![allow(non_snake_case)]
pub use libc::sock_filter;
use syscalls::Errno;
use syscalls::Sysno;
use crate::fd::Fd;
// See: /include/uapi/linux/bpf_common.h
// Instruction classes
pub const BPF_LD: u16 = 0x00;
pub const BPF_ST: u16 = 0x02;
pub const BPF_JMP: u16 = 0x05;
pub const BPF_RET: u16 = 0x06;
// ld/ldx fields
pub const BPF_W: u16 = 0x00;
pub const BPF_ABS: u16 = 0x20;
pub const BPF_MEM: u16 = 0x60;
pub const BPF_JEQ: u16 = 0x10;
pub const BPF_JGT: u16 = 0x20;
pub const BPF_JGE: u16 = 0x30;
pub const BPF_K: u16 = 0x00;
/// Maximum number of instructions.
pub const BPF_MAXINSNS: usize = 4096;
/// Defined in `/include/uapi/linux/seccomp.h`.
const SECCOMP_SET_MODE_FILTER: u32 = 1;
/// Offset of `seccomp_data::nr` in bytes.
const SECCOMP_DATA_OFFSET_NR: u32 = 0;
/// Offset of `seccomp_data::arch` in bytes.
const SECCOMP_DATA_OFFSET_ARCH: u32 = 4;
/// Offset of `seccomp_data::instruction_pointer` in bytes.
const SECCOMP_DATA_OFFSET_IP: u32 = 8;
/// Offset of `seccomp_data::args` in bytes.
#[allow(unused)]
const SECCOMP_DATA_OFFSET_ARGS: u32 = 16;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP + 4;
#[cfg(target_endian = "little")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_HI: u32 = SECCOMP_DATA_OFFSET_IP;
#[cfg(target_endian = "big")]
const SECCOMP_DATA_OFFSET_IP_LO: u32 = SECCOMP_DATA_OFFSET_IP + 4;
// These are defined in `/include/uapi/linux/elf-em.h`.
const EM_386: u32 = 3;
const EM_MIPS: u32 = 8;
const EM_PPC: u32 = 20;
const EM_PPC64: u32 = 21;
const EM_ARM: u32 = 40;
const EM_X86_64: u32 = 62;
const EM_AARCH64: u32 = 183;
// These are defined in `/include/uapi/linux/audit.h`.
const __AUDIT_ARCH_64BIT: u32 = 0x8000_0000;
const __AUDIT_ARCH_LE: u32 = 0x4000_0000;
// These are defined in `/include/uapi/linux/audit.h`.
pub const AUDIT_ARCH_X86: u32 = EM_386 | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_X86_64: u32 = EM_X86_64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_ARM: u32 = EM_ARM | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_AARCH64: u32 = EM_AARCH64 | __AUDIT_ARCH_64BIT | __AUDIT_ARCH_LE;
pub const AUDIT_ARCH_MIPS: u32 = EM_MIPS;
pub const AUDIT_ARCH_PPC: u32 = EM_PPC;
pub const AUDIT_ARCH_PPC64: u32 = EM_PPC64 | __AUDIT_ARCH_64BIT;
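// A sketch of picking the constant for the compile-time target (a
// hypothetical helper, not part of the original file):
//
//     #[cfg(target_arch = "x86_64")]
//     const TARGET_AUDIT_ARCH: u32 = AUDIT_ARCH_X86_64;
//     #[cfg(target_arch = "aarch64")]
//     const TARGET_AUDIT_ARCH: u32 = AUDIT_ARCH_AARCH64;
//     // ... later: VALIDATE_ARCH(TARGET_AUDIT_ARCH)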
bitflags::bitflags! {
#[derive(Default)]
struct FilterFlags: u32 {
const TSYNC = 1 << 0;
const LOG = 1 << 1;
const SPEC_ALLOW = 1 << 2;
const NEW_LISTENER = 1 << 3;
const TSYNC_ESRCH = 1 << 4;
}
}
/// Seccomp-BPF program byte code.
#[derive(Debug, Clone, Eq, PartialEq)]
pub struct Filter {
// Since the limit is 4096 instructions, we *could* use a static array here
// instead. However, that would require bounds checks each time an
// instruction is appended and complicate the interface with `Result` types
// and error handling logic. It's cleaner to just check the size when the
// program is loaded.
filter: Vec<sock_filter>,
}
impl Filter {
/// Creates a new, empty seccomp program. Note that empty BPF programs are not
/// valid and will fail to load.
pub const fn new() -> Self {
Self { filter: Vec::new() }
}
/// Appends a single instruction to the seccomp-BPF program.
pub fn push(&mut self, instruction: sock_filter) {
self.filter.push(instruction);
}
/// Returns the number of instructions in the BPF program.
pub fn len(&self) -> usize {
self.filter.len()
}
/// Returns true if the program is empty. Empty seccomp filters will result
/// in an error when loaded.
pub fn is_empty(&self) -> bool {
self.filter.is_empty()
}
fn install(&self, flags: FilterFlags) -> Result<i32, Errno> {
let len = self.filter.len();
if len == 0 || len > BPF_MAXINSNS {
return Err(Errno::EINVAL);
}
let prog = libc::sock_fprog {
// Note: length is guaranteed to be less than `u16::MAX` because of
// the above check.
len: len as u16,
filter: self.filter.as_ptr() as *mut _,
};
let ptr = &prog as *const libc::sock_fprog;
let value = Errno::result(unsafe {
libc::syscall(
libc::SYS_seccomp,
SECCOMP_SET_MODE_FILTER,
flags.bits(),
ptr,
)
})?;
Ok(value as i32)
}
/// Loads the program via seccomp into the current process.
///
/// Once loaded, the seccomp filter can never be removed. Additional seccomp
/// filters can be loaded, however, and they will chain together and be
/// executed in reverse order.
///
/// NOTE: The maximum size of any single seccomp-bpf filter is 4096
/// instructions. The overall limit is 32768 instructions across all loaded
/// filters.
///
/// See [`seccomp(2)`](https://man7.org/linux/man-pages/man2/seccomp.2.html)
/// for more details.
pub fn load(&self) -> Result<(), Errno> {
self.install(FilterFlags::empty())?;
Ok(())
}
/// This is the same as [`Filter::load`] except that it returns a file
/// descriptor. This is meant to be used with
/// [`seccomp_unotify(2)`](https://man7.org/linux/man-pages/man2/seccomp_unotify.2.html).
pub fn load_and_listen(&self) -> Result<Fd, Errno> {
let fd = self.install(FilterFlags::NEW_LISTENER)?;
Ok(Fd::new(fd))
}
}
impl Extend<sock_filter> for Filter {
fn extend<T: IntoIterator<Item = sock_filter>>(&mut self, iter: T) {
self.filter.extend(iter)
}
}
/// Trait for types that can emit BPF byte code.
pub trait ByteCode {
/// Accumulates BPF instructions into the given filter.
fn into_bpf(self, filter: &mut Filter);
}
impl<F> ByteCode for F
where
F: FnOnce(&mut Filter),
{
fn into_bpf(self, filter: &mut Filter) {
self(filter)
}
}
impl ByteCode for sock_filter {
fn into_bpf(self, filter: &mut Filter) {
filter.push(self)
}
}
/// Returns a seccomp-bpf filter containing the given list of instructions.
///
/// This can be concatenated with other seccomp-BPF programs.
///
/// Note that this is not a true BPF program. Seccomp-bpf is a subset of BPF and
/// so many instructions are not available.
///
/// When executing instructions, the BPF program operates on the syscall
/// information made available as a (read-only) buffer of the following form:
///
/// ```no_compile
/// struct seccomp_data {
/// // The syscall number.
/// nr: u32,
/// // `AUDIT_ARCH_*` value (see `<linux/audit.h>`).
/// arch: u32,
/// // CPU instruction pointer.
/// instruction_pointer: u64,
/// // Up to 6 syscall arguments.
/// args: [u64; 6],
/// }
/// ```
///
/// # Example
///
/// This filter will allow only the specified syscalls.
/// ```
/// let _filter = seccomp_bpf![
/// // Make sure the target process is using the x86-64 syscall ABI.
/// VALIDATE_ARCH(AUDIT_ARCH_X86_64),
/// // Load the current syscall number into `seccomp_data.nr`.
/// LOAD_SYSCALL_NR,
/// // Check if `seccomp_data.nr` matches the given syscalls. If so, then return
/// // from the seccomp filter early, allowing the syscall to continue.
/// SYSCALL(Sysno::open, ALLOW),
/// SYSCALL(Sysno::close, ALLOW),
/// SYSCALL(Sysno::write, ALLOW),
/// SYSCALL(Sysno::read, ALLOW),
/// // Deny all other syscalls by having the kernel kill the current thread with
/// // `SIGSYS`.
/// DENY,
/// ];
/// ```
#[cfg(test)]
macro_rules! seccomp_bpf {
($($inst:expr),+ $(,)?) => {
{
let mut filter = Filter::new();
$(
$inst.into_bpf(&mut filter);
)+
filter
}
};
}
// See: /include/uapi/linux/filter.h
pub const fn BPF_STMT(code: u16, k: u32) -> sock_filter {
sock_filter {
code,
jt: 0,
jf: 0,
k,
}
}
/// A BPF jump instruction.
///
/// # Arguments
///
/// * `code` is the operation code.
/// * `k` is the value operated on for comparisons.
/// * `jt` is the relative offset to jump to if the comparison is true.
/// * `jf` is the relative offset to jump to if the comparison is false.
///
/// # Example
///
/// ```no_compile
/// // Jump to the next instruction if the loaded value is equal to 42.
/// BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, 42, 1, 0);
/// ```
pub const fn BPF_JUMP(code: u16, k: u32, jt: u8, jf: u8) -> sock_filter {
sock_filter { code, jt, jf, k }
}
/// Loads the syscall number into `seccomp_data.nr`.
pub const LOAD_SYSCALL_NR: sock_filter = BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_NR);
/// Returns from the seccomp filter, allowing the syscall to pass through.
#[allow(unused)]
pub const ALLOW: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_ALLOW);
/// Returns from the seccomp filter, instructing the kernel to kill the calling
/// thread with `SIGSYS` before executing the syscall.
#[allow(unused)]
pub const DENY: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_THREAD);
/// Returns from the seccomp filter, causing a `SIGSYS` to be sent to the calling
/// thread skipping over the syscall without executing it. Unlike [`DENY`], this
/// signal can be caught.
#[allow(unused)]
pub const TRAP: sock_filter = BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_TRAP);
/// Returns from the seccomp filter, causing `PTRACE_EVENT_SECCOMP` to be
/// generated for this syscall (if `PTRACE_O_TRACESECCOMP` is enabled). If no
/// tracer is present, the syscall is not executed and `ENOSYS` is returned
/// instead.
///
/// `data` is made available to the tracer via `PTRACE_GETEVENTMSG`.
#[allow(unused)]
pub fn TRACE(data: u16) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_TRACE | (data as u32 & libc::SECCOMP_RET_DATA),
)
}
/// Returns from the seccomp filter, returning the given error instead of
/// executing the syscall.
#[allow(unused)]
pub fn ERRNO(err: Errno) -> sock_filter {
BPF_STMT(
BPF_RET + BPF_K,
libc::SECCOMP_RET_ERRNO | (err.into_raw() as u32 & libc::SECCOMP_RET_DATA),
)
}
macro_rules! instruction {
(
$(
$(#[$attrs:meta])*
$vis:vis fn $name:ident($($args:tt)*) {
$($instruction:expr;)*
}
)*
) => {
$(
$vis fn $name($($args)*) -> impl ByteCode {
move |filter: &mut Filter| {
$(
$instruction.into_bpf(filter);
)*
}
}
)*
};
}
instruction! {
    /// Checks that the architecture matches our target architecture. If it does not
/// match, kills the current process. This should be the first step for every
/// seccomp filter to ensure we're working with the syscall table we're
/// expecting. Each architecture has a slightly different syscall table and
/// we need to make sure the syscall numbers we're using are the right ones
/// for the architecture.
pub fn VALIDATE_ARCH(target_arch: u32) {
// Load `seccomp_data.arch`
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_ARCH);
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, target_arch, 1, 0);
BPF_STMT(BPF_RET + BPF_K, libc::SECCOMP_RET_KILL_PROCESS);
}
pub fn LOAD_SYSCALL_IP() {
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_LO);
// M[0] = lo
BPF_STMT(BPF_ST, 0);
BPF_STMT(BPF_LD + BPF_W + BPF_ABS, SECCOMP_DATA_OFFSET_IP_HI);
// M[1] = hi
BPF_STMT(BPF_ST, 1);
}
/// Checks if `seccomp_data.nr` matches the given syscall. If so, then jumps
/// to `action`.
///
/// # Example
/// ```no_compile
/// SYSCALL(Sysno::socket, DENY);
/// ```
pub fn SYSCALL(nr: Sysno, action: sock_filter) {
BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, nr as i32 as u32, 0, 1);
action;
}
fn IP_RANGE64(blo: u32, bhi: u32, elo: u32, ehi: u32, action: sock_filter) {
// Most of the complexity below is caused by seccomp-bpf only being able
// to operate on `u32` values. We also can't reuse `JGE64` and `JLE64`
// because the jump offsets would be incorrect.
        // STEP1: if !(begin > arg) goto NOMATCH;
        // if (begin_hi > arg.hi) goto STEP2;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, bhi, 4 /* goto STEP2 */, 0);
        // if (begin_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, bhi, 0, 9 /* goto NOMATCH */);
        // Load M[0] to operate on the low bits of the IP.
        BPF_STMT(BPF_LD + BPF_MEM, 0);
        // if (begin_lo >= arg.lo) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, blo, 0, 7 /* goto NOMATCH */);
        // Load M[1] because the next instruction expects the high bits of the
        // IP.
        BPF_STMT(BPF_LD + BPF_MEM, 1);
        // STEP2: if !(arg > end) goto NOMATCH;
        // if (end_hi < arg.hi) goto MATCH;
        BPF_JUMP(BPF_JMP + BPF_JGT + BPF_K, ehi, 0, 4 /* goto MATCH */);
        // if (end_hi != arg.hi) goto NOMATCH;
        BPF_JUMP(BPF_JMP + BPF_JEQ + BPF_K, ehi, 0, 5 /* goto NOMATCH */);
BPF_STMT(BPF_LD + BPF_MEM, 0);
// if (end_lo < arg.lo) goto MATCH;
BPF_JUMP(BPF_JMP + BPF_JGE + BPF_K, elo, 2 /* goto NOMATCH */, 0);
BPF_STMT(BPF_LD + BPF_MEM, 1);
// MATCH: Take the action.
action;
// NOMATCH: Load M[1] again after we loaded M[0].
BPF_STMT(BPF_LD + BPF_MEM, 1);
}
}
/// Checks if the instruction pointer is within a certain range. If so, executes
/// `action`. Otherwise, falls through.
///
/// Note that if `ip == end`, this will not match. That is, the range excludes
/// `end` itself.
///
/// Precondition: The instruction pointer must be loaded with [`LOAD_SYSCALL_IP`]
/// first.
pub fn IP_RANGE(begin: u64, end: u64, action: sock_filter) -> impl ByteCode {
let begin_lo = begin as u32;
let begin_hi = (begin >> 32) as u32;
let end_lo = end as u32;
let end_hi = (end >> 32) as u32;
IP_RANGE64(begin_lo, begin_hi, end_lo, end_hi, action)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke() {
let filter = seccomp_bpf![
VALIDATE_ARCH(AUDIT_ARCH_X86_64),
LOAD_SYSCALL_NR,
SYSCALL(Sysno::openat, DENY),
SYSCALL(Sysno::close, DENY),
SYSCALL(Sysno::write, DENY),
SYSCALL(Sysno::read, DENY),
ALLOW,
];
assert_eq!(filter.len(), 13);
}
}
// vec.rs
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
use crate::ule::*;
use alloc::vec::Vec;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
use core::ops::Deref;
use super::*;
/// A zero-copy, byte-aligned vector for variable-width types.
///
/// `VarZeroVec<T>` is designed as a drop-in replacement for `Vec<T>` in situations where it is
/// desirable to borrow data from an unaligned byte slice, such as zero-copy deserialization, and
/// where `T`'s data is variable-length (e.g. `String`)
///
/// `T` must implement [`VarULE`], which is already implemented for [`str`] and `[u8]`. For storing more
/// complicated series of elements, it is implemented on `ZeroSlice<T>` as well as `VarZeroSlice<T>`
/// for nesting. [`zerovec::make_varule`](crate::make_varule) may be used to generate
/// a dynamically-sized [`VarULE`] type and conversions to and from a custom type.
///
/// For example, here are some owned types and their zero-copy equivalents:
///
/// - `Vec<String>`: `VarZeroVec<'a, str>`
/// - `Vec<Vec<u8>>>`: `VarZeroVec<'a, [u8]>`
/// - `Vec<Vec<u32>>`: `VarZeroVec<'a, ZeroSlice<u32>>`
/// - `Vec<Vec<String>>`: `VarZeroVec<'a, VarZeroSlice<str>>`
///
/// Most of the methods on `VarZeroVec<'a, T>` come from its [`Deref`] implementation to [`VarZeroSlice<T>`](VarZeroSlice).
///
/// For creating zero-copy vectors of fixed-size types, see [`ZeroVec`](crate::ZeroVec).
///
/// `VarZeroVec<T>` behaves much like [`Cow`](alloc::borrow::Cow), where it can be constructed from
/// owned data (and then mutated!) but can also borrow from some buffer.
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
///
/// # Bytes and Equality
///
/// Two [`VarZeroVec`]s are equal if and only if their bytes are equal, as described in the trait
/// [`VarULE`]. However, we do not guarantee stability of byte equality or serialization format
/// across major SemVer releases.
///
/// To compare a [`Vec<T>`] to a [`VarZeroVec<T>`], it is generally recommended to use
/// [`Iterator::eq`], since it is somewhat expensive at runtime to convert from a [`Vec<T>`] to a
/// [`VarZeroVec<T>`] or vice-versa.
///
/// Prior to zerovec reaching 1.0, the precise byte representation of [`VarZeroVec`] is still
/// under consideration, with different options along the space-time spectrum. See
/// [#1410](https://github.com/unicode-org/icu4x/issues/1410).
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::VarZeroVec;
///
/// // A list of strings to serialize into the little-endian byte format.
/// let strings = vec!["w", "ω", "文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
/// strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::ule::*;
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
/// use zerovec::ZeroVec;
///
/// // The structured list corresponds to the list of integers.
/// let numbers: &[&[u32]] = &[
/// &[12, 25, 38],
/// &[39179, 100],
/// &[42, 55555],
/// &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
/// vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism, see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 4 bytes for `length` (interpreted as a little-endian u32)
/// - `4 * length` bytes of `indices` (interpreted as little-endian u32)
/// - Remaining bytes for actual `data`
///
/// Each element in the `indices` array points to the starting index of its corresponding
/// data part in the `data` list. The ending index can be calculated from the starting index
/// of the next element (or the length of the slice if dealing with the last element).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
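///
/// A quick sketch of this layout (using the default format; as noted above,
/// the exact byte representation is not guaranteed stable across releases):
///
/// ```rust
/// use zerovec::VarZeroVec;
///
/// let strings = vec!["ab", "c"];
/// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
/// // The buffer starts with the element count as a little-endian u32...
/// assert_eq!(bytes[0..4], [2u8, 0, 0, 0]);
/// // ...and ends with the concatenated string data.
/// assert!(bytes.ends_with(b"abc"));
/// ```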
///
/// [`ule`]: crate::ule
#[non_exhaustive]
pub enum VarZeroVec<'a, T: ?Sized, F = Index16> {
/// An allocated VarZeroVec, allowing for mutations.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv = VarZeroVec::<str>::default();
/// vzv.make_mut().push("foo");
/// vzv.make_mut().push("bar");
/// assert!(matches!(vzv, VarZeroVec::Owned(_)));
/// ```
Owned(VarZeroVecOwned<T, F>),
/// A borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
VarZeroSlice::fmt(self, f)
}
}
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::mem::transmute(bytes))
}
/// Convert this into a mutable vector of the owned `T` type, cloning if necessary.
///
///
/// # Example
///
/// ```rust,ignore
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let mut vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// let mutvec = vec.make_mut();
/// mutvec.push("lorem ipsum".into());
/// mutvec[2] = "dolor sit".into();
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "dolor sit");
/// assert_eq!(&vec[3], "quux");
/// assert_eq!(&vec[4], "lorem ipsum");
/// # Ok::<(), ZeroVecError>(())
/// ```
//
// This function is crate-public for now since we don't yet want to stabilize
// the internal implementation details
pub fn make_mut(&mut self) -> &mut VarZeroVecOwned<T, F> {
match self {
VarZeroVec::Owned(ref mut vec) => vec,
VarZeroVec::Borrowed(slice) => {
let new_self = VarZeroVecOwned::from_slice(slice);
*self = new_self.into();
// recursion is limited since we are guaranteed to hit the Owned branch
self.make_mut()
}
}
}
/// Converts a borrowed ZeroVec to an owned ZeroVec. No-op if already owned.
///
/// # Example
///
/// ```
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
    /// // has 'static lifetime
/// let owned = vec.into_owned();
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_owned(mut self) -> VarZeroVec<'static, T, F> {
self.make_mut();
match self {
VarZeroVec::Owned(vec) => vec.into(),
_ => unreachable!(),
}
}
/// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
match *self {
VarZeroVec::Owned(ref owned) => owned,
VarZeroVec::Borrowed(b) => b,
}
}
/// Takes the byte vector representing the encoded data of this VarZeroVec. If borrowed,
/// this function allocates a byte vector and copies the borrowed bytes into it.
///
/// The bytes can be passed back to [`Self::parse_byte_slice()`].
///
/// To get a reference to the bytes without moving, see [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz"];
/// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
///
/// let mut borrowed: VarZeroVec<str> = VarZeroVec::parse_byte_slice(&bytes)?;
/// assert_eq!(borrowed, &*strings);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
    pub fn into_bytes(self) -> Vec<u8> {
VarZeroVec::Owned(vec) => vec.into_bytes(),
VarZeroVec::Borrowed(vec) => vec.as_bytes().to_vec(),
}
}
/// Return whether the [`VarZeroVec`] is operating on owned or borrowed
/// data. [`VarZeroVec::into_owned()`] and [`VarZeroVec::make_mut()`] can
/// be used to force it into an owned type
pub fn is_owned(&self) -> bool {
match self {
VarZeroVec::Owned(..) => true,
VarZeroVec::Borrowed(..) => false,
}
}
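    // A small sketch of the ownership transition (hypothetical input bytes,
    // not part of the original file):
    //
    //     let mut vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes)?;
    //     assert!(!vzv.is_owned()); // zero-copy parse borrows
    //     vzv.make_mut().push("x"); // first mutation copies into Owned
    //     assert!(vzv.is_owned());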
#[cfg(feature = "bench")]
#[doc(hidden)]
pub fn as_components<'b>(&'b self) -> VarZeroVecComponents<'b, T, F> {
self.as_slice().as_components()
}
}
impl<A, T, F> From<&Vec<A>> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &Vec<A>) -> Self {
Self::from(elements.as_slice())
}
}
impl<A, T, F> From<&[A]> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A]) -> Self {
#[allow(clippy::unwrap_used)] // TODO(#1410) Better story for fallibility
VarZeroVecOwned::try_from_elements(elements).unwrap().into()
}
}
impl<A, T, F, const N: usize> From<&[A; N]> for VarZeroVec<'static, T, F>
where
    T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A; N]) -> Self {
Self::from(elements.as_slice())
}
}
impl<'a, 'b, T, F> PartialEq<VarZeroVec<'b, T, F>> for VarZeroVec<'a, T, F>
where
T: VarULE,
    T: ?Sized,
T: PartialEq,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &VarZeroVec<'b, T, F>) -> bool {
// VarULE has an API guarantee that this is equivalent
// to `T::VarULE::eq()`
self.as_bytes().eq(other.as_bytes())
}
}
impl<'a, T, F> Eq for VarZeroVec<'a, T, F>
where
T: VarULE,
    T: ?Sized,
T: Eq,
F: VarZeroVecFormat,
{
}
impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVec<'_, T, F>
where
    T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &&[A]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<T, A, F, const N: usize> PartialEq<[A; N]> for VarZeroVec<'_, T, F>
where
    T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &[A; N]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<'a, T: VarULE + ?Sized + PartialOrd, F: VarZeroVecFormat> PartialOrd for VarZeroVec<'a, T, F> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
impl<'a, T: VarULE + ?Sized + Ord, F: VarZeroVecFormat> Ord for VarZeroVec<'a, T, F> {
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other.iter())
}
}
| es(self) - | identifier_name |
// vec.rs
// This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
use crate::ule::*;
use alloc::vec::Vec;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
use core::ops::Deref;
use super::*;
/// A zero-copy, byte-aligned vector for variable-width types.
///
/// `VarZeroVec<T>` is designed as a drop-in replacement for `Vec<T>` in situations where it is
/// desirable to borrow data from an unaligned byte slice, such as zero-copy deserialization, and
/// where `T`'s data is variable-length (e.g. `String`)
///
/// `T` must implement [`VarULE`], which is already implemented for [`str`] and `[u8]`. For storing more
/// complicated series of elements, it is implemented on `ZeroSlice<T>` as well as `VarZeroSlice<T>`
/// for nesting. [`zerovec::make_varule`](crate::make_varule) may be used to generate
/// a dynamically-sized [`VarULE`] type and conversions to and from a custom type.
///
/// For example, here are some owned types and their zero-copy equivalents:
///
/// - `Vec<String>`: `VarZeroVec<'a, str>`
/// - `Vec<Vec<u8>>>`: `VarZeroVec<'a, [u8]>`
/// - `Vec<Vec<u32>>`: `VarZeroVec<'a, ZeroSlice<u32>>`
/// - `Vec<Vec<String>>`: `VarZeroVec<'a, VarZeroSlice<str>>`
///
/// Most of the methods on `VarZeroVec<'a, T>` come from its [`Deref`] implementation to [`VarZeroSlice<T>`](VarZeroSlice).
///
/// For creating zero-copy vectors of fixed-size types, see [`ZeroVec`](crate::ZeroVec).
///
/// `VarZeroVec<T>` behaves much like [`Cow`](alloc::borrow::Cow), where it can be constructed from
/// owned data (and then mutated!) but can also borrow from some buffer.
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
///
/// # Bytes and Equality
///
/// Two [`VarZeroVec`]s are equal if and only if their bytes are equal, as described in the trait
/// [`VarULE`]. However, we do not guarantee stability of byte equality or serialization format
/// across major SemVer releases.
///
/// To compare a [`Vec<T>`] to a [`VarZeroVec<T>`], it is generally recommended to use
/// [`Iterator::eq`], since it is somewhat expensive at runtime to convert from a [`Vec<T>`] to a
/// [`VarZeroVec<T>`] or vice-versa.
///
/// Prior to zerovec reaching 1.0, the precise byte representation of [`VarZeroVec`] is still
/// under consideration, with different options along the space-time spectrum. See
/// [#1410](https://github.com/unicode-org/icu4x/issues/1410).
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::VarZeroVec;
///
/// // A list of strings to serialize into the little-endian byte format.
/// let strings = vec!["w", "ω", "文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
/// strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::ule::*;
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
/// use zerovec::ZeroVec;
///
/// // The structured list corresponds to the list of integers.
/// let numbers: &[&[u32]] = &[
/// &[12, 25, 38],
/// &[39179, 100],
/// &[42, 55555],
/// &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
/// vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism, see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 4 bytes for `length` (interpreted as a little-endian u32)
/// - `4 * length` bytes of `indices` (interpreted as little-endian u32)
/// - Remaining bytes for actual `data`
///
/// Each element in the `indices` array points to the starting index of its corresponding
/// data part in the `data` list. The ending index can be calculated from the starting index
/// of the next element (or the length of the slice if dealing with the last element).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
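///
/// As a worked reading of this layout: the 22-byte buffer in the `Borrowed`
/// example below starts with the length `4` as the little-endian u32
/// `4, 0, 0, 0`; with the default [`Index16`] format the indices that follow
/// are 16-bit rather than the 32-bit described above, so `0, 0, 1, 0, 3, 0, 6, 0`
/// encodes the starting offsets `0, 1, 3, 6`; the remaining ten bytes are the
/// UTF-8 data for `"w"`, `"ω"`, `"文"`, and `"𑄃"` (1, 2, 3, and 4 bytes each).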
///
/// [`ule`]: crate::ule
#[non_exhaustive]
pub enum VarZeroVec<'a, T: ?Sized, F = Index16> {
/// An allocated VarZeroVec, allowing for mutations.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv = VarZeroVec::<str>::default();
/// vzv.make_mut().push("foo");
/// vzv.make_mut().push("bar");
/// assert!(matches!(vzv, VarZeroVec::Owned(_)));
/// ```
Owned(VarZeroVecOwned<T, F>),
/// A borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        VarZeroSlice::fmt(self, f)
    }
}
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
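    ///
    /// A round-trip through the byte format (a sketch mirroring the
    /// [`Self::into_bytes()`] example further down):
    ///
    /// ```rust
    /// # use zerovec::ule::ZeroVecError;
    /// # use zerovec::VarZeroVec;
    /// let strings = vec!["foo", "bar"];
    /// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
    /// let parsed: VarZeroVec<str> = VarZeroVec::parse_byte_slice(&bytes)?;
    /// assert_eq!(parsed, &*strings);
    /// # Ok::<(), ZeroVecError>(())
    /// ```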
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` need to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::mem::transmute(bytes))
}
/// Convert this into a mutable vector of the owned `T` type, cloning if necessary.
///
///
/// # Example
///
/// ```rust,ignore
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let mut vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// let mutvec = vec.make_mut();
/// mutvec.push("lorem ipsum".into());
/// mutvec[2] = "dolor sit".into();
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "dolor sit");
/// assert_eq!(&vec[3], "quux");
/// assert_eq!(&vec[4], "lorem ipsum");
/// # Ok::<(), ZeroVecError>(())
/// ```
//
// This function is crate-public for now since we don't yet want to stabilize
// the internal implementation details
pub fn make_mut(&mut self) -> &mut VarZeroVecOwned<T, F> {
match self {
VarZeroVec::Owned(ref mut vec) => vec,
VarZeroVec::Borrowed(slice) => {
let new_self = VarZeroVecOwned::from_slice(slice);
*self = new_self.into();
// recursion is limited since we are guaranteed to hit the Owned branch
self.make_mut()
}
}
}
/// Converts a borrowed VarZeroVec to an owned one. No-op if already owned.
///
/// # Example
///
/// ```
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// // has 'static lifetime
/// let owned = vec.into_owned();
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_owned(mut self) -> VarZeroVec<'static, T, F> {
self.make_mut();
match self {
VarZeroVec::Owned(vec) => vec.into(),
_ => unreachable!(),
}
}
/// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
match *self {
VarZeroVec::Owned(ref owned) => owned,
VarZeroVec::Borrowed(b) => b,
}
}
/// Takes the byte vector representing the encoded data of this VarZeroVec. If borrowed,
/// this function allocates a byte vector and copies the borrowed bytes into it.
///
/// The bytes can be passed back to [`Self::parse_byte_slice()`].
///
/// To get a reference to the bytes without moving, see [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz"];
/// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
///
/// let mut borrowed: VarZeroVec<str> = VarZeroVec::parse_byte_slice(&bytes)?;
/// assert_eq!(borrowed, &*strings);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_bytes(self) -> Vec<u8> {
match self {
VarZeroVec::Owned(vec) => vec.into_bytes(),
VarZeroVec::Borrowed(vec) => vec.as_bytes().to_vec(),
}
}
/// Return whether the [`VarZeroVec`] is operating on owned or borrowed
/// data. [`VarZeroVec::into_owned()`] and [`VarZeroVec::make_mut()`] can
/// be used to force it into an owned type.
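///
/// # Example
///
/// A small sketch of the expected transitions, reusing `make_mut` from above:
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(!vzv.is_owned()); // `new()` borrows an empty slice
/// vzv.make_mut().push("hello");
/// assert!(vzv.is_owned()); // mutation forces a conversion to `Owned`
/// ```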
pub fn is_owned(&self) -> bool {
match self {
VarZeroVec::Owned(..) => true,
VarZeroVec::Borrowed(..) => false,
}
}
#[cfg(feature = "bench")]
#[doc(hidden)]
pub fn as_components<'b>(&'b self) -> VarZeroVecComponents<'b, T, F> {
self.as_slice().as_components()
}
}
impl<A, T, F> From<&Vec<A>> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &Vec<A>) -> Self {
Self::from(elements.as_slice())
}
}
impl<A, T, F> From<&[A]> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A]) -> Self {
#[allow(clippy::unwrap_used)] // TODO(#1410) Better story for fallibility
VarZeroVecOwned::try_from_elements(elements).unwrap().into()
}
}
impl<A, T, F, const N: usize> From<&[A; N]> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A; N]) -> Self {
Self::from(elements.as_slice())
}
}
impl<'a, 'b, T, F> PartialEq<VarZeroVec<'b, T, F>> for VarZeroVec<'a, T, F>
where
T: VarULE,
T: ?Sized,
T: PartialEq,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &VarZeroVec<'b, T, F>) -> bool {
// VarULE has an API guarantee that this is equivalent
// to `T::VarULE::eq()`
self.as_bytes().eq(other.as_bytes())
}
}
impl<'a, T, F> Eq for VarZeroVec<'a, T, F>
where
T: VarULE,
T: ?Sized,
T: Eq,
F: VarZeroVecFormat,
{
}
impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVec<'_, T, F>
where
T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &&[A]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<T, A, F, const N: usize> PartialEq<[A; N]> for VarZeroVec<'_, T, F>
where
T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &[A; N]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<'a, T: VarULE + ?Sized + PartialOrd, F: VarZeroVecFormat> PartialOrd for VarZeroVec<'a, T, F> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
impl<'a, T: VarULE + ?Sized + Ord, F: VarZeroVecFormat> Ord for VarZeroVec<'a, T, F> {
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other.iter())
}
}
| VarZeroSlice::fmt(self, f)
}
}
impl | identifier_body |
vec.rs | // This file is part of ICU4X. For terms of use, please see the file
// called LICENSE at the top level of the ICU4X source tree
// (online at: https://github.com/unicode-org/icu4x/blob/main/LICENSE ).
use crate::ule::*;
use alloc::vec::Vec;
use core::cmp::{Ord, Ordering, PartialOrd};
use core::fmt;
use core::ops::Deref;
use super::*;
/// A zero-copy, byte-aligned vector for variable-width types.
///
/// `VarZeroVec<T>` is designed as a drop-in replacement for `Vec<T>` in situations where it is
/// desirable to borrow data from an unaligned byte slice, such as zero-copy deserialization, and
/// where `T`'s data is variable-length (e.g. `String`)
///
/// `T` must implement [`VarULE`], which is already implemented for [`str`] and `[u8]`. For storing more
/// complicated series of elements, it is implemented on `ZeroSlice<T>` as well as `VarZeroSlice<T>`
/// for nesting. [`zerovec::make_varule`](crate::make_varule) may be used to generate
/// a dynamically-sized [`VarULE`] type and conversions to and from a custom type.
///
/// For example, here are some owned types and their zero-copy equivalents:
///
/// - `Vec<String>`: `VarZeroVec<'a, str>`
/// - `Vec<Vec<u8>>`: `VarZeroVec<'a, [u8]>`
/// - `Vec<Vec<u32>>`: `VarZeroVec<'a, ZeroSlice<u32>>`
/// - `Vec<Vec<String>>`: `VarZeroVec<'a, VarZeroSlice<str>>`
///
/// Most of the methods on `VarZeroVec<'a, T>` come from its [`Deref`] implementation to [`VarZeroSlice<T>`](VarZeroSlice).
///
/// For creating zero-copy vectors of fixed-size types, see [`ZeroVec`](crate::ZeroVec).
///
/// `VarZeroVec<T>` behaves much like [`Cow`](alloc::borrow::Cow), where it can be constructed from
/// owned data (and then mutated!) but can also borrow from some buffer.
///
/// The `F` type parameter is a [`VarZeroVecFormat`] (see its docs for more details), which can be used to select the
/// precise format of the backing buffer with various size and performance tradeoffs. It defaults to [`Index16`].
///
/// # Bytes and Equality
///
/// Two [`VarZeroVec`]s are equal if and only if their bytes are equal, as described in the trait
/// [`VarULE`]. However, we do not guarantee stability of byte equality or serialization format
/// across major SemVer releases.
///
/// To compare a [`Vec<T>`] to a [`VarZeroVec<T>`], it is generally recommended to use
/// [`Iterator::eq`], since it is somewhat expensive at runtime to convert from a [`Vec<T>`] to a
/// [`VarZeroVec<T>`] or vice-versa.
///
/// Prior to zerovec reaching 1.0, the precise byte representation of [`VarZeroVec`] is still
/// under consideration, with different options along the space-time spectrum. See
/// [#1410](https://github.com/unicode-org/icu4x/issues/1410).
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// use zerovec::VarZeroVec;
///
/// // The little-endian bytes correspond to the list of strings.
/// let strings = vec!["w", "ω", "文", "𑄃"];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// strings: VarZeroVec<'a, str>,
/// }
///
/// let data = Data {
/// strings: VarZeroVec::from(&strings),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// // Will deserialize without allocations
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.strings.get(2), Some("文"));
/// assert_eq!(deserialized.strings, &*strings);
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// Here's another example with `ZeroSlice<T>` (similar to `[T]`):
///
/// ```rust | /// # use zerovec::ule::ZeroVecError;
/// use zerovec::ule::*;
/// use zerovec::VarZeroVec;
/// use zerovec::ZeroSlice;
/// use zerovec::ZeroVec;
///
/// // The structured list corresponds to the list of integers.
/// let numbers: &[&[u32]] = &[
/// &[12, 25, 38],
/// &[39179, 100],
/// &[42, 55555],
/// &[12345, 54321, 9],
/// ];
///
/// #[derive(serde::Serialize, serde::Deserialize)]
/// struct Data<'a> {
/// #[serde(borrow)]
/// vecs: VarZeroVec<'a, ZeroSlice<u32>>,
/// }
///
/// let data = Data {
/// vecs: VarZeroVec::from(numbers),
/// };
///
/// let bincode_bytes =
/// bincode::serialize(&data).expect("Serialization should be successful");
///
/// let deserialized: Data = bincode::deserialize(&bincode_bytes)
/// .expect("Deserialization should be successful");
///
/// assert_eq!(deserialized.vecs[0].get(1).unwrap(), 25);
/// assert_eq!(deserialized.vecs[1], *numbers[1]);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
///
/// [`VarZeroVec`]s can be nested infinitely via a similar mechanism, see the docs of [`VarZeroSlice`]
/// for more information.
///
/// # How it Works
///
/// `VarZeroVec<T>`, when used with non-human-readable serializers (like `bincode`), will
/// serialize to a specially formatted list of bytes. The format is:
///
/// - 4 bytes for `length` (interpreted as a little-endian u32)
/// - `2 * length` bytes of `indices` (one little-endian u16 start offset per element, in the default [`Index16`] format)
/// - Remaining bytes for actual `data`
///
/// Each element in the `indices` array points to the starting index of its corresponding
/// data part in the `data` list. The ending index can be calculated from the starting index
/// of the next element (or the length of the slice if dealing with the last element).
///
/// See [the design doc](https://github.com/unicode-org/icu4x/blob/main/utils/zerovec/design_doc.md) for more details.
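///
/// As a sanity check, a buffer in this format can be decoded by hand. A sketch,
/// assuming the default [`Index16`] layout and the same four strings as above:
///
/// ```rust
/// use core::convert::TryInto;
///
/// let bytes: &[u8] = &[
///     4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
///     145, 132, 131,
/// ];
/// // 4 bytes of little-endian u32 element count...
/// let len = u32::from_le_bytes(bytes[0..4].try_into().unwrap());
/// assert_eq!(len, 4);
/// // ...then one little-endian u16 start offset per element...
/// let second_start = u16::from_le_bytes(bytes[6..8].try_into().unwrap());
/// assert_eq!(second_start, 1); // "ω" begins at data offset 1
/// // ...and the remaining bytes are the concatenated element data.
/// assert_eq!(bytes[12], b'w'); // first element, "w", at data offset 0
/// ```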
///
/// [`ule`]: crate::ule
#[non_exhaustive]
pub enum VarZeroVec<'a, T: ?Sized, F = Index16> {
/// An allocated VarZeroVec, allowing for mutations.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let mut vzv = VarZeroVec::<str>::default();
/// vzv.make_mut().push("foo");
/// vzv.make_mut().push("bar");
/// assert!(matches!(vzv, VarZeroVec::Owned(_)));
/// ```
Owned(VarZeroVecOwned<T, F>),
/// A borrowed VarZeroVec, requiring no allocations.
///
/// If a mutating operation is invoked on VarZeroVec, the Borrowed is converted to Owned.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let bytes = &[
/// 4, 0, 0, 0, 0, 0, 1, 0, 3, 0, 6, 0, 119, 207, 137, 230, 150, 135, 240,
/// 145, 132, 131,
/// ];
///
/// let vzv: VarZeroVec<str> = VarZeroVec::parse_byte_slice(bytes).unwrap();
/// assert!(matches!(vzv, VarZeroVec::Borrowed(_)));
/// ```
Borrowed(&'a VarZeroSlice<T, F>),
}
impl<'a, T: ?Sized, F> Clone for VarZeroVec<'a, T, F> {
fn clone(&self) -> Self {
match *self {
VarZeroVec::Owned(ref o) => o.clone().into(),
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> fmt::Debug for VarZeroVec<'_, T, F>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
VarZeroSlice::fmt(self, f)
}
}
impl<'a, T: ?Sized, F> From<VarZeroVecOwned<T, F>> for VarZeroVec<'a, T, F> {
#[inline]
fn from(other: VarZeroVecOwned<T, F>) -> Self {
VarZeroVec::Owned(other)
}
}
impl<'a, T: ?Sized, F> From<&'a VarZeroSlice<T, F>> for VarZeroVec<'a, T, F> {
fn from(other: &'a VarZeroSlice<T, F>) -> Self {
VarZeroVec::Borrowed(other)
}
}
impl<'a, T: ?Sized + VarULE, F: VarZeroVecFormat> From<VarZeroVec<'a, T, F>>
for VarZeroVecOwned<T, F>
{
#[inline]
fn from(other: VarZeroVec<'a, T, F>) -> Self {
match other {
VarZeroVec::Owned(o) => o,
VarZeroVec::Borrowed(b) => b.into(),
}
}
}
impl<T: VarULE + ?Sized> Default for VarZeroVec<'_, T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: VarULE + ?Sized, F: VarZeroVecFormat> Deref for VarZeroVec<'_, T, F> {
type Target = VarZeroSlice<T, F>;
fn deref(&self) -> &VarZeroSlice<T, F> {
self.as_slice()
}
}
impl<'a, T: VarULE + ?Sized, F: VarZeroVecFormat> VarZeroVec<'a, T, F> {
/// Creates a new, empty `VarZeroVec<T>`.
///
/// # Examples
///
/// ```
/// use zerovec::VarZeroVec;
///
/// let vzv: VarZeroVec<str> = VarZeroVec::new();
/// assert!(vzv.is_empty());
/// ```
#[inline]
pub const fn new() -> Self {
Self::Borrowed(VarZeroSlice::new_empty())
}
/// Parse a VarZeroVec from a slice of the appropriate format
///
/// Slices of the right format can be obtained via [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "baz");
/// assert_eq!(&vec[3], "quux");
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn parse_byte_slice(slice: &'a [u8]) -> Result<Self, ZeroVecError> {
let borrowed = VarZeroSlice::<T, F>::parse_byte_slice(slice)?;
Ok(VarZeroVec::Borrowed(borrowed))
}
/// Uses a `&[u8]` buffer as a `VarZeroVec<T>` without any verification.
///
/// # Safety
///
/// `bytes` needs to be an output from [`VarZeroSlice::as_bytes()`].
pub const unsafe fn from_bytes_unchecked(bytes: &'a [u8]) -> Self {
Self::Borrowed(core::mem::transmute(bytes))
}
/// Convert this into a mutable vector of the owned `T` type, cloning if necessary.
///
/// # Example
///
/// ```rust,ignore
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let mut vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// let mutvec = vec.make_mut();
/// mutvec.push("lorem ipsum".into());
/// mutvec[2] = "dolor sit".into();
/// assert_eq!(&vec[0], "foo");
/// assert_eq!(&vec[1], "bar");
/// assert_eq!(&vec[2], "dolor sit");
/// assert_eq!(&vec[3], "quux");
/// assert_eq!(&vec[4], "lorem ipsum");
/// # Ok::<(), ZeroVecError>(())
/// ```
//
// This function is crate-public for now since we don't yet want to stabilize
// the internal implementation details
pub fn make_mut(&mut self) -> &mut VarZeroVecOwned<T, F> {
match self {
VarZeroVec::Owned(ref mut vec) => vec,
VarZeroVec::Borrowed(slice) => {
let new_self = VarZeroVecOwned::from_slice(slice);
*self = new_self.into();
// recursion is limited since we are guaranteed to hit the Owned branch
self.make_mut()
}
}
}
/// Converts a borrowed VarZeroVec to an owned one. No-op if already owned.
///
/// # Example
///
/// ```
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz", "quux"];
/// let vec = VarZeroVec::<str>::from(&strings);
///
/// assert_eq!(vec.len(), 4);
/// // has 'static lifetime
/// let owned = vec.into_owned();
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_owned(mut self) -> VarZeroVec<'static, T, F> {
self.make_mut();
match self {
VarZeroVec::Owned(vec) => vec.into(),
_ => unreachable!(),
}
}
/// Obtain this `VarZeroVec` as a [`VarZeroSlice`]
pub fn as_slice(&self) -> &VarZeroSlice<T, F> {
match *self {
VarZeroVec::Owned(ref owned) => owned,
VarZeroVec::Borrowed(b) => b,
}
}
/// Takes the byte vector representing the encoded data of this VarZeroVec. If borrowed,
/// this function allocates a byte vector and copies the borrowed bytes into it.
///
/// The bytes can be passed back to [`Self::parse_byte_slice()`].
///
/// To get a reference to the bytes without moving, see [`VarZeroSlice::as_bytes()`].
///
/// # Example
///
/// ```rust
/// # use std::str::Utf8Error;
/// # use zerovec::ule::ZeroVecError;
/// # use zerovec::VarZeroVec;
///
/// let strings = vec!["foo", "bar", "baz"];
/// let bytes = VarZeroVec::<str>::from(&strings).into_bytes();
///
/// let mut borrowed: VarZeroVec<str> = VarZeroVec::parse_byte_slice(&bytes)?;
/// assert_eq!(borrowed, &*strings);
///
/// # Ok::<(), ZeroVecError>(())
/// ```
pub fn into_bytes(self) -> Vec<u8> {
match self {
VarZeroVec::Owned(vec) => vec.into_bytes(),
VarZeroVec::Borrowed(vec) => vec.as_bytes().to_vec(),
}
}
/// Return whether the [`VarZeroVec`] is operating on owned or borrowed
/// data. [`VarZeroVec::into_owned()`] and [`VarZeroVec::make_mut()`] can
/// be used to force it into an owned type.
pub fn is_owned(&self) -> bool {
match self {
VarZeroVec::Owned(..) => true,
VarZeroVec::Borrowed(..) => false,
}
}
#[cfg(feature = "bench")]
#[doc(hidden)]
pub fn as_components<'b>(&'b self) -> VarZeroVecComponents<'b, T, F> {
self.as_slice().as_components()
}
}
impl<A, T, F> From<&Vec<A>> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &Vec<A>) -> Self {
Self::from(elements.as_slice())
}
}
impl<A, T, F> From<&[A]> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A]) -> Self {
#[allow(clippy::unwrap_used)] // TODO(#1410) Better story for fallibility
VarZeroVecOwned::try_from_elements(elements).unwrap().into()
}
}
impl<A, T, F, const N: usize> From<&[A; N]> for VarZeroVec<'static, T, F>
where
T: VarULE + ?Sized,
A: EncodeAsVarULE<T>,
F: VarZeroVecFormat,
{
#[inline]
fn from(elements: &[A; N]) -> Self {
Self::from(elements.as_slice())
}
}
impl<'a, 'b, T, F> PartialEq<VarZeroVec<'b, T, F>> for VarZeroVec<'a, T, F>
where
T: VarULE,
T: ?Sized,
T: PartialEq,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &VarZeroVec<'b, T, F>) -> bool {
// VarULE has an API guarantee that this is equivalent
// to `T::VarULE::eq()`
self.as_bytes().eq(other.as_bytes())
}
}
impl<'a, T, F> Eq for VarZeroVec<'a, T, F>
where
T: VarULE,
T: ?Sized,
T: Eq,
F: VarZeroVecFormat,
{
}
impl<T, A, F> PartialEq<&'_ [A]> for VarZeroVec<'_, T, F>
where
T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &&[A]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<T, A, F, const N: usize> PartialEq<[A; N]> for VarZeroVec<'_, T, F>
where
T: VarULE + ?Sized,
T: PartialEq,
A: AsRef<T>,
F: VarZeroVecFormat,
{
#[inline]
fn eq(&self, other: &[A; N]) -> bool {
self.iter().eq(other.iter().map(|t| t.as_ref()))
}
}
impl<'a, T: VarULE + ?Sized + PartialOrd, F: VarZeroVecFormat> PartialOrd for VarZeroVec<'a, T, F> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.iter().partial_cmp(other.iter())
}
}
impl<'a, T: VarULE + ?Sized + Ord, F: VarZeroVecFormat> Ord for VarZeroVec<'a, T, F> {
fn cmp(&self, other: &Self) -> Ordering {
self.iter().cmp(other.iter())
}
} | /// # use std::str::Utf8Error; | random_line_split |
mod.rs | .
//!
//! An endpoint is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the endpoint types. This means, for example, translating Redirect errors to
//! the correct redirect http response, or optionally sending internal errors to loggers. The
//! frontends, which are the bindings to particular server libraries, can instantiate the endpoint
//! api or simply reuse existing types.
//!
//! To ensure adherence to the oauth2 rfc and to improve general implementations, some control
//! flow of incoming packets is specified here rather than in the frontend implementations.
//! Traits are offered to make this compatible with other endpoints. In theory, this makes
//! endpoints pluggable, which could improve testing.
//!
//! Custom endpoint
//! ---------------
//! In order to not place restrictions on the web server library in use, it is possible to
//! implement an endpoint completely with user defined types.
//!
//! This requires custom, related implementations of [`WebRequest`] and [`WebResponse`].
//! _WARNING_: Custom endpoints MUST ensure a secure communication layer with confidential clients.
//! This means using TLS for communication over https.
//!
//! After receiving an authorization grant, access token or access request, initiate the respective
//! flow by collecting the [`Authorizer`], [`Issuer`], and [`Registrar`] instances. For example:
//!
//! [`WebRequest`]: trait.WebRequest.html
//! [`WebResponse`]: trait.WebResponse.html
//! [`Authorizer`]:../../primitives/authorizer/trait.Authorizer.html
//! [`Issuer`]:../../primitives/issuer/trait.Issuer.html
//! [`Registrar`]:../../primitives/registrar/trait.Registrar.html
mod authorization;
mod accesstoken;
mod client_credentials;
mod error;
mod refresh;
mod resource;
mod query;
#[cfg(test)]
mod tests;
use std::borrow::Cow;
use std::marker::PhantomData;
pub use crate::primitives::authorizer::Authorizer;
pub use crate::primitives::issuer::Issuer;
pub use crate::primitives::registrar::Registrar;
pub use crate::primitives::scope::Scope;
use crate::code_grant::resource::{Error as ResourceError};
use crate::code_grant::error::{AuthorizationError, AccessTokenError};
use url::Url;
// Re-export the extension traits under prefixed names.
pub use crate::code_grant::authorization::Extension as AuthorizationExtension;
pub use crate::code_grant::accesstoken::Extension as AccessTokenExtension;
pub use crate::code_grant::client_credentials::Extension as ClientCredentialsExtension;
pub use crate::primitives::registrar::PreGrant;
pub use self::authorization::*;
pub use self::accesstoken::*;
pub use self::client_credentials::ClientCredentialsFlow;
pub use self::error::OAuthError;
pub use self::refresh::RefreshFlow;
pub use self::resource::*;
pub use self::query::*;
/// Answer from OwnerAuthorizer to indicate the owner's choice.
pub enum OwnerConsent<Response: WebResponse> {
/// The owner did not authorize the client.
Denied,
/// The owner has not yet decided, i.e. the returned page is a form for the user.
InProgress(Response),
/// Authorization was granted by the specified user.
Authorized(String),
/// An error occurred while checking authorization.
Error(Response::Error),
}
/// Modifiable reason for creating a response to the client.
///
/// Not all responses indicate failure. A redirect will also occur in the regular course of
/// providing an access token to the third party client. When an error is present (see several
/// methods) it is
/// mostly possible to customize it. This hook provides advanced endpoints with the opportunity to
/// set additional parameters and informational messages before they are encoded.
///
/// See the provided methods for more information and examples.
#[derive(Debug)]
pub struct Template<'a> {
inner: InnerTemplate<'a>,
}
/// The general manner of the response.
///
/// These are parallels for HTTP status codes of the same name.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum ResponseStatus {
/// The response is issued because the requesting party was not authorized.
Unauthorized,
/// The response redirects in the code grant flow.
Redirect,
/// The request was malformed.
BadRequest,
/// This response is normal and expected.
Ok,
}
/// Encapsulates different types of response reasons.
///
/// Each variant contains some form of context information about the response. This can be used either
/// purely informational or in some cases provides additional customization points. The addition of
/// fields to some variant context can occur in any major release until `1.0`. It is discouraged to
/// exhaustively match the fields directly. Since some context could not permit cloning, the enum will
/// not derive this until this has shown unlikely but strongly requested. Please open an issue if you
/// think the pros or cons should be evaluated differently.
#[derive(Debug)]
#[non_exhaustive]
enum InnerTemplate<'a> {
/// Authorization to access the resource has not been granted.
Unauthorized {
/// The underlying cause for denying access.
///
/// The http authorization header is to be set according to this field.
#[allow(dead_code)]
error: Option<ResourceError>,
/// Information on an access token error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// Redirect the user-agent to another url.
///
/// The endpoint has the opportunity to inspect and modify error information to some extent.
/// For example to log an error rate or to provide a pointer to a custom human readable
/// explanation page. The response will generally not contain a body.
Redirect {
/// Information on an authorization error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients or resource owners seeking explanation.
authorization_error: Option<&'a mut AuthorizationError>,
},
/// The request did not conform to specification or was otherwise invalid.
///
/// As such, it was not handled further. Some processes still warrant a response body to be
/// set in the case of an invalid request, containing additional information for the client.
/// For example, an authorized client sending a malformed but authenticated request for an
/// access token will receive additional hints on the cause of his mistake.
BadRequest {
/// Information on an invalid-access-token-request error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// An expected, normal response.
///
/// The content of the response may require precise semantics to be standard compliant,
/// therefore it is constructed using the `WebResponse` trait methods. Try not to tamper with
/// the format too much, such as unsetting a body etc. after the flow has finished.
Ok,
}
/// A pending solicitation to a resource owner.
///
/// This encapsulates the information available to an [`OwnerSolicitor`] when querying consent
/// information.
///
/// [`OwnerSolicitor`]: trait.OwnerSolicitor.html
pub struct Solicitation<'flow> {
pub(crate) grant: Cow<'flow, PreGrant>,
pub(crate) state: Option<Cow<'flow, str>>,
}
impl<'flow> Solicitation<'flow> {
/// Clone the solicitation into an owned structure.
///
/// This mainly helps with sending it across threads.
pub fn into_owned(self) -> Solicitation<'static> {
Solicitation {
grant: Cow::Owned(self.grant.into_owned()),
state: self.state.map(|state| Cow::Owned(state.into_owned())),
}
}
/// Return the pre-grant associated with the request.
///
/// The information in the `PreGrant` is the authoritative information on the client and scopes
/// associated with the request. It has already been validated against those settings and
/// restrictions that were applied when registering the client.
pub fn pre_grant(&self) -> &PreGrant {
self.grant.as_ref()
}
/// The state provided by the client request.
///
/// This will need to be provided in the response back to the client, so it must be preserved
/// across a redirect or a consent screen presented by the user agent.
pub fn state(&self) -> Option<&str> {
match self.state {
None => None,
Some(ref state) => Some(&state),
}
}
/// Create a new solicitation request from a pre grant.
///
/// You usually wouldn't need to call this manually as it is called by the endpoint's flow and
/// then handed with all available information to the solicitor.
pub fn new(grant: &'flow PreGrant) -> Self {
Solicitation {
grant: Cow::Borrowed(grant),
state: None,
}
}
/// Add a client state to the solicitation.
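///
/// A short sketch of composing a solicitation by hand; the `pre_grant` value is
/// assumed to come from a registrar lookup:
///
/// ```
/// # use oxide_auth::endpoint::Solicitation;
/// # use oxide_auth::primitives::registrar::PreGrant;
/// # fn demo(pre_grant: &PreGrant) {
/// let solicitation = Solicitation::new(pre_grant).with_state("opaque-state-1234");
/// assert_eq!(solicitation.state(), Some("opaque-state-1234"));
/// # }
/// ```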
pub fn with_state(self, state: &'flow str) -> Self {
Solicitation {
state: Some(Cow::Borrowed(state)),
..self
}
}
}
/// Checks consent with the owner of a resource, identified in a request.
///
/// See [`frontends::simple`] for an implementation that permits arbitrary functions.
///
/// [`frontends::simple`]:../frontends/simple/endpoint/struct.FnSolicitor.html
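///
/// A minimal sketch of a custom implementation that denies every request (the
/// `DenyAll` name is ours, not part of the crate):
///
/// ```
/// # use oxide_auth::endpoint::{OwnerConsent, OwnerSolicitor, Solicitation, WebRequest};
/// struct DenyAll;
///
/// impl<R: WebRequest> OwnerSolicitor<R> for DenyAll {
///     fn check_consent(&mut self, _: &mut R, _: Solicitation) -> OwnerConsent<R::Response> {
///         OwnerConsent::Denied
///     }
/// }
/// ```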
pub trait OwnerSolicitor<Request: WebRequest> {
/// Ensure that a user (resource owner) is currently authenticated (for example via a session
/// cookie) and determine if he has agreed to the presented grants.
fn check_consent(&mut self, _: &mut Request, _: Solicitation) -> OwnerConsent<Request::Response>;
}
/// Determine the scopes applying to a request of a resource.
///
/// It is possible to use a slice of [`Scope`]s as an implementation of this trait. You can inspect
/// the request that was used to access the resource for which the scopes are to be determined but
/// should generally avoid doing so. Sometimes the scope depends on external parameters and this is
/// unavoidable, e.g. if the scope is created dynamically from the path of the resource.
///
/// ## Example
///
/// Here's a possible new implementation that allows you to update your scope list at runtime:
///
/// ```
/// # use oxide_auth::endpoint::Scopes;
/// # use oxide_auth::endpoint::WebRequest;
/// use oxide_auth::primitives::scope::Scope;
/// use std::sync::{Arc, RwLock};
///
/// struct MyScopes {
/// update: RwLock<Arc<[Scope]>>,
/// current: Arc<[Scope]>,
/// };
///
/// impl<R: WebRequest> Scopes<R> for MyScopes {
/// fn scopes(&mut self, _: &mut R) -> &[Scope] {
/// let update = self.update.read().unwrap();
/// if !Arc::ptr_eq(&update, &self.current) {
/// self.current = update.clone();
/// }
/// &self.current
/// }
/// }
/// ```
///
/// [`Scope`]:../primitives/scope/struct.Scope.html
pub trait Scopes<Request: WebRequest> {
/// A list of alternative scopes.
///
/// One of the scopes needs to be fulfilled by the access token in the request to grant access.
/// A scope is fulfilled if the set of its part is a subset of the parts in the grant. If the
/// slice is empty, then no scope can be fulfilled and the request is always blocked.
fn scopes(&mut self, request: &mut Request) -> &[Scope];
}
/// Abstraction of web requests with several different abstractions and constructors needed by an
/// endpoint. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error;
/// The corresponding type of Responses returned from this module.
type Response: WebResponse<Error = Self::Error>;
/// Retrieve a parsed version of the url query.
///
/// An Err return value indicates a malformed query or an otherwise malformed WebRequest. Note
/// that an empty query should result in `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Retrieve the parsed `application/x-form-urlencoded` body of the request.
///
/// An Err value indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error>;
}
/// Response representation into which the Request is transformed by the code_grant types.
///
/// At most one of the methods `body_text`, `body_json` will be called. Some flows will
/// however not call any of those methods.
pub trait WebResponse {
/// The error generated when trying to construct an unhandled or invalid response.
type Error;
/// Set the response status to 200.
fn ok(&mut self) -> Result<(), Self::Error>;
/// A response which will redirect the user-agent receiving it to the given url.
fn redirect(&mut self, url: Url) -> Result<(), Self::Error>;
/// Set the response status to 400.
fn client_error(&mut self) -> Result<(), Self::Error>;
/// Set the response status to 401 and add a `WWW-Authenticate` header.
fn unauthorized(&mut self, header_value: &str) -> Result<(), Self::Error>;
/// A pure text response with no special media type set.
fn body_text(&mut self, text: &str) -> Result<(), Self::Error>;
/// JSON response data, with media type `application/json`.
fn body_json(&mut self, data: &str) -> Result<(), Self::Error>;
}
/// Intermediate trait to flow specific extensions.
///
/// The existence of this 1) promotes writing of extensions so that they can be reused independent
/// of endpoint and request types; 2) makes it possible to provide some of these in this library.
///
/// Note that all methods will by default return `None` so that adding to other flows is possible
/// without affecting existing implementations.
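///
/// Since every method has a `None` default, a no-op extension is a two-line sketch:
///
/// ```
/// # use oxide_auth::endpoint::Extension;
/// struct NoopExtension;
/// impl Extension for NoopExtension {}
/// ```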
pub trait Extension {
/// The handler for authorization code extensions.
fn authorization(&mut self) -> Option<&mut dyn AuthorizationExtension> {
None
}
/// The handler for access token extensions.
fn access_token(&mut self) -> Option<&mut dyn AccessTokenExtension> {
None
}
/// The handler for client credentials extensions.
fn client_credentials(&mut self) -> Option<&mut dyn ClientCredentialsExtension> {
None
}
}
/// Fuses requests and primitives into a coherent system to give a response.
///
/// There are multiple different valid ways to produce responses and react to internal errors for a
/// single request type. This trait should provide those mechanisms, including trying to recover
/// from primitive errors where appropriate.
///
/// To reduce the number of necessary impls and provide a single interface to a single trait, this
/// trait defines accessor methods for all possibly needed primitives. Note that not all flows
/// actually access all primitives. Thus, an implementation does not necessarily have to return
/// something in `registrar`, `authorizer`, `issuer_mut` but failing to do so will also fail flows
/// that try to use them.
///
/// # Panics
///
/// It is expected that the endpoint primitive functions are consistent, i.e. they don't begin
/// returning `None` after having returned `Some(registrar)` previously for example. This ensures
/// that the checks executed by the flow preparation methods catch missing primitives. When this
/// contract is violated, the execution of a flow may lead to a panic.
pub trait Endpoint<Request: WebRequest> {
/// The error type used as the error representation of each flow.
type Error;
/// A registrar if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires a registrar but does not
/// have any effect on flows that do not require one.
fn registrar(&self) -> Option<&dyn Registrar>;
/// An authorizer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an authorizer but does not
/// have any effect on flows that do not require one.
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer>;
/// An issuer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an issuer but does not have
/// any effect on flows that do not require one.
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer>;
/// Return the system that checks owner consent.
///
/// Returning `None` will implicate failing the authorization code flow but does not have any
/// effect on other flows.
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<Request>>;
/// Determine the required scopes for a request.
///
/// The client must fulfill any one scope, so returning an empty slice will always deny the
/// request.
fn scopes(&mut self) -> Option<&mut dyn Scopes<Request>>;
/// Generate a prototype response.
///
/// The endpoint can rely on this being called at most once for each flow, if it wants
/// to preallocate the response or return a handle on an existing prototype.
fn response(
&mut self, request: &mut Request, kind: Template,
) -> Result<Request::Response, Self::Error>;
/// Wrap an error.
fn error(&mut self, err: OAuthError) -> Self::Error;
/// Wrap an error in the request/response types.
fn web_error(&mut self, err: Request::Error) -> Self::Error;
/// Get the central extension instance of this endpoint.
///
/// Returning `None` is the default implementation and acts as simply not providing any extensions.
fn extension(&mut self) -> Option<&mut dyn Extension> {
None
}
}
impl<'a> Template<'a> {
/// Create an OK template
pub fn new_ok() -> Self {
InnerTemplate::Ok.into()
}
/// Create a bad request template
pub fn new_bad(access_token_error: Option<&'a mut AccessTokenError>) -> Self {
InnerTemplate::BadRequest { access_token_error }.into()
}
/// Create an unauthorized template
pub fn new_unauthorized(
error: Option<ResourceError>, access_token_error: Option<&'a mut AccessTokenError>,
) -> Self {
InnerTemplate::Unauthorized {
error,
access_token_error,
}
.into()
}
/// Create a redirect template
pub fn new_redirect(authorization_error: Option<&'a mut AuthorizationError>) -> Self {
InnerTemplate::Redirect { authorization_error }.into()
}
/// The corresponding status code.
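///
/// For example (a small sketch):
///
/// ```
/// # use oxide_auth::endpoint::{ResponseStatus, Template};
/// assert_eq!(Template::new_ok().status(), ResponseStatus::Ok);
/// ```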
pub fn status(&self) -> ResponseStatus {
match self.inner {
InnerTemplate::Unauthorized { .. } => ResponseStatus::Unauthorized,
InnerTemplate::Redirect { .. } => ResponseStatus::Redirect,
InnerTemplate::BadRequest { .. } => ResponseStatus::BadRequest,
InnerTemplate::Ok => ResponseStatus::Ok,
}
}
/// Supplementary information about an error in the authorization code flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.authorization_error() {
/// eprintln!("[authorization] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/authorization_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn authorization_error(&mut self) -> Option<&mut AuthorizationError> {
match &mut self.inner {
InnerTemplate::Redirect {
authorization_error, ..
} => reborrow(authorization_error),
_ => None,
}
}
/// Supplementary information about an error in the access token flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.access_token_error() {
/// eprintln!("[access_code] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/access_token_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn access_token_error(&mut self) -> Option<&mut AccessTokenError> {
match &mut self.inner {
InnerTemplate::Unauthorized {
access_token_error, ..
} => reborrow(access_token_error),
InnerTemplate::BadRequest {
access_token_error, ..
} => reborrow(access_token_error),
_ => None,
}
}
}
/// Reborrow contained optional reference.
///
/// Slightly tweaked from an `Into` impl: this produces an `Option<&'a mut T>` from an `&'a mut Option<&mut T>`.
fn reborrow<'a, T>(opt: &'a mut Option<&mut T>) -> Option<&'a mut T> {
match opt {
// Magically does the correct lifetime coercion.
Some(inner) => Some(inner),
None => None,
}
}
impl<'a, W: WebRequest> WebRequest for &'a mut W {
type Error = W::Error;
type Response = W::Response;
fn query(&mut self) -> Result<Cow<dyn QueryParameter +'static>, Self::Error> {
(**self).query()
}
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter +'static>, Self::Error> {
(**self).urlbody()
}
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error> {
(**self).authheader()
}
}
impl<'a, R: WebRequest, E: Endpoint<R>> Endpoint<R> for &'a mut E {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl<'a, R: WebRequest, E: Endpoint<R> + 'a> Endpoint<R> for Box<E> {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl Extension for () {}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for &'a mut S {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for Box<S> {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<W: WebRequest> Scopes<W> for [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<W: WebRequest> Scopes<W> for Vec<Scope> {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self.as_slice()
}
}
impl<'a, W: WebRequest> Scopes<W> for &'a [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] |
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for &'a mut S {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for Box<S> {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a> From<InnerTemplate<'a>> for Template<'a> {
fn from(inner: InnerTemplate<'a>) -> Self {
Template { inner }
}
}
/// Check if the header is an authorization method
pub fn is_authorization_method<'h>(header: &'h str, method: &'static str) -> Option<&'h str> {
let header_method = header.get(..method.len())?;
if header_method.eq_ignore_ascii_case(method) {
Some(&header[method.l | {
self
} | identifier_body |
mod.rs | .
//!
//! An endpoint is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the endpoint types. This means, for example, translating Redirect errors to
//! the correct redirect http response, or optionally sending internal errors to loggers. The
//! frontends, which are the bindings to particular server libraries, can instantiate the endpoint
//! api or simply reuse existing types.
//!
//! To ensure adherence to the oauth2 rfc and to improve general implementations, some control
//! flow of incoming packets is specified here rather than in the frontend implementations.
//! Traits are offered to make this compatible with other endpoints. In theory, this makes
//! endpoints pluggable, which could improve testing.
//!
//! Custom endpoint
//! ---------------
//! In order to not place restrictions on the web server library in use, it is possible to
//! implement an endpoint completely with user defined types.
//!
//! This requires custom, related implementations of [`WebRequest`] and [`WebResponse`].
//! _WARNING_: Custom endpoints MUST ensure a secure communication layer with confidential clients.
//! This means using TLS for communication over https.
//!
//! After receiving an authorization grant, access token or access request, initiate the respective
//! flow by collecting the [`Authorizer`], [`Issuer`], and [`Registrar`] instances. For example:
//!
//! [`WebRequest`]: trait.WebRequest.html
//! [`WebResponse`]: trait.WebResponse.html
//! [`Authorizer`]:../../primitives/authorizer/trait.Authorizer.html
//! [`Issuer`]:../../primitives/issuer/trait.Issuer.html
//! [`Registrar`]:../../primitives/registrar/trait.Registrar.html
mod authorization;
mod accesstoken;
mod client_credentials;
mod error;
mod refresh;
mod resource;
mod query;
#[cfg(test)]
mod tests;
use std::borrow::Cow;
use std::marker::PhantomData;
pub use crate::primitives::authorizer::Authorizer;
pub use crate::primitives::issuer::Issuer;
pub use crate::primitives::registrar::Registrar;
pub use crate::primitives::scope::Scope;
use crate::code_grant::resource::{Error as ResourceError};
use crate::code_grant::error::{AuthorizationError, AccessTokenError};
use url::Url;
// Re-export the extension traits under prefixed names.
pub use crate::code_grant::authorization::Extension as AuthorizationExtension;
pub use crate::code_grant::accesstoken::Extension as AccessTokenExtension;
pub use crate::code_grant::client_credentials::Extension as ClientCredentialsExtension;
pub use crate::primitives::registrar::PreGrant;
pub use self::authorization::*;
pub use self::accesstoken::*;
pub use self::client_credentials::ClientCredentialsFlow;
pub use self::error::OAuthError;
pub use self::refresh::RefreshFlow;
pub use self::resource::*;
pub use self::query::*;
/// Answer from OwnerAuthorizer to indicate the owner's choice.
pub enum OwnerConsent<Response: WebResponse> {
/// The owner did not authorize the client.
Denied,
/// The owner has not yet decided, i.e. the returned page is a form for the user.
InProgress(Response),
/// Authorization was granted by the specified user.
Authorized(String),
/// An error occurred while checking authorization.
Error(Response::Error),
}
/// Modifiable reason for creating a response to the client.
///
/// Not all responses indicate failure. A redirect will also occur in the regular course of
/// providing an access token to the third party client. When an error is present (see several
/// methods) it is
/// mostly possible to customize it. This hook provides advanced endpoints with the opportunity to
/// set additional parameters and informational messages before they are encoded.
///
/// See the provided methods for more information and examples.
#[derive(Debug)]
pub struct Template<'a> {
inner: InnerTemplate<'a>,
}
/// The general manner of the response.
///
/// These are parallels for HTTP status codes of the same name.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum ResponseStatus {
/// The response is issued because the requesting party was not authorized.
Unauthorized,
/// The response redirects in the code grant flow.
Redirect,
/// The request was malformed.
BadRequest,
/// This response is normal and expected.
Ok,
}
/// Encapsulates different types of response reasons.
///
/// Each variant contains some form of context information about the response. This can be used either
/// purely informational or in some cases provides additional customization points. The addition of
/// fields to some variant context can occur in any major release until `1.0`. It is discouraged to
/// exhaustively match the fields directly. Since some context might not permit cloning, the enum
/// will not derive `Clone` unless that proves both feasible and strongly requested. Please open an issue if you
/// think the pros or cons should be evaluated differently.
#[derive(Debug)]
#[non_exhaustive]
enum InnerTemplate<'a> {
/// Authorization to access the resource has not been granted.
Unauthorized {
/// The underlying cause for denying access.
///
/// The http authorization header is to be set according to this field.
#[allow(dead_code)]
error: Option<ResourceError>,
/// Information on an access token error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// Redirect the user-agent to another url.
///
/// The endpoint has the opportunity to inspect and modify error information to some extent.
/// For example to log an error rate or to provide a pointer to a custom human readable
/// explanation page. The response will generally not contain a body.
Redirect {
/// Information on an authorization error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients or resource owners seeking explanation.
authorization_error: Option<&'a mut AuthorizationError>,
},
/// The request did not conform to specification or was otherwise invalid.
///
/// As such, it was not handled further. Some processes still warrant a response body to be
/// set in the case of an invalid request, containing additional information for the client.
/// For example, an authorized client sending a malformed but authenticated request for an
/// access token will receive additional hints on the cause of his mistake.
BadRequest {
/// Information on an invalid-access-token-request error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// An expected, normal response.
///
/// The content of the response may require precise semantics to be standard compliant,
/// therefore it is constructed using the `WebResponse` trait methods. Try not to tamper with
/// the format too much, such as unsetting a body etc. after the flow has finished.
Ok,
}
/// A pending solicitation to a resource owner.
///
/// This encapsulates the information available to an [`OwnerSolicitor`] when querying consent
/// information.
///
/// [`OwnerSolicitor`]: trait.OwnerSolicitor.html
pub struct Solicitation<'flow> {
pub(crate) grant: Cow<'flow, PreGrant>,
pub(crate) state: Option<Cow<'flow, str>>,
}
impl<'flow> Solicitation<'flow> {
/// Clone the solicitation into an owned structure.
///
/// This mainly helps with sending it across threads.
pub fn into_owned(self) -> Solicitation<'static> {
Solicitation {
grant: Cow::Owned(self.grant.into_owned()),
state: self.state.map(|state| Cow::Owned(state.into_owned())),
}
}
/// Return the pre-grant associated with the request.
///
/// The information in the `PreGrant` is the authoritative information on the client and scopes
/// associated with the request. It has already been validated against those settings and
/// restrictions that were applied when registering the client.
pub fn pre_grant(&self) -> &PreGrant {
self.grant.as_ref()
}
/// The state provided by the client request.
///
/// This will need to be provided in the response back to the client, so it must be preserved
/// across a redirect or a consent screen presented by the user agent.
pub fn state(&self) -> Option<&str> {
match self.state {
None => None,
Some(ref state) => Some(&state),
}
}
/// Create a new solicitation request from a pre grant.
///
/// You usually wouldn't need to call this manually as it is called by the endpoint's flow and
/// then handed with all available information to the solicitor.
pub fn new(grant: &'flow PreGrant) -> Self {
Solicitation {
grant: Cow::Borrowed(grant),
state: None,
}
}
/// Add a client state to the solicitation.
pub fn with_state(self, state: &'flow str) -> Self {
Solicitation {
state: Some(Cow::Borrowed(state)),
..self
}
}
}
/// Checks consent with the owner of a resource, identified in a request.
///
/// See [`frontends::simple`] for an implementation that permits arbitrary functions.
///
/// [`frontends::simple`]:../frontends/simple/endpoint/struct.FnSolicitor.html
pub trait OwnerSolicitor<Request: WebRequest> {
/// Ensure that a user (resource owner) is currently authenticated (for example via a session
/// cookie) and determine if he has agreed to the presented grants.
fn check_consent(&mut self, _: &mut Request, _: Solicitation) -> OwnerConsent<Request::Response>;
}
/// Determine the scopes applying to a request of a resource.
///
/// It is possible to use a slice of [`Scope`]s as an implementation of this trait. You can inspect
/// the request that was used to access the resource for which the scopes are to be determined but
/// should generally avoid doing so. Sometimes the scope depends on external parameters and this is
/// unavoidable, e.g. if the scope is created dynamically from the path of the resource.
///
/// ## Example
///
/// Here's a possible new implementation that allows you to update your scope list at runtime:
///
/// ```
/// # use oxide_auth::endpoint::Scopes;
/// # use oxide_auth::endpoint::WebRequest;
/// use oxide_auth::primitives::scope::Scope;
/// use std::sync::{Arc, RwLock};
///
/// struct MyScopes {
/// update: RwLock<Arc<[Scope]>>,
/// current: Arc<[Scope]>,
/// };
///
/// impl<R: WebRequest> Scopes<R> for MyScopes {
/// fn scopes(&mut self, _: &mut R) -> &[Scope] {
/// let update = self.update.read().unwrap();
/// if !Arc::ptr_eq(&update, &self.current) {
/// self.current = update.clone();
/// }
/// &self.current
/// }
/// }
/// ```
///
/// [`Scope`]:../primitives/scope/struct.Scope.html
pub trait Scopes<Request: WebRequest> {
/// A list of alternative scopes.
///
/// One of the scopes needs to be fulfilled by the access token in the request to grant access.
/// A scope is fulfilled if the set of its part is a subset of the parts in the grant. If the
/// slice is empty, then no scope can be fulfilled and the request is always blocked.
fn scopes(&mut self, request: &mut Request) -> &[Scope];
}
/// Abstraction of web requests with several different abstractions and constructors needed by an
/// endpoint. It is assumed to originate from an HTTP request, as defined in the scope of the rfc,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error;
/// The corresponding type of Responses returned from this module.
type Response: WebResponse<Error = Self::Error>;
/// Retrieve a parsed version of the url query.
///
/// An Err return value indicates a malformed query or an otherwise malformed WebRequest. Note
/// that an empty query should result in `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Retrieve the parsed `application/x-form-urlencoded` body of the request.
///
/// An Err value indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error>;
}
/// Response representation into which the Request is transformed by the code_grant types.
///
/// At most one of the methods `body_text`, `body_json` will be called. Some flows will
/// however not call any of those methods.
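///
/// A minimal in-memory sketch of an implementation; the `DebugResponse` type and
/// its fields are ours, not part of the crate:
///
/// ```
/// # use oxide_auth::endpoint::WebResponse;
/// use url::Url;
///
/// #[derive(Default)]
/// struct DebugResponse {
///     status: u16,
///     location: Option<Url>,
///     www_authenticate: Option<String>,
///     body: Option<String>,
/// }
///
/// impl WebResponse for DebugResponse {
///     type Error = std::convert::Infallible;
///
///     fn ok(&mut self) -> Result<(), Self::Error> {
///         self.status = 200;
///         Ok(())
///     }
///     fn redirect(&mut self, url: Url) -> Result<(), Self::Error> {
///         self.status = 302;
///         self.location = Some(url);
///         Ok(())
///     }
///     fn client_error(&mut self) -> Result<(), Self::Error> {
///         self.status = 400;
///         Ok(())
///     }
///     fn unauthorized(&mut self, header_value: &str) -> Result<(), Self::Error> {
///         self.status = 401;
///         self.www_authenticate = Some(header_value.to_owned());
///         Ok(())
///     }
///     fn body_text(&mut self, text: &str) -> Result<(), Self::Error> {
///         self.body = Some(text.to_owned());
///         Ok(())
///     }
///     fn body_json(&mut self, data: &str) -> Result<(), Self::Error> {
///         self.body = Some(data.to_owned());
///         Ok(())
///     }
/// }
/// ```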
pub trait WebResponse {
/// The error generated when trying to construct an unhandled or invalid response.
type Error;
/// Set the response status to 200.
fn ok(&mut self) -> Result<(), Self::Error>;
/// A response which will redirect the user-agent receiving it to the given url.
fn redirect(&mut self, url: Url) -> Result<(), Self::Error>;
/// Set the response status to 400.
fn client_error(&mut self) -> Result<(), Self::Error>;
/// Set the response status to 401 and add a `WWW-Authenticate` header.
fn unauthorized(&mut self, header_value: &str) -> Result<(), Self::Error>;
/// A pure text response with no special media type set.
fn body_text(&mut self, text: &str) -> Result<(), Self::Error>;
/// JSON response data, with media type `application/json`.
fn body_json(&mut self, data: &str) -> Result<(), Self::Error>;
}
/// Intermediate trait to flow specific extensions.
///
/// The existence of this 1) promotes writing of extensions so that they can be reused independent
/// of endpoint and request types; 2) makes it possible to provide some of these in this library.
///
/// Note that all methods will by default return `None` so that adding to other flows is possible
/// without affecting existing implementations.
pub trait Extension {
/// The handler for authorization code extensions.
fn authorization(&mut self) -> Option<&mut dyn AuthorizationExtension> {
None
}
/// The handler for access token extensions.
fn access_token(&mut self) -> Option<&mut dyn AccessTokenExtension> {
None
}
/// The handler for client credentials extensions.
fn client_credentials(&mut self) -> Option<&mut dyn ClientCredentialsExtension> {
None
}
}
/// Fuses requests and primitives into a coherent system to give a response.
///
/// There are multiple different valid ways to produce responses and react to internal errors for a
/// single request type. This trait should provide those mechanisms, including trying to recover
/// from primitive errors where appropriate.
///
/// To reduce the number of necessary impls and provide a single interface to a single trait, this
/// trait defines accessor methods for all possibly needed primitives. Note that not all flows
/// actually access all primitives. Thus, an implementation does not necessarily have to return
/// something in `registrar`, `authorizer`, `issuer_mut` but failing to do so will also fail flows
/// that try to use them.
///
/// # Panics
///
/// It is expected that the endpoint primitive functions are consistent, i.e. they don't begin
/// returning `None` after having returned `Some(registrar)` previously for example. This ensures
/// that the checks executed by the flow preparation methods catch missing primitives. When this
/// contract is violated, the execution of a flow may lead to a panic.
pub trait Endpoint<Request: WebRequest> {
/// The error type used as the error representation of each flow.
type Error;
/// A registrar if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires a registrar but does not
/// have any effect on flows that do not require one.
fn registrar(&self) -> Option<&dyn Registrar>;
/// An authorizer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an authorizer but does not
/// have any effect on flows that do not require one.
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer>;
/// An issuer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an issuer but does not have
/// any effect on flows that do not require one.
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer>;
/// Return the system that checks owner consent.
///
/// Returning `None` will implicate failing the authorization code flow but does not have any
/// effect on other flows.
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<Request>>;
/// Determine the required scopes for a request.
///
/// The client must fulfill any one scope, so returning an empty slice will always deny the
/// request.
fn scopes(&mut self) -> Option<&mut dyn Scopes<Request>>;
/// Generate a prototype response.
///
/// The endpoint can rely on this being called at most once for each flow, if it wants
/// to preallocate the response or return a handle on an existing prototype.
fn response(
&mut self, request: &mut Request, kind: Template,
) -> Result<Request::Response, Self::Error>;
/// Wrap an error.
fn error(&mut self, err: OAuthError) -> Self::Error;
/// Wrap an error in the request/response types.
fn web_error(&mut self, err: Request::Error) -> Self::Error;
/// Get the central extension instance of this endpoint.
///
/// Returning `None` is the default implementation and acts as simply not providing any extensions.
fn extension(&mut self) -> Option<&mut dyn Extension> {
None
}
}
impl<'a> Template<'a> {
/// Create an OK template
pub fn new_ok() -> Self {
InnerTemplate::Ok.into()
}
/// Create a bad request template
pub fn new_bad(access_token_error: Option<&'a mut AccessTokenError>) -> Self {
InnerTemplate::BadRequest { access_token_error }.into()
}
/// Create an unauthorized template
pub fn new_unauthorized(
error: Option<ResourceError>, access_token_error: Option<&'a mut AccessTokenError>,
) -> Self {
InnerTemplate::Unauthorized {
error,
access_token_error,
}
.into()
}
/// Create a redirect template
pub fn new_redirect(authorization_error: Option<&'a mut AuthorizationError>) -> Self {
InnerTemplate::Redirect { authorization_error }.into()
}
/// The corresponding status code.
pub fn status(&self) -> ResponseStatus {
match self.inner {
InnerTemplate::Unauthorized {.. } => ResponseStatus::Unauthorized,
InnerTemplate::Redirect {.. } => ResponseStatus::Redirect,
InnerTemplate::BadRequest {.. } => ResponseStatus::BadRequest,
InnerTemplate::Ok => ResponseStatus::Ok,
}
}
/// Supplementary information about an error in the authorization code flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.authorization_error() {
/// eprintln!("[authorization] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/authorization_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn authorization_error(&mut self) -> Option<&mut AuthorizationError> {
match &mut self.inner {
InnerTemplate::Redirect {
authorization_error,..
} => reborrow(authorization_error),
_ => None,
}
}
/// Supplementary information about an error in the access token flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.access_token_error() {
/// eprintln!("[access_code] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/access_token_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn access_token_error(&mut self) -> Option<&mut AccessTokenError> {
match &mut self.inner {
InnerTemplate::Unauthorized {
access_token_error,..
} => reborrow(access_token_error),
InnerTemplate::BadRequest {
access_token_error,..
} => reborrow(access_token_error),
_ => None,
}
}
}
/// Reborrow contained optional reference.
///
/// Slightly tweaked from an `Into` impl: produces an `Option<&'a mut T>` from an `&'a mut Option<&mut T>`.
fn reborrow<'a, T>(opt: &'a mut Option<&mut T>) -> Option<&'a mut T> {
match opt {
// Magically does the correct lifetime coercion.
Some(inner) => Some(inner),
None => None,
}
}
impl<'a, W: WebRequest> WebRequest for &'a mut W {
type Error = W::Error;
type Response = W::Response;
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).query()
}
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).urlbody()
}
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error> {
(**self).authheader()
}
}
impl<'a, R: WebRequest, E: Endpoint<R>> Endpoint<R> for &'a mut E {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl<'a, R: WebRequest, E: Endpoint<R> + 'a> Endpoint<R> for Box<E> {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl Extension for () {}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for &'a mut S {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for Box<S> {
fn | (
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<W: WebRequest> Scopes<W> for [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<W: WebRequest> Scopes<W> for Vec<Scope> {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self.as_slice()
}
}
impl<'a, W: WebRequest> Scopes<W> for &'a [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for &'a mut S {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for Box<S> {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a> From<InnerTemplate<'a>> for Template<'a> {
fn from(inner: InnerTemplate<'a>) -> Self {
Template { inner }
}
}
/// Check if the header is an authorization method
pub fn is_authorization_method<'h>(header: &'h str, method: &'static str) -> Option<&'h str> {
let header_method = header.get(..method.len())?;
if header_method.eq_ignore_ascii_case(method) {
Some(&header[method.l | check_consent | identifier_name |
mod.rs | flows.
//!
//! An endpoint is concerned with executing the abstract behaviours given by the backend in terms
//! of the actions of the endpoint types. This means translating Redirect errors to the correct
//! Redirect http response for example or optionally sending internal errors to loggers. The
//! frontends, which are the bindings to particular server libraries, can instantiate the endpoint
//! api or simple reuse existing types.
//!
//! To ensure adherence to the OAuth 2.0 RFC and to improve general implementations, some control
//! flow of incoming packets is specified here instead of in the frontend implementations. In
//! addition, traits are offered to make this compatible with other endpoints. In theory, this makes
//! endpoints pluggable which could improve testing.
//!
//! Custom endpoint
//! ---------------
//! In order to not place restrictions on the web server library in use, it is possible to
//! implement an endpoint completely with user defined types.
//!
//! This requires custom, related implementations of [`WebRequest`] and [`WebResponse`].
//! _WARNING_: Custom endpoints MUST ensure a secure communication layer with confidential clients.
//! This means using TLS for communication over https.
//!
//! After receiving an authorization grant, access token or access request, initiate the respective
//! flow by collecting the [`Authorizer`], [`Issuer`], and [`Registrar`] instances. For example:
//!
//! [`WebRequest`]: trait.WebRequest.html
//! [`WebResponse`]: trait.WebResponse.html
//! [`Authorizer`]:../../primitives/authorizer/trait.Authorizer.html
//! [`Issuer`]:../../primitives/issuer/trait.Issuer.html
//! [`Registrar`]:../../primitives/registrar/trait.Registrar.html
mod authorization;
mod accesstoken;
mod client_credentials;
mod error;
mod refresh;
mod resource; | mod query;
#[cfg(test)]
mod tests;
use std::borrow::Cow;
use std::marker::PhantomData;
pub use crate::primitives::authorizer::Authorizer;
pub use crate::primitives::issuer::Issuer;
pub use crate::primitives::registrar::Registrar;
pub use crate::primitives::scope::Scope;
use crate::code_grant::resource::{Error as ResourceError};
use crate::code_grant::error::{AuthorizationError, AccessTokenError};
use url::Url;
// Re-export the extension traits under prefixed names.
pub use crate::code_grant::authorization::Extension as AuthorizationExtension;
pub use crate::code_grant::accesstoken::Extension as AccessTokenExtension;
pub use crate::code_grant::client_credentials::Extension as ClientCredentialsExtension;
pub use crate::primitives::registrar::PreGrant;
pub use self::authorization::*;
pub use self::accesstoken::*;
pub use self::client_credentials::ClientCredentialsFlow;
pub use self::error::OAuthError;
pub use self::refresh::RefreshFlow;
pub use self::resource::*;
pub use self::query::*;
/// Answer from OwnerAuthorizer to indicate the owners choice.
pub enum OwnerConsent<Response: WebResponse> {
/// The owner did not authorize the client.
Denied,
/// The owner has not yet decided, i.e. the returned page is a form for the user.
InProgress(Response),
/// Authorization was granted by the specified user.
Authorized(String),
/// An error occurred while checking authorization.
Error(Response::Error),
}
/// Modifiable reason for creating a response to the client.
///
/// Not all responses indicate failure. A redirect will also occur in the regular course of providing an
/// access token to the third party client. When an error is present (see several methods) it is
/// mostly possible to customize it. This hook provides advanced endpoints with the opportunity to
/// set additional parameters and informational messages before they are encoded.
///
/// See the provided methods for more information and examples.
#[derive(Debug)]
pub struct Template<'a> {
inner: InnerTemplate<'a>,
}
/// The general manner of the response.
///
/// These are parallels for HTTP status codes of the same name.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum ResponseStatus {
/// The response is issued because the requesting party was not authorized.
Unauthorized,
/// The response redirects in the code grant flow.
Redirect,
/// The request was malformed.
BadRequest,
/// This response is normal and expected.
Ok,
}
/// Encapsulated different types of responses reasons.
///
/// Each variant contains some form of context information about the response. This can be used
/// purely informationally or, in some cases, provides additional customization points. The addition of
/// fields to some variant context can occur in any major release until `1.0`. It is discouraged to
/// exhaustively match the fields directly. Since some contexts may not permit cloning, the enum will
/// not derive `Clone` until doing so has been shown both feasible and strongly requested. Please open an issue if you
/// think the pros or cons should be evaluated differently.
#[derive(Debug)]
#[non_exhaustive]
enum InnerTemplate<'a> {
/// Authorization to access the resource has not been granted.
Unauthorized {
/// The underlying cause for denying access.
///
/// The http authorization header is to be set according to this field.
#[allow(dead_code)]
error: Option<ResourceError>,
/// Information on an access token error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// Redirect the user-agent to another url.
///
/// The endpoint has the opportunity to inspect and modify error information to some extent.
/// For example to log an error rate or to provide a pointer to a custom human readable
/// explanation page. The response will generally not contain a body.
Redirect {
/// Information on an authorization error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients or resource owners seeking explanation.
authorization_error: Option<&'a mut AuthorizationError>,
},
/// The request did not conform to specification or was otherwise invalid.
///
/// As such, it was not handled further. Some processes still warrant a response body to be
/// set in the case of an invalid request, containing additional information for the client.
/// For example, an authorized client sending a malformed but authenticated request for an
/// access token will receive additional hints on the cause of his mistake.
BadRequest {
/// Information on an invalid-access-token-request error.
///
/// Endpoints may modify this description to add additional explanatory text or a reference
/// uri for clients seeking explanation.
access_token_error: Option<&'a mut AccessTokenError>,
},
/// An expected, normal response.
///
/// The content of the response may require precise semantics to be standard compliant,
/// therefore it is constructed using the `WebResponse` trait methods. Try not to tamper with
/// the format too much, such as unsetting a body etc. after the flow has finished.
Ok,
}
/// A pending solicitation to a resource owner.
///
/// This encapsulates the information available to an [`OwnerSolicitor`] when querying consent
/// information.
///
/// [`OwnerSolicitor`]: trait.OwnerSolicitor.html
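///
/// A short usage sketch (the state string is illustrative):
///
/// ```
/// # use oxide_auth::endpoint::Solicitation;
/// # use oxide_auth::primitives::registrar::PreGrant;
/// fn inspect(grant: &PreGrant) {
///     let solicitation = Solicitation::new(grant).with_state("opaque-state");
///     assert_eq!(solicitation.state(), Some("opaque-state"));
/// }
/// ```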
pub struct Solicitation<'flow> {
pub(crate) grant: Cow<'flow, PreGrant>,
pub(crate) state: Option<Cow<'flow, str>>,
}
impl<'flow> Solicitation<'flow> {
/// Clone the solicitation into an owned structure.
///
/// This mainly helps with sending it across threads.
pub fn into_owned(self) -> Solicitation<'static> {
Solicitation {
grant: Cow::Owned(self.grant.into_owned()),
state: self.state.map(|state| Cow::Owned(state.into_owned())),
}
}
/// Return the pre-grant associated with the request.
///
/// The information in the `PreGrant` is the authoritative information on the client and scopes
/// associated with the request. It has already been validated against those settings and
/// restrictions that were applied when registering the client.
pub fn pre_grant(&self) -> &PreGrant {
self.grant.as_ref()
}
/// The state provided by the client request.
///
/// This will need to be provided to the response back to the client so it must be preserved
/// across a redirect or a consent screen presented by the user agent.
pub fn state(&self) -> Option<&str> {
match self.state {
None => None,
Some(ref state) => Some(&state),
}
}
/// Create a new solicitation request from a pre grant.
///
/// You usually wouldn't need to call this manually as it is called by the endpoint's flow and
/// then handed with all available information to the solicitor.
pub fn new(grant: &'flow PreGrant) -> Self {
Solicitation {
grant: Cow::Borrowed(grant),
state: None,
}
}
/// Add a client state to the solicitation.
pub fn with_state(self, state: &'flow str) -> Self {
Solicitation {
state: Some(Cow::Borrowed(state)),
..self
}
}
}
/// Checks consent with the owner of a resource, identified in a request.
///
/// See [`frontends::simple`] for an implementation that permits arbitrary functions.
///
/// [`frontends::simple`]:../frontends/simple/endpoint/struct.FnSolicitor.html
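///
/// A minimal solicitor that denies every request (a sketch; a real implementation
/// would authenticate the resource owner before deciding):
///
/// ```
/// # use oxide_auth::endpoint::{OwnerConsent, OwnerSolicitor, Solicitation, WebRequest};
/// struct DenyAll;
///
/// impl<R: WebRequest> OwnerSolicitor<R> for DenyAll {
///     fn check_consent(&mut self, _: &mut R, _: Solicitation) -> OwnerConsent<R::Response> {
///         OwnerConsent::Denied
///     }
/// }
/// ```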
pub trait OwnerSolicitor<Request: WebRequest> {
/// Ensure that a user (resource owner) is currently authenticated (for example via a session
/// cookie) and determine if he has agreed to the presented grants.
fn check_consent(&mut self, _: &mut Request, _: Solicitation) -> OwnerConsent<Request::Response>;
}
/// Determine the scopes applying to a request of a resource.
///
/// It is possible to use a slice of [`Scope`]s as an implementation of this trait. You can inspect
/// the request that was used to access the resource for which the scopes are to be determined but
/// should generally avoid doing so. Sometimes the scope depends on external parameters and this is
/// unavoidable, e.g. if the scope is created dynamically from the path of the resource.
///
/// ## Example
///
/// Here's a possible new implementation that allows you to update your scope list at runtime:
///
/// ```
/// # use oxide_auth::endpoint::Scopes;
/// # use oxide_auth::endpoint::WebRequest;
/// use oxide_auth::primitives::scope::Scope;
/// use std::sync::{Arc, RwLock};
///
/// struct MyScopes {
/// update: RwLock<Arc<[Scope]>>,
/// current: Arc<[Scope]>,
/// };
///
/// impl<R: WebRequest> Scopes<R> for MyScopes {
/// fn scopes(&mut self, _: &mut R) -> &[Scope] {
/// let update = self.update.read().unwrap();
/// if !Arc::ptr_eq(&update, &self.current) {
/// self.current = update.clone();
/// }
/// &self.current
/// }
/// }
/// ```
///
/// [`Scope`]:../primitives/scope/struct.Scope.html
pub trait Scopes<Request: WebRequest> {
/// A list of alternative scopes.
///
/// One of the scopes needs to be fulfilled by the access token in the request to grant access.
/// A scope is fulfilled if the set of its parts is a subset of the parts in the grant. If the
/// slice is empty, then no scope can be fulfilled and the request is always blocked.
fn scopes(&mut self, request: &mut Request) -> &[Scope];
}
/// Abstraction of web requests, providing the different accessors and constructors needed by an
/// endpoint. It is assumed to originate from an HTTP request, as defined in the scope of the RFC,
/// but theoretically other requests are possible.
pub trait WebRequest {
/// The error generated from access of malformed or invalid requests.
type Error;
/// The corresponding type of Responses returned from this module.
type Response: WebResponse<Error = Self::Error>;
/// Retrieve a parsed version of the url query.
///
/// An Err return value indicates a malformed query or an otherwise malformed WebRequest. Note
/// that an empty query should result in `Ok(HashMap::new())` instead of an Err.
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Retrieve the parsed `application/x-www-form-urlencoded` body of the request.
///
/// An Err value indicates a malformed body or a different Content-Type.
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error>;
/// Contents of the authorization header or none if none exists. An Err value indicates a
/// malformed header or request.
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error>;
}
/// Response representation into which the Request is transformed by the code_grant types.
///
/// At most one of the methods `body_text`, `body_json` will be called. Some flows will
/// however not call any of those methods.
pub trait WebResponse {
/// The error generated when trying to construct an unhandled or invalid response.
type Error;
/// Set the response status to 200.
fn ok(&mut self) -> Result<(), Self::Error>;
/// A response which will redirect the user-agent to which the response is issued.
fn redirect(&mut self, url: Url) -> Result<(), Self::Error>;
/// Set the response status to 400.
fn client_error(&mut self) -> Result<(), Self::Error>;
/// Set the response status to 401 and add a `WWW-Authenticate` header.
fn unauthorized(&mut self, header_value: &str) -> Result<(), Self::Error>;
/// A pure text response with no special media type set.
fn body_text(&mut self, text: &str) -> Result<(), Self::Error>;
/// JSON response data, with media type `application/json`.
fn body_json(&mut self, data: &str) -> Result<(), Self::Error>;
}
/// Intermediate trait to flow specific extensions.
///
/// The existence of this 1) promotes writing of extensions so that they can be reused independent
/// of endpoint and request types; 2) makes it possible to provide some of these in this library.
///
/// Note that all methods will by default return `None` so that adding to other flows is possible
/// without affecting existing implementations.
pub trait Extension {
/// The handler for authorization code extensions.
fn authorization(&mut self) -> Option<&mut dyn AuthorizationExtension> {
None
}
/// The handler for access token extensions.
fn access_token(&mut self) -> Option<&mut dyn AccessTokenExtension> {
None
}
/// The handler for client credentials extensions.
fn client_credentials(&mut self) -> Option<&mut dyn ClientCredentialsExtension> {
None
}
}
/// Fuses requests and primitives into a coherent system to give a response.
///
/// There are multiple different valid ways to produce responses and react to internal errors for a
/// single request type. This trait should provide those mechanisms, including trying to recover
/// from primitive errors where appropriate.
///
/// To reduce the number of necessary impls and provide a single interface to a single trait, this
/// trait defines accessor methods for all possibly needed primitives. Note that not all flows
/// actually access all primitives. Thus, an implementation does not necessarily have to return
/// something in `registrar`, `authorizer`, `issuer_mut` but failing to do so will also fail flows
/// that try to use them.
///
/// # Panics
///
/// It is expected that the endpoint primitive functions are consistent, i.e. they don't begin
/// returning `None` after having returned `Some(registrar)` previously for example. This ensures
/// that the checks executed by the flow preparation methods catch missing primitives. When this
/// contract is violated, the execution of a flow may lead to a panic.
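///
/// A sketch of a delegating implementation (`MyEndpoint`, `MyRequest`, `MyResponse`
/// and `MyError` are illustrative placeholders, not types from this crate):
///
/// ```ignore
/// impl Endpoint<MyRequest> for MyEndpoint {
///     type Error = MyError;
///
///     fn registrar(&self) -> Option<&dyn Registrar> { Some(&self.registrar) }
///     fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> { Some(&mut self.authorizer) }
///     fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> { Some(&mut self.issuer) }
///     fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<MyRequest>> { Some(&mut self.solicitor) }
///     fn scopes(&mut self) -> Option<&mut dyn Scopes<MyRequest>> { Some(&mut self.scopes) }
///     fn response(&mut self, _: &mut MyRequest, _: Template) -> Result<MyResponse, MyError> {
///         Ok(MyResponse::default())
///     }
///     fn error(&mut self, err: OAuthError) -> MyError { MyError::OAuth(err) }
///     fn web_error(&mut self, err: MyError) -> MyError { err }
/// }
/// ```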
pub trait Endpoint<Request: WebRequest> {
/// The error typed used as the error representation of each flow.
type Error;
/// A registrar if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires a registrar but does not
/// have any effect on flows that do not require one.
fn registrar(&self) -> Option<&dyn Registrar>;
/// An authorizer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an authorizer but does not
/// have any effect on flows that do not require one.
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer>;
/// An issuer if this endpoint can access one.
///
/// Returning `None` will implicate failing any flow that requires an issuer but does not have
/// any effect on flows that do not require one.
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer>;
/// Return the system that checks owner consent.
///
/// Returning `None` will implicate failing the authorization code flow but does not have any
/// effect on other flows.
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<Request>>;
/// Determine the required scopes for a request.
///
/// The client must fulfill any one scope, so returning an empty slice will always deny the
/// request.
fn scopes(&mut self) -> Option<&mut dyn Scopes<Request>>;
/// Generate a prototype response.
///
/// The endpoint can rely on this being called at most once for each flow, if it wants
/// to preallocate the response or return a handle on an existing prototype.
fn response(
&mut self, request: &mut Request, kind: Template,
) -> Result<Request::Response, Self::Error>;
/// Wrap an error.
fn error(&mut self, err: OAuthError) -> Self::Error;
/// Wrap an error in the request/response types.
fn web_error(&mut self, err: Request::Error) -> Self::Error;
/// Get the central extension instance of this endpoint.
///
/// Returning `None` is the default implementation and acts as simply not providing any extensions.
fn extension(&mut self) -> Option<&mut dyn Extension> {
None
}
}
impl<'a> Template<'a> {
/// Create an OK template
pub fn new_ok() -> Self {
InnerTemplate::Ok.into()
}
/// Create a bad request template
pub fn new_bad(access_token_error: Option<&'a mut AccessTokenError>) -> Self {
InnerTemplate::BadRequest { access_token_error }.into()
}
/// Create an unauthorized template
pub fn new_unauthorized(
error: Option<ResourceError>, access_token_error: Option<&'a mut AccessTokenError>,
) -> Self {
InnerTemplate::Unauthorized {
error,
access_token_error,
}
.into()
}
/// Create a redirect template
pub fn new_redirect(authorization_error: Option<&'a mut AuthorizationError>) -> Self {
InnerTemplate::Redirect { authorization_error }.into()
}
/// The corresponding status code.
pub fn status(&self) -> ResponseStatus {
match self.inner {
InnerTemplate::Unauthorized {.. } => ResponseStatus::Unauthorized,
InnerTemplate::Redirect {.. } => ResponseStatus::Redirect,
InnerTemplate::BadRequest {.. } => ResponseStatus::BadRequest,
InnerTemplate::Ok => ResponseStatus::Ok,
}
}
/// Supplementary information about an error in the authorization code flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.authorization_error() {
/// eprintln!("[authorization] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/authorization_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn authorization_error(&mut self) -> Option<&mut AuthorizationError> {
match &mut self.inner {
InnerTemplate::Redirect {
authorization_error,..
} => reborrow(authorization_error),
_ => None,
}
}
/// Supplementary information about an error in the access token flow.
///
/// The referenced object can be inspected and manipulated to provide additional information
/// that is specific to this server or endpoint. Such information could be an error page with
/// explanatory information or a customized message.
///
/// ```
/// # use oxide_auth::endpoint::Template;
/// fn explain(mut template: Template) {
/// if let Some(error) = template.access_token_error() {
/// eprintln!("[access_code] An error occurred: {:?}", error.kind());
/// error.explain("This server is still in its infancy. Sorry.");
/// error.explain_uri("/access_token_error.html".parse().unwrap());
/// }
/// }
/// ```
pub fn access_token_error(&mut self) -> Option<&mut AccessTokenError> {
match &mut self.inner {
InnerTemplate::Unauthorized {
access_token_error,..
} => reborrow(access_token_error),
InnerTemplate::BadRequest {
access_token_error,..
} => reborrow(access_token_error),
_ => None,
}
}
}
/// Reborrow contained optional reference.
///
/// Slightly tweaked from an `Into` impl: produces an `Option<&'a mut T>` from an `&'a mut Option<&mut T>`.
fn reborrow<'a, T>(opt: &'a mut Option<&mut T>) -> Option<&'a mut T> {
match opt {
// Magically does the correct lifetime coercion.
Some(inner) => Some(inner),
None => None,
}
}
impl<'a, W: WebRequest> WebRequest for &'a mut W {
type Error = W::Error;
type Response = W::Response;
fn query(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).query()
}
fn urlbody(&mut self) -> Result<Cow<dyn QueryParameter + 'static>, Self::Error> {
(**self).urlbody()
}
fn authheader(&mut self) -> Result<Option<Cow<str>>, Self::Error> {
(**self).authheader()
}
}
impl<'a, R: WebRequest, E: Endpoint<R>> Endpoint<R> for &'a mut E {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl<'a, R: WebRequest, E: Endpoint<R> + 'a> Endpoint<R> for Box<E> {
type Error = E::Error;
fn registrar(&self) -> Option<&dyn Registrar> {
(**self).registrar()
}
fn authorizer_mut(&mut self) -> Option<&mut dyn Authorizer> {
(**self).authorizer_mut()
}
fn issuer_mut(&mut self) -> Option<&mut dyn Issuer> {
(**self).issuer_mut()
}
fn owner_solicitor(&mut self) -> Option<&mut dyn OwnerSolicitor<R>> {
(**self).owner_solicitor()
}
fn scopes(&mut self) -> Option<&mut dyn Scopes<R>> {
(**self).scopes()
}
fn response(&mut self, request: &mut R, kind: Template) -> Result<R::Response, Self::Error> {
(**self).response(request, kind)
}
fn error(&mut self, err: OAuthError) -> Self::Error {
(**self).error(err)
}
fn web_error(&mut self, err: R::Error) -> Self::Error {
(**self).web_error(err)
}
fn extension(&mut self) -> Option<&mut dyn Extension> {
(**self).extension()
}
}
impl Extension for () {}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for &'a mut S {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<'a, W: WebRequest, S: OwnerSolicitor<W> + 'a + ?Sized> OwnerSolicitor<W> for Box<S> {
fn check_consent(
&mut self, request: &mut W, solicitation: Solicitation,
) -> OwnerConsent<W::Response> {
(**self).check_consent(request, solicitation)
}
}
impl<W: WebRequest> Scopes<W> for [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<W: WebRequest> Scopes<W> for Vec<Scope> {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self.as_slice()
}
}
impl<'a, W: WebRequest> Scopes<W> for &'a [Scope] {
fn scopes(&mut self, _: &mut W) -> &[Scope] {
self
}
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for &'a mut S {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a, W: WebRequest, S: Scopes<W> + 'a + ?Sized> Scopes<W> for Box<S> {
fn scopes(&mut self, request: &mut W) -> &[Scope] {
(**self).scopes(request)
}
}
impl<'a> From<InnerTemplate<'a>> for Template<'a> {
fn from(inner: InnerTemplate<'a>) -> Self {
Template { inner }
}
}
/// Check if the header is an authorization method
pub fn is_authorization_method<'h>(header: &'h str, method: &'static str) -> Option<&'h str> {
let header_method = header.get(..method.len())?;
if header_method.eq_ignore_ascii_case(method) {
Some(&header[method.le | random_line_split |
|
lib.rs | #[macro_use] extern crate bitflags;
#[macro_use] extern crate enum_primitive;
extern crate libc;
pub use libc::{c_void, c_char, c_int, c_long, c_ulong, size_t, c_double, off_t};
use std::convert::From;
#[link(name = "mpg123")]
extern {
pub fn mpg123_init() -> c_int;
pub fn mpg123_exit();
pub fn mpg123_new(decoder: *const c_char, error: *mut c_int) -> *mut Mpg123Handle;
pub fn mpg123_delete(handle: *mut Mpg123Handle);
pub fn mpg123_param(handle: *mut Mpg123Handle, type_: Mpg123Param, value: c_long, fvalue: c_double) -> c_int;
pub fn mpg123_getparam(handle: *mut Mpg123Handle, type_: Mpg123Param, value: *mut c_long, f_value: *mut c_double) -> c_int;
pub fn mpg123_feature(feature: Mpg123Feature) -> c_int;
// Error handling
pub fn mpg123_plain_strerror(errcode: c_int) -> *const c_char;
pub fn mpg123_strerror(handle: *mut Mpg123Handle) -> *const c_char;
pub fn mpg123_errcode(handle: *mut Mpg123Handle) -> Mpg123Error;
// Decoder selection
pub fn mpg123_decoders() -> *const *const c_char;
pub fn mpg123_supported_decoders() -> *const *const c_char;
pub fn mpg123_decoder(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_current_decoder(handle: *mut Mpg123Handle) -> *const c_char;
// Output format
pub fn mpg123_rates(list: *mut *const c_long, count: *mut size_t);
pub fn mpg123_encodings(list: *mut *const c_int, count: *mut size_t);
pub fn mpg123_encsize(encoding: c_int) -> c_int;
pub fn mpg123_format_none(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format_all(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format(handle: *mut Mpg123Handle, rate: c_int, channels: c_int, encodings: c_int) -> c_int;
pub fn mpg123_format_support(handle: *mut Mpg123Handle, rate: c_int, encodings: c_int) -> c_int;
pub fn mpg123_getformat(handle: *mut Mpg123Handle, rate: *mut c_long, channels: *mut c_int, encodings: *mut c_int) -> c_int;
// File input and decoding
pub fn mpg123_open(handle: *mut Mpg123Handle, path: *const c_char) -> c_int;
pub fn mpg123_open_fd(handle: *mut Mpg123Handle, fd: c_int) -> c_int;
pub fn mpg123_open_handle(handle: *mut Mpg123Handle, iohandle: *mut c_void) -> c_int;
pub fn mpg123_open_feed(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_close(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_read(handle: *mut Mpg123Handle, outmem: *mut u8, memsize: size_t, done: *mut size_t) -> c_int;
pub fn mpg123_feed(handle: *mut Mpg123Handle, mem: *const u8, size: size_t) -> c_int;
pub fn mpg123_decode(handle: *mut Mpg123Handle, inmem: *const u8, insize: size_t, outmem: *mut u8, outsize: *mut size_t) -> c_int;
pub fn mpg123_decode_frame(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_decode(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_next(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_framedata(handle: *mut Mpg123Handle, header: *mut c_ulong, bodydata: *mut *mut u8, bodybytes: *mut size_t) -> c_int;
pub fn mpg123_framepos(handle: *mut Mpg123Handle) -> off_t;
// Position and seeking
pub fn mpg123_tell(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tellframe(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tell_stream(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_seek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_feedseek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int, input_offset: *mut off_t) -> off_t;
pub fn mpg123_seek_frame(handle: *mut Mpg123Handle, frameoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_timeframe(handle: *mut Mpg123Handle, sec: c_double) -> off_t;
pub fn mpg123_index(handle: *mut Mpg123Handle, offsets: *mut *const off_t, step: *mut off_t, fill: *mut size_t) -> c_int;
pub fn mpg123_set_index(handle: *mut Mpg123Handle, offsets: *mut off_t, step: off_t, fill: size_t) -> c_int;
// We leave off mpg123_position because it's not stable
// Also everything after mpg123_eq
}
pub enum Mpg123Handle {}
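// A minimal decode-loop sketch over the raw bindings above (a hypothetical
// helper, not part of the mpg123 API: `path` must be a valid NUL-terminated C
// string, most status codes are ignored, and no output format negotiation is
// done beyond skipping the "new format" notification).
#[allow(dead_code)]
unsafe fn decode_file_sketch(path: *const c_char) -> Vec<u8> {
    mpg123_init();
    let mut err: c_int = 0;
    // Null decoder name selects the default decoder.
    let handle = mpg123_new(std::ptr::null(), &mut err);
    mpg123_open(handle, path);
    let mut out = Vec::new();
    let mut buf = [0u8; 4096];
    let mut done: size_t = 0;
    loop {
        let status = mpg123_read(handle, buf.as_mut_ptr(), buf.len() as size_t, &mut done);
        match Mpg123Error::from(status) {
            // Some data was decoded into `buf`.
            Mpg123Error::Ok => out.extend_from_slice(&buf[..done as usize]),
            // The output format changed; a real caller would query it here.
            Mpg123Error::NewFormat => continue,
            // Done, or a real error: stop either way.
            _ => break,
        }
    }
    mpg123_close(handle);
    mpg123_delete(handle);
    out
}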
enum_from_primitive!{
#[repr(C)]
pub enum Mpg123Param {
Verbose,
Flags,
AddFlags,
ForceRate,
DownSample,
Rva,
Downspeed,
Upspeed,
StartFrame,
DecodeFrames,
IcyInternal,
Outscale,
Timeout,
RemoveFlags,
ResyncLimit,
IndexSize,
Preframes,
Feedpool,
Feedbuffer,
}
}
// Enum conversion:
// sed -Ee's@^\s*,?MPG123_([^, ]*),?(\s*=\s*[-x0-9a-fA-F]+)?\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\n\1\2,@' |sed -e's/^/ /' -e's/\s*$//'
// Bitflags conversion:
// sed -Ee's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e's/^/ /'
bitflags!{
// Contents generated using
// sed -Ee's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e's/^/ /'
pub flags Mpg123ParamFlags: c_ulong {
/// Force some mono mode: This is a test bitmask for seeing if
/// any mono forcing is active.
const FLAG_FORCE_MONO = 0x7,
/// Force playback of left channel only.
const FLAG_MONO_LEFT = 0x1,
/// Force playback of right channel only.
const FLAG_MONO_RIGHT = 0x2,
/// Force playback of mixed mono.
const FLAG_MONO_MIX = 0x4,
/// Force stereo output.
const FLAG_FORCE_STEREO = 0x8,
/// Force 8bit formats.
const FLAG_FORCE_8BIT = 0x10,
/// Suppress any printouts (overrules verbose).
const FLAG_QUIET = 0x20,
/// Enable gapless decoding (default on if libmpg123 has
/// support).
const FLAG_GAPLESS = 0x40,
/// Disable resync stream after error.
const FLAG_NO_RESYNC = 0x80,
/// Enable small buffer on non-seekable streams to allow some
/// peek-ahead (for better MPEG sync).
const FLAG_SEEKBUFFER = 0x100,
/// Enable fuzzy seeks (guessing byte offsets or using
/// approximate seek points from Xing TOC)
const FLAG_FUZZY = 0x200,
/// Force floating point output (32 or 64 bits depends on
/// mpg123 internal precision).
const FLAG_FORCE_FLOAT = 0x400,
/// Do not translate ID3 text data to UTF-8. ID3 strings will
/// contain the raw text data, with the first byte containing
/// the ID3 encoding code.
const FLAG_PLAIN_ID3TEXT = 0x800,
/// Ignore any stream length information contained in the
/// stream, which can be contained in a 'TLEN' frame of an
/// ID3v2 tag or a Xing tag
const FLAG_IGNORE_STREAMLENGTH = 0x1000,
/// Do not parse ID3v2 tags, just skip them.
const FLAG_SKIP_ID3V2 = 0x2000,
/// Do not parse the LAME/Xing info frame, treat it as normal
/// MPEG data.
const FLAG_IGNORE_INFOFRAME = 0x4000,
/// Allow automatic internal resampling of any kind (default
/// on if supported). Especially when going lowlevel with
/// replacing output buffer, you might want to unset this
/// flag. Setting MPG123_DOWNSAMPLE or MPG123_FORCE_RATE will
/// override this.
const FLAG_AUTO_RESAMPLE = 0x8000,
/// 7th bit: Enable storage of pictures from tags (ID3v2 APIC).
const FLAG_PICTURE = 0x10000,
}
}
enum_from_primitive!{
#[repr(u64)]
pub enum ParamRVA {
RvaOff,
RvaMix,
RvaAlbum,
}
}
// generated with
// sed -Ee's@^\s*,?MPG123_([^ ]*)(\s*=\s*[x0-9a-fA-F]+)?\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\n\1\2,@' |sed -e's/^/ /'
#[repr(u64)]
pub enum Mpg123Feature {
/// mpg123 expects path names to be given in UTF-8 encoding instead of plain native.
AbiUtf8Open,
/// 8bit output
Output8bit,
/// 6bit output
Output16bit,
/// 32bit output
Output32bit,
/// support for building a frame index for accurate seeking
Index,
/// id3v2 parsing
ParseID3v2,
/// mpeg layer-1 decoder enabled
DecodeLayer1,
/// mpeg layer-2 decoder enabled
DecodeLayer2,
/// mpeg layer-3 decoder enabled
DecodeLayer3,
/// accurate decoder rounding
DecodeAccurate,
/// downsample (sample omit)
DecodeDownsample,
/// flexible rate decoding
DecodeNtoM,
/// ICY support
ParseICY,
/// Reader with timeout (network).
TimeoutRead,
}
#[repr(i32)]
#[derive(Copy,Clone,Debug,PartialEq)]
pub enum Mpg123Error {
/// Message: Track ended. Stop decoding.
Done = -12,
/// Message: Output format will be different on next call. Note
/// that some libmpg123 versions between 1.4.3 and 1.8.0 insist on
/// you calling mpg123_getformat() after getting this message
/// code. Newer versions behave as advertised: You have the
/// chance to call mpg123_getformat(), but you can also just
/// continue decoding and get your data.
NewFormat = -11,
/// Message: For feed reader: "Feed me more!" (call mpg123_feed()
/// or mpg123_decode() with some new input data).
NeedMore = -10,
/// Generic Error
Err = -1,
/// Success
Ok = 0,
/// Unable to set up output format!
BadOutFormat = 1,
/// Invalid channel number specified.
BadChannel = 2,
/// Invalid sample rate specified.
BadRate = 3,
/// Unable to allocate memory for 16 to 8 converter table!
Err16to8Table = 4,
/// Bad parameter id!
BadParam = 5,
/// Bad buffer given -- invalid pointer or too small size.
BadBuffer = 6,
/// Out of memory -- some malloc() failed.
OutOfMem = 7,
/// You didn't initialize the library!
NotInitialized = 8,
/// Invalid decoder choice.
BadDecoder = 9,
/// Invalid mpg123 handle.
BadHandle = 10,
/// Unable to initialize frame buffers (out of memory?).
NoBuffers = 11,
/// Invalid RVA mode.
BadRva = 12,
/// This build doesn't support gapless decoding.
NoGapless = 13,
/// Not enough buffer space.
NoSpace = 14,
/// Incompatible numeric data types.
BadTypes = 15,
/// Bad equalizer band.
BadBand = 16,
/// Null pointer given where valid storage address needed.
ErrNull = 17,
/// Error reading the stream.
ErrReader = 18,
/// Cannot seek from end (end is not known).
NoSeekFromEnd = 19,
/// Invalid 'whence' for seek function.
BadWhence = 20,
/// Build does not support stream timeouts.
NoTimeout = 21,
/// File access error.
BadFile = 22,
/// Seek not supported by stream.
NoSeek = 23,
/// No stream opened.
NoReader = 24,
/// Bad parameter handle.
BadPars = 25,
/// Bad parameters to mpg123_index() and mpg123_set_index()
BadIndexPar = 26,
/// Lost track in bytestream and did not try to resync.
OutOfSync = 27,
/// Resync failed to find valid MPEG data.
ResyncFail = 28,
/// No 8bit encoding possible.
No8bit = 29,
/// Stack alignment error
BadAlign = 30,
/// Null input buffer with non-zero size...
NullBuffer = 31,
/// Relative seek not possible (screwed up file offset)
NoRelseek = 32,
/// You gave a null pointer somewhere where you shouldn't have.
NullPointer = 33,
/// Bad key value given.
BadKey = 34,
/// No frame index in this build.
NoIndex = 35,
/// Something with frame index went wrong.
IndexFail = 36,
/// Something prevents a proper decoder setup
BadDecoderSetup = 37,
/// This feature has not been built into libmpg123.
MissingFeature = 38,
/// A bad value has been given, somewhere.
BadValue = 39,
/// Low-level seek failed.
LseekFailed = 40,
/// Custom I/O not prepared.
BadCustomIo = 41,
/// Offset value overflow during translation of large file API
/// calls -- your client program cannot handle that large file.
LfsOverflow = 42,
/// Some integer overflow.
IntOverflow = 43,
}
impl From<c_int> for Mpg123Error {
fn from(v: c_int) -> Self {
use Mpg123Error::*;
match v {
-12 => Done,
-11 => NewFormat,
-10 => NeedMore,
-1 => Err,
0 => Ok,
1 => BadOutFormat,
2 => BadChannel,
3 => BadRate,
4 => Err16to8Table,
5 => BadParam,
6 => BadBuffer,
7 => OutOfMem,
8 => NotInitialized,
9 => BadDecoder,
10 => BadHandle,
11 => NoBuffers,
12 => BadRva,
13 => NoGapless,
14 => NoSpace,
15 => BadTypes,
16 => BadBand,
17 => ErrNull,
18 => ErrReader,
19 => NoSeekFromEnd,
20 => BadWhence,
21 => NoTimeout,
22 => BadFile,
23 => NoSeek,
24 => NoReader,
25 => BadPars,
26 => BadIndexPar,
27 => OutOfSync,
28 => ResyncFail,
29 => No8bit,
30 => BadAlign,
31 => NullBuffer,
32 => NoRelseek,
33 => NullPointer,
34 => BadKey,
35 => NoIndex,
36 => IndexFail,
37 => BadDecoderSetup,
38 => MissingFeature,
39 => BadValue,
40 => LseekFailed,
41 => BadCustomIo,
42 => LfsOverflow,
43 => IntOverflow,
_ => Err,
}
}
}
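// A small sanity check for the conversion above (a sketch; it relies only on
// the derived `PartialEq`/`Debug` and the fallback-to-`Err` behaviour).
#[cfg(test)]
mod error_from_tests {
    use super::Mpg123Error;

    #[test]
    fn maps_known_and_unknown_codes() {
        assert_eq!(Mpg123Error::from(-12), Mpg123Error::Done);
        assert_eq!(Mpg123Error::from(0), Mpg123Error::Ok);
        // Any unrecognized code falls back to the generic error.
        assert_eq!(Mpg123Error::from(999), Mpg123Error::Err);
    }
}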
// This encoding is disastrous, but we have what we have.
bitflags!{
pub flags Enc : i32 {
const ENC_8 = 0x00f,
const ENC_16 = 0x040,
const ENC_24 = 0x4000,
const ENC_32 = 0x100,
const ENC_SIGNED = 0x080,
const ENC_FLOAT = 0xe00,
// Specific formats
const ENC_UNSIGNED_8 = 0x01,
const ENC_SIGNED_8 = ENC_SIGNED.bits | 0x02,
const ENC_ULAW_8 = 0x04,
const ENC_ALAW_8 = 0x08,
const ENC_SIGNED_16 = 0x10 | ENC_16.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_16 = 0x20 | ENC_16.bits,
const ENC_SIGNED_32 = 0x1000 | ENC_32.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_32 = 0x2000 | ENC_32.bits,
const ENC_SIGNED_24 = 0x1000 | ENC_24.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_24 = 0x2000 | ENC_24.bits,
const ENC_FLOAT_32 = 0x200,
const ENC_FLOAT_64 = 0x400,
const ENC_ANY = (ENC_UNSIGNED_8.bits | ENC_SIGNED_8.bits
| ENC_ULAW_8.bits | ENC_ALAW_8.bits
| ENC_SIGNED_16.bits | ENC_UNSIGNED_16.bits
| ENC_SIGNED_32.bits | ENC_UNSIGNED_32.bits
| ENC_SIGNED_24.bits | ENC_UNSIGNED_24.bits
| ENC_FLOAT_32.bits | ENC_FLOAT_64.bits),
}
}
impl Enc {
/// Return the number of bytes per mono sample
pub fn | (&self) -> usize {
unsafe {
mpg123_encsize(self.bits()) as usize
}
}
}
bitflags!{
pub flags ChannelCount : i32 {
const CHAN_MONO = 1,
const CHAN_STEREO = 2,
}
}
| size | identifier_name |
lib.rs | #[macro_use] extern crate bitflags;
#[macro_use] extern crate enum_primitive;
extern crate libc;
pub use libc::{c_void, c_char, c_int, c_long, c_ulong, size_t, c_double, off_t};
use std::convert::From;
#[link(name = "mpg123")]
extern {
pub fn mpg123_init() -> c_int;
pub fn mpg123_exit();
pub fn mpg123_new(decoder: *const c_char, error: *mut c_int) -> *mut Mpg123Handle;
pub fn mpg123_delete(handle: *mut Mpg123Handle);
pub fn mpg123_param(handle: *mut Mpg123Handle, type_: Mpg123Param, value: c_long, fvalue: c_double) -> c_int;
pub fn mpg123_getparam(handle: *mut Mpg123Handle, type_: Mpg123Param, value: *mut c_long, f_value: *mut c_double) -> c_int;
pub fn mpg123_feature(feature: Mpg123Feature) -> c_int;
// Error handling
pub fn mpg123_plain_strerror(errcode: c_int) -> *const c_char; |
// Decoder selection
pub fn mpg123_decoders() -> *const *const c_char;
pub fn mpg123_supported_decoders() -> *const *const c_char;
pub fn mpg123_decoder(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_current_decoder(handle: *mut Mpg123Handle) -> *const c_char;
// Output format
pub fn mpg123_rates(list: *mut *const c_long, count: *mut size_t);
pub fn mpg123_encodings(list: *mut *const c_int, count: *mut size_t);
pub fn mpg123_encsize(encoding: c_int) -> c_int;
pub fn mpg123_format_none(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format_all(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_format(handle: *mut Mpg123Handle, rate: c_int, channels: c_int, encodings: c_int) -> c_int;
pub fn mpg123_format_support(handle: *mut Mpg123Handle, rate: c_int, encodings: c_int) -> c_int;
pub fn mpg123_getformat(handle: *mut Mpg123Handle, rate: *mut c_long, channels: *mut c_int, encodings: *mut c_int) -> c_int;
// File input and decoding
pub fn mpg123_open(handle: *mut Mpg123Handle, path: *const c_char) -> c_int;
pub fn mpg123_open_fd(handle: *mut Mpg123Handle, fd: c_int) -> c_int;
pub fn mpg123_open_handle(handle: *mut Mpg123Handle, iohandle: *mut c_void) -> c_int;
pub fn mpg123_open_feed(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_close(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_read(handle: *mut Mpg123Handle, outmem: *mut u8, memsize: size_t, done: *mut size_t) -> c_int;
pub fn mpg123_feed(handle: *mut Mpg123Handle, mem: *const u8, size: size_t) -> c_int;
pub fn mpg123_decode(handle: *mut Mpg123Handle, inmem: *const u8, insize: size_t, outmem: *mut u8, outsize: *mut size_t) -> c_int;
pub fn mpg123_decode_frame(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_decode(handle: *mut Mpg123Handle, num: *mut off_t, audio: *mut *const u8, bytes: *mut size_t) -> c_int;
pub fn mpg123_framebyframe_next(handle: *mut Mpg123Handle) -> c_int;
pub fn mpg123_framedata(handle: *mut Mpg123Handle, header: *mut c_ulong, bodydata: *mut *mut u8, bodybytes: *mut size_t) -> c_int;
pub fn mpg123_framepos(handle: *mut Mpg123Handle) -> off_t;
// Position and seeking
pub fn mpg123_tell(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tellframe(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_tell_stream(handle: *mut Mpg123Handle) -> off_t;
pub fn mpg123_seek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_feedseek(handle: *mut Mpg123Handle, sampleoff: off_t, whence: c_int, input_offset: *mut off_t) -> off_t;
pub fn mpg123_seek_frame(handle: *mut Mpg123Handle, frameoff: off_t, whence: c_int) -> off_t;
pub fn mpg123_timeframe(handle: *mut Mpg123Handle, sec: c_double) -> off_t;
pub fn mpg123_index(handle: *mut Mpg123Handle, offsets: *mut *const off_t, step: *mut off_t, fill: *mut size_t) -> c_int;
pub fn mpg123_set_index(handle: *mut Mpg123Handle, offsets: *mut off_t, step: off_t, fill: size_t) -> c_int;
// We leave off mpg123_position because it's not stable
// Also everything after mpg123_eq
}
pub enum Mpg123Handle {}
enum_from_primitive!{
#[repr(C)]
pub enum Mpg123Param {
Verbose,
Flags,
AddFlags,
ForceRate,
DownSample,
Rva,
Downspeed,
Upspeed,
StartFrame,
DecodeFrames,
IcyInternal,
Outscale,
Timeout,
RemoveFlags,
ResyncLimit,
IndexSize,
Preframes,
Feedpool,
Feedbuffer,
}
}
// Enum conversion:
// sed -Ee's@^\s*,?MPG123_([^, ]*),?(\s*=\s*[-x0-9a-fA-F]+)?\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\n\1\2,@' |sed -e's/^/ /' -e's/\s*$//'
// Bitflags conversion:
// sed -Ee's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e's/^/ /'
bitflags!{
// Contents generated using
// sed -Ee's@^\s*,?MPG123_([^ ]*)\s*=\s*(0x[0-9a-fA-F]+)\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\nconst \1 = \2;@' |sed -e's/^/ /'
pub flags Mpg123ParamFlags: c_ulong {
/// Force some mono mode: This is a test bitmask for seeing if
/// any mono forcing is active.
const FLAG_FORCE_MONO = 0x7,
/// Force playback of left channel only.
const FLAG_MONO_LEFT = 0x1,
/// Force playback of right channel only.
const FLAG_MONO_RIGHT = 0x2,
/// Force playback of mixed mono.
const FLAG_MONO_MIX = 0x4,
/// Force stereo output.
const FLAG_FORCE_STEREO = 0x8,
/// Force 8bit formats.
const FLAG_FORCE_8BIT = 0x10,
/// Suppress any printouts (overrules verbose).
const FLAG_QUIET = 0x20,
/// Enable gapless decoding (default on if libmpg123 has
/// support).
const FLAG_GAPLESS = 0x40,
/// Disable resync stream after error.
const FLAG_NO_RESYNC = 0x80,
/// Enable small buffer on non-seekable streams to allow some
/// peek-ahead (for better MPEG sync).
const FLAG_SEEKBUFFER = 0x100,
/// Enable fuzzy seeks (guessing byte offsets or using
/// approximate seek points from Xing TOC)
const FLAG_FUZZY = 0x200,
/// Force floating point output (32 or 64 bits depends on
/// mpg123 internal precision).
const FLAG_FORCE_FLOAT = 0x400,
/// Do not translate ID3 text data to UTF-8. ID3 strings will
/// contain the raw text data, with the first byte containing
/// the ID3 encoding code.
const FLAG_PLAIN_ID3TEXT = 0x800,
/// Ignore any stream length information contained in the
/// stream, which can be contained in a 'TLEN' frame of an
/// ID3v2 tag or a Xing tag
const FLAG_IGNORE_STREAMLENGTH = 0x1000,
/// Do not parse ID3v2 tags, just skip them.
const FLAG_SKIP_ID3V2 = 0x2000,
/// Do not parse the LAME/Xing info frame, treat it as normal
/// MPEG data.
const FLAG_IGNORE_INFOFRAME = 0x4000,
/// Allow automatic internal resampling of any kind (default
/// on if supported). Especially when going lowlevel with
/// replacing output buffer, you might want to unset this
/// flag. Setting MPG123_DOWNSAMPLE or MPG123_FORCE_RATE will
/// override this.
const FLAG_AUTO_RESAMPLE = 0x8000,
/// 7th bit: Enable storage of pictures from tags (ID3v2 APIC).
const FLAG_PICTURE = 0x10000,
}
}
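// A sketch of enabling quiet, gapless decoding via the parameter interface
// (a hypothetical helper; assumes `handle` came from `mpg123_new` and passes
// the raw status code back unchanged).
#[allow(dead_code)]
unsafe fn enable_quiet_gapless(handle: *mut Mpg123Handle) -> c_int {
    let flags = FLAG_QUIET | FLAG_GAPLESS;
    mpg123_param(handle, Mpg123Param::Flags, flags.bits() as c_long, 0.0)
}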
enum_from_primitive!{
#[repr(u64)]
pub enum ParamRVA {
RvaOff,
RvaMix,
RvaAlbum,
}
}
// generated with
// sed -Ee's@^\s*,?MPG123_([^ ]*)(\s*=\s*[x0-9a-fA-F]+)?\s*/\*\*<[ 01]*(.*?)\s*\*/@/// \3\n\1\2,@' |sed -e's/^/ /'
#[repr(u64)]
pub enum Mpg123Feature {
/// mpg123 expects path names to be given in UTF-8 encoding instead of plain native.
AbiUtf8Open,
/// 8bit output
Output8bit,
/// 6bit output
Output16bit,
/// 32bit output
Output32bit,
/// support for building a frame index for accurate seeking
Index,
/// id3v2 parsing
ParseID3v2,
/// mpeg layer-1 decoder enabled
DecodeLayer1,
/// mpeg layer-2 decoder enabled
DecodeLayer2,
/// mpeg layer-3 decoder enabled
DecodeLayer3,
/// accurate decoder rounding
DecodeAccurate,
/// downsample (sample omit)
DecodeDownsample,
/// flexible rate decoding
DecodeNtoM,
/// ICY support
ParseICY,
/// Reader with timeout (network).
TimeoutRead,
}
#[repr(i32)]
#[derive(Copy,Clone,Debug,PartialEq)]
pub enum Mpg123Error {
/// Message: Track ended. Stop decoding.
Done = -12,
/// Message: Output format will be different on next call. Note
/// that some libmpg123 versions between 1.4.3 and 1.8.0 insist on
/// you calling mpg123_getformat() after getting this message
/// code. Newer versions behave as advertised: You have the
/// chance to call mpg123_getformat(), but you can also just
/// continue decoding and get your data.
NewFormat = -11,
/// Message: For feed reader: "Feed me more!" (call mpg123_feed()
/// or mpg123_decode() with some new input data).
NeedMore = -10,
/// Generic Error
Err = -1,
/// Success
Ok = 0,
/// Unable to set up output format!
BadOutFormat = 1,
/// Invalid channel number specified.
BadChannel = 2,
/// Invalid sample rate specified.
BadRate = 3,
/// Unable to allocate memory for 16 to 8 converter table!
Err16to8Table = 4,
/// Bad parameter id!
BadParam = 5,
/// Bad buffer given -- invalid pointer or too small size.
BadBuffer = 6,
/// Out of memory -- some malloc() failed.
OutOfMem = 7,
/// You didn't initialize the library!
NotInitialized = 8,
/// Invalid decoder choice.
BadDecoder = 9,
/// Invalid mpg123 handle.
BadHandle = 10,
/// Unable to initialize frame buffers (out of memory?).
NoBuffers = 11,
/// Invalid RVA mode.
BadRva = 12,
/// This build doesn't support gapless decoding.
NoGapless = 13,
/// Not enough buffer space.
NoSpace = 14,
/// Incompatible numeric data types.
BadTypes = 15,
/// Bad equalizer band.
BadBand = 16,
/// Null pointer given where valid storage address needed.
ErrNull = 17,
/// Error reading the stream.
ErrReader = 18,
/// Cannot seek from end (end is not known).
NoSeekFromEnd = 19,
/// Invalid 'whence' for seek function.
BadWhence = 20,
/// Build does not support stream timeouts.
NoTimeout = 21,
/// File access error.
BadFile = 22,
/// Seek not supported by stream.
NoSeek = 23,
/// No stream opened.
NoReader = 24,
/// Bad parameter handle.
BadPars = 25,
/// Bad parameters to mpg123_index() and mpg123_set_index()
BadIndexPar = 26,
/// Lost track in bytestream and did not try to resync.
OutOfSync = 27,
/// Resync failed to find valid MPEG data.
ResyncFail = 28,
/// No 8bit encoding possible.
No8bit = 29,
/// Stack alignment error
BadAlign = 30,
/// Null input buffer with non-zero size...
NullBuffer = 31,
/// Relative seek not possible (screwed up file offset)
NoRelseek = 32,
/// You gave a null pointer somewhere where you shouldn't have.
NullPointer = 33,
/// Bad key value given.
BadKey = 34,
/// No frame index in this build.
NoIndex = 35,
/// Something with frame index went wrong.
IndexFail = 36,
/// Something prevents a proper decoder setup
BadDecoderSetup = 37,
/// This feature has not been built into libmpg123.
MissingFeature = 38,
/// A bad value has been given, somewhere.
BadValue = 39,
/// Low-level seek failed.
LseekFailed = 40,
/// Custom I/O not prepared.
BadCustomIo = 41,
/// Offset value overflow during translation of large file API
/// calls -- your client program cannot handle that large file.
LfsOverflow = 42,
/// Some integer overflow.
IntOverflow = 43,
}
impl From<c_int> for Mpg123Error {
fn from(v: c_int) -> Self {
use Mpg123Error::*;
match v {
-12 => Done,
-11 => NewFormat,
-10 => NeedMore,
-1 => Err,
0 => Ok,
1 => BadOutFormat,
2 => BadChannel,
3 => BadRate,
4 => Err16to8Table,
5 => BadParam,
6 => BadBuffer,
7 => OutOfMem,
8 => NotInitialized,
9 => BadDecoder,
10 => BadHandle,
11 => NoBuffers,
12 => BadRva,
13 => NoGapless,
14 => NoSpace,
15 => BadTypes,
16 => BadBand,
17 => ErrNull,
18 => ErrReader,
19 => NoSeekFromEnd,
20 => BadWhence,
21 => NoTimeout,
22 => BadFile,
23 => NoSeek,
24 => NoReader,
25 => BadPars,
26 => BadIndexPar,
27 => OutOfSync,
28 => ResyncFail,
29 => No8bit,
30 => BadAlign,
31 => NullBuffer,
32 => NoRelseek,
33 => NullPointer,
34 => BadKey,
35 => NoIndex,
36 => IndexFail,
37 => BadDecoderSetup,
38 => MissingFeature,
39 => BadValue,
40 => LseekFailed,
41 => BadCustomIo,
42 => LfsOverflow,
43 => IntOverflow,
_ => Err,
}
}
}
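// Illustrative check (not in the original bindings): raw C return codes map
// onto the enum via `From<c_int>`, with anything unrecognized folding to the
// generic `Err` arm. Assumes `c_int` is `i32` on the target platform.
#[cfg(test)]
mod error_code_tests {
    use super::*;

    #[test]
    fn from_c_int_maps_known_and_unknown_codes() {
        assert_eq!(Mpg123Error::from(-10), Mpg123Error::NeedMore);
        assert_eq!(Mpg123Error::from(42), Mpg123Error::LfsOverflow);
        assert_eq!(Mpg123Error::from(999), Mpg123Error::Err); // fallback arm
    }
}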
// This encoding is disastrous, but we have what we have.
bitflags!{
pub flags Enc : i32 {
const ENC_8 = 0x00f,
const ENC_16 = 0x040,
const ENC_24 = 0x4000,
const ENC_32 = 0x100,
const ENC_SIGNED = 0x080,
const ENC_FLOAT = 0xe00,
// Specific formats
const ENC_UNSIGNED_8 = 0x01,
const ENC_SIGNED_8 = ENC_SIGNED.bits | 0x02,
const ENC_ULAW_8 = 0x04,
const ENC_ALAW_8 = 0x08,
const ENC_SIGNED_16 = 0x10 | ENC_16.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_16 = 0x20 | ENC_16.bits,
const ENC_SIGNED_32 = 0x1000 | ENC_32.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_32 = 0x2000 | ENC_32.bits,
const ENC_SIGNED_24 = 0x1000 | ENC_24.bits | ENC_SIGNED.bits,
const ENC_UNSIGNED_24 = 0x2000 | ENC_24.bits,
const ENC_FLOAT_32 = 0x200,
const ENC_FLOAT_64 = 0x400,
const ENC_ANY = (ENC_UNSIGNED_8.bits | ENC_SIGNED_8.bits
| ENC_ULAW_8.bits | ENC_ALAW_8.bits
| ENC_SIGNED_16.bits | ENC_UNSIGNED_16.bits
| ENC_SIGNED_32.bits | ENC_UNSIGNED_32.bits
| ENC_SIGNED_24.bits | ENC_UNSIGNED_24.bits
| ENC_FLOAT_32.bits | ENC_FLOAT_64.bits),
}
}
impl Enc {
/// Return the number of bytes per mono sample
pub fn size(&self) -> usize {
unsafe {
mpg123_encsize(self.bits()) as usize
}
}
}
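// Minimal sketch (not part of the original bindings) of how the composite
// encoding bits decompose: a concrete format such as ENC_SIGNED_16 carries
// both the 16-bit width bit and the sign bit, so `contains` can answer
// format queries directly.
#[cfg(test)]
mod enc_tests {
    use super::*;

    #[test]
    fn composite_formats_carry_their_component_bits() {
        assert!(ENC_SIGNED_16.contains(ENC_16));
        assert!(ENC_SIGNED_16.contains(ENC_SIGNED));
        assert!(!ENC_UNSIGNED_16.contains(ENC_SIGNED));
        assert!(ENC_ANY.contains(ENC_FLOAT_64));
    }
}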
bitflags!{
pub flags ChannelCount : i32 {
const CHAN_MONO = 1,
const CHAN_STEREO = 2,
}
} | pub fn mpg123_strerror(handle: *mut Mpg123Handle) -> *const c_char;
pub fn mpg123_errcode(handle: *mut Mpg123Handle) -> Mpg123Error; | random_line_split |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or None if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI | game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
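// `mut_two` (a helper from this crate's func module) returns two
// disjoint mutable borrows into the same slice, which plain indexing
// could not hand out simultaneously under the borrow checker.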
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
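// Worked example (illustrative): chasing from (0, 0) toward (3, -4)
// gives the vector (3, -4) with length 5; normalizing yields
// (0.6, -0.8), which rounds to the single grid step (1, -1).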
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
} | objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
}); | random_line_split |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or None if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) |
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
} | identifier_body |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or None if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => |
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn ai_basic(monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| {} | conditional_block |
combat.rs | use super::*;
use rand::Rng;
use crate::r#const::*;
use crate::types::*;
use crate::types::Tcod;
use crate::types::Messages;
use crate::func::*;
use crate::types::object::Object;
use tcod::input::{self, Event, Mouse};
use tcod::colors::{self, Color};
/// returns a clicked monster inside FOV up to a range, or None if right-clicked
pub fn target_monster(tcod: &mut Tcod,
objects: &[Object],
game: &mut Game,
max_range: Option<f32>)
-> Option<usize> {
loop {
match target_tile(tcod, objects, game, max_range) {
Some((x, y)) => {
// return the first clicked monster, otherwise continue looping
for (id, obj) in objects.iter().enumerate() {
if obj.pos() == (x, y) && obj.fighter.is_some() && id != PLAYER {
return Some(id)
}
}
}
None => return None,
}
}
}
/// return the position of a tile left-clicked in player's FOV (optionally in a
/// range), or None if right-clicked.
pub fn target_tile(tcod: &mut Tcod,
objects: &[Object], game: &mut Game,
max_range: Option<f32>)
-> Option<(i32, i32)> {
use tcod::input::KeyCode::Escape;
loop {
// render the screen. this erases the inventory and shows the names of
// objects under the mouse.
tcod.root.flush();
let event = input::check_for_event(input::KEY_PRESS | input::MOUSE).map(|e| e.1);
let mut key = None;
match event {
Some(Event::Mouse(m)) => tcod.mouse = m,
Some(Event::Key(k)) => key = Some(k),
None => {}
}
render_all(tcod, objects, game, false);
let (x, y) = (tcod.mouse.cx as i32, tcod.mouse.cy as i32);
// accept the target if the player clicked in FOV, and in case a range
// is specified, if it's in that range
let in_fov = (x < MAP_WIDTH) && (y < MAP_HEIGHT) && tcod.fov.is_in_fov(x, y);
let in_range = max_range.map_or(
true, |range| objects[PLAYER].distance(x, y) <= range);
if tcod.mouse.lbutton_pressed && in_fov && in_range {
return Some((x, y))
}
let escape = key.map_or(false, |k| k.code == Escape);
if tcod.mouse.rbutton_pressed || escape {
return None // cancel if the player right-clicked or pressed Escape
}
}
}
/// find closest enemy, up to a maximum range, and in the player's FOV
pub fn closest_monster(max_range: i32, objects: &mut [Object], tcod: &Tcod) -> Option<usize> {
let mut closest_enemy = None;
let mut closest_dist = (max_range + 1) as f32; // start with (slightly more than) maximum range
for (id, object) in objects.iter().enumerate() {
if (id != PLAYER) && object.fighter.is_some() && object.ai.is_some() &&
tcod.fov.is_in_fov(object.x, object.y)
{
// calculate distance between this object and the player
let dist = objects[PLAYER].distance_to(object);
if dist < closest_dist { // it's closer, so remember it
closest_enemy = Some(id);
closest_dist = dist;
}
}
}
closest_enemy
}
pub fn cast_fireball(_inventory_id: usize, objects: &mut [Object],game: &mut Game, tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target tile to throw a fireball at
game.log.add("Left-click a target tile for the fireball, or right-click to cancel.",
colors::LIGHT_CYAN);
let (x, y) = match target_tile(tcod, objects, game, None) {
Some(tile_pos) => tile_pos,
None => return UseResult::Cancelled,
};
game.log.add(format!("The fireball explodes, burning everything within {} tiles!", FIREBALL_RADIUS),
colors::ORANGE);
let mut xp_to_gain = 0;
for (id, obj) in objects.iter_mut().enumerate() {
if obj.distance(x, y) <= FIREBALL_RADIUS as f32 && obj.fighter.is_some() {
game.log.add(format!("The {} gets burned for {} hit points.", obj.name, FIREBALL_DAMAGE),
colors::ORANGE);
if let Some(xp) = obj.take_damage(FIREBALL_DAMAGE, game) {
// Don't reward the player for burning themself!
if id != PLAYER {
xp_to_gain += xp;
}
}
}
}
objects[PLAYER].fighter.as_mut().unwrap().xp += xp_to_gain;
UseResult::UsedUp
}
pub fn cast_heal(_inventory_id: usize, objects: &mut [Object], game: &mut Game, _tcod: &mut Tcod)
-> UseResult
{
// heal the player
let player = &mut objects[PLAYER];
if let Some(fighter) = player.fighter {
if fighter.hp == player.max_hp(game) {
game.log.add("You are already at full health.", colors::RED);
return UseResult::Cancelled;
}
game.log.add("Your wounds start to feel better!", colors::LIGHT_VIOLET);
player.heal(HEAL_AMOUNT, game);
return UseResult::UsedUp;
}
UseResult::Cancelled
}
pub fn cast_lightning(_inventory_id: usize, objects: &mut [Object], game: &mut Game, tcod: &mut Tcod) -> UseResult
{
// find closest enemy (inside a maximum range) and damage it
let monster_id = closest_monster(LIGHTNING_RANGE, objects, tcod);
if let Some(monster_id) = monster_id {
// zap it!
game.log.add(format!("A lightning bolt strikes the {} with a loud thunder! \
The damage is {} hit points.",
objects[monster_id].name, LIGHTNING_DAMAGE),
colors::LIGHT_BLUE);
objects[monster_id].take_damage(LIGHTNING_DAMAGE, game);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn cast_confuse(_inventory_id: usize, objects: &mut [Object], game: &mut Game,tcod: &mut Tcod)
-> UseResult
{
// ask the player for a target to confuse
game.log.add("Left-click an enemy to confuse it, or right-click to cancel.", colors::LIGHT_CYAN);
let monster_id = target_monster(tcod, objects, game, Some(CONFUSE_RANGE as f32));
if let Some(monster_id) = monster_id {
let old_ai = objects[monster_id].ai.take().unwrap_or(Ai::Basic);
// replace the monster's AI with a "confused" one; after
// some turns it will restore the old AI
objects[monster_id].ai = Some(Ai::Confused {
previous_ai: Box::new(old_ai),
num_turns: CONFUSE_NUM_TURNS,
});
game.log.add(format!("The eyes of {} look vacant, as he starts to stumble around!",
objects[monster_id].name),
colors::LIGHT_GREEN);
UseResult::UsedUp
} else { // no enemy found within maximum range
game.log.add("No enemy is close enough to strike.", colors::RED);
UseResult::Cancelled
}
}
pub fn player_death(player: &mut Object, messages: &mut Messages) {
// the game ended!
// TODO Replace with game.log.add()
message(messages, "You died!", colors::DARK_RED);
// for added effect, transform the player into a corpse!
player.char = CORPSE;
player.color = colors::DARK_RED;
}
pub fn monster_death(monster: &mut Object, messages: &mut Messages) {
// transform it into a nasty corpse! it doesn't block, can't be
// attacked and doesn't move
// TODO Replace with game.log.add()
// message(messages, format!("{} is dead!", monster.name), colors::ORANGE);
message(messages, format!("{} is dead! You gain {} experience points.",
monster.name, monster.fighter.unwrap().xp), colors::ORANGE);
monster.char = CORPSE;
monster.color = colors::DARK_RED;
monster.blocks = false;
monster.fighter = None;
monster.ai = None;
monster.name = format!("remains of {}", monster.name);
}
pub fn player_move_or_attack(dx: i32, dy: i32, objects: &mut [Object], game: &mut Game) {
// the coordinates the player is moving to/attacking
let x = objects[PLAYER].x + dx;
let y = objects[PLAYER].y + dy;
// try to find an attackable object there
let target_id = objects.iter().position(|object| {
object.fighter.is_some() && object.pos() == (x, y)
});
// attack if target found, move otherwise
match target_id {
Some(target_id) => {
let (player, target) = mut_two(PLAYER, target_id, objects);
player.attack(target, game);
}
None => {
move_by(PLAYER, dx, dy, &mut game.map, objects);
}
}
}
pub fn ai_take_turn(monster_id: usize, objects: &mut [Object], game: &mut Game, fov_map: &FovMap) {
use Ai::*;
if let Some(ai) = objects[monster_id].ai.take() {
let new_ai = match ai {
Basic => ai_basic(monster_id, game, objects, fov_map),
Confused{previous_ai, num_turns} => ai_confused(
monster_id, &mut game.map, objects, &mut game.log, previous_ai, num_turns)
};
objects[monster_id].ai = Some(new_ai);
}
}
pub fn | (monster_id: usize, game: &mut Game, objects: &mut [Object], fov_map: &FovMap) -> Ai {
// a basic monster takes its turn. If you can see it, it can see you
let (monster_x, monster_y) = objects[monster_id].pos();
if fov_map.is_in_fov(monster_x, monster_y) {
if objects[monster_id].distance_to(&objects[PLAYER]) >= 2.0 {
// move towards player if far away
let (player_x, player_y) = objects[PLAYER].pos();
move_towards(monster_id, player_x, player_y, &mut game.map, objects);
} else if objects[PLAYER].fighter.map_or(false, |f| f.hp > 0) {
// close enough, attack! (if the player is still alive.)
let (monster, player) = mut_two(monster_id, PLAYER, objects);
monster.attack(player, game);
}
}
Ai::Basic
}
pub fn ai_confused(monster_id: usize, map: &Map, objects: &mut [Object], messages: &mut Messages,
previous_ai: Box<Ai>, num_turns: i32) -> Ai {
if num_turns >= 0 { // still confused...
// move in a random direction, and decrease the number of turns confused
move_by(monster_id,
rand::thread_rng().gen_range(-1, 2),
rand::thread_rng().gen_range(-1, 2),
map,
objects);
Ai::Confused{previous_ai: previous_ai, num_turns: num_turns - 1}
} else { // restore the previous AI (this one will be deleted)
// TODO Replace with game.log.add()
message(messages, format!("The {} is no longer confused!",
objects[monster_id].name),
colors::RED);
*previous_ai
}
}
pub fn move_towards(id: usize, target_x: i32, target_y: i32, map: &Map, objects: &mut [Object]) {
// vector from this object to the target, and distance
let dx = target_x - objects[id].x;
let dy = target_y - objects[id].y;
let distance = ((dx.pow(2) + dy.pow(2)) as f32).sqrt();
// normalize it to length 1 (preserving direction), then round it and
// convert to integer so the movement is restricted to the map grid
let dx = (dx as f32 / distance).round() as i32;
let dy = (dy as f32 / distance).round() as i32;
move_by(id, dx, dy, map, objects);
}
| ai_basic | identifier_name |
allocator.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use std::any::Any;
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::VecDeque;
use std::marker::PhantomData;
use columnar::{Columnar, ColumnarStack};
use communication::{Pushable, Pullable};
use networking::networking::MessageHeader;
use std::default::Default;
use drain::DrainExt;
// The Communicator trait presents the interface a worker has to the outside world.
// The worker can see its index, the total number of peers, and acquire channels to and from the other workers.
// There is an assumption that each worker performs the same channel allocation logic; things go wrong otherwise.
pub trait Communicator: 'static {
fn index(&self) -> u64; // number out of peers
fn peers(&self) -> u64; // number of peers
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>);
}
// TODO : Would be nice if Communicator had associated types for its Pushable and Pullable types,
// TODO : but they would have to be generic over T, with the current set-up. Might require HKT?
// impl<'a, C: Communicator + 'a> Communicator for &'a mut C {
// fn index(&self) -> u64 { (**self).index() }
// fn peers(&self) -> u64 { (**self).peers() }
// fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) { (**self).new_channel() }
// }
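// Hedged usage sketch (not in the original file): every Communicator hands
// out one Box<Pushable<T>> per peer plus a single Box<Pullable<T>>, and
// indexing the pusher vector by `index()` addresses a specific worker. This
// assumes the payload type (u64 here) satisfies the Send + Columnar + Any
// bounds on `new_channel`.
#[allow(dead_code)]
fn demo_self_send<C: Communicator>(comm: &mut C) {
    let (mut pushers, mut pullable) = comm.new_channel::<u64>();
    let me = comm.index() as usize;
    pushers[me].push(42); // enqueue a message addressed to ourselves
    assert_eq!(pullable.pull(), Some(42)); // and drain it back out
}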
// The simplest communicator remains worker-local and just queues sent messages.
pub struct ThreadCommunicator;
impl Communicator for ThreadCommunicator {
fn index(&self) -> u64 { 0 }
fn | (&self) -> u64 { 1 }
fn new_channel<T: 'static>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let shared = Rc::new(RefCell::new(VecDeque::<T>::new()));
return (vec![Box::new(shared.clone()) as Box<Pushable<T>>], Box::new(shared.clone()) as Box<Pullable<T>>)
}
}
// A specific Communicator for inter-thread intra-process communication
pub struct ProcessCommunicator {
inner: ThreadCommunicator, // inner ThreadCommunicator
index: u64, // number out of peers
peers: u64, // number of peer allocators (for typed channel allocation).
allocated: u64, // indicates how many have been allocated (locally).
channels: Arc<Mutex<Vec<Box<Any+Send>>>>, // Box<Any+Send> -> Box<Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>>
}
impl ProcessCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ThreadCommunicator { &mut self.inner }
pub fn new_vector(count: u64) -> Vec<ProcessCommunicator> {
let channels = Arc::new(Mutex::new(Vec::new()));
return (0..count).map(|index| ProcessCommunicator {
inner: ThreadCommunicator,
index: index,
peers: count,
allocated: 0,
channels: channels.clone(),
}).collect();
}
}
impl Communicator for ProcessCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut channels = self.channels.lock().ok().expect("mutex error?");
if self.allocated == channels.len() as u64 { // we need a new channel...
let mut senders = Vec::new();
let mut receivers = Vec::new();
for _ in (0..self.peers) {
let (s, r): (Sender<T>, Receiver<T>) = channel();
senders.push(s);
receivers.push(r);
}
let mut to_box = Vec::new();
for recv in receivers.drain_temp() {
to_box.push(Some((senders.clone(), recv)));
}
channels.push(Box::new(to_box));
}
match channels[self.allocated as usize].downcast_mut::<(Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>)>() {
Some(ref mut vector) => {
self.allocated += 1;
let (mut send, recv) = vector[self.index as usize].take().unwrap();
let mut temp = Vec::new();
for s in send.drain_temp() { temp.push(Box::new(s) as Box<Pushable<T>>); }
return (temp, Box::new(recv) as Box<Pullable<T>>)
}
_ => { panic!("unable to cast channel correctly"); }
}
}
}
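// Sketch (not in the original file) of the allocation discipline the comment
// at the top of this file insists on: every worker must call `new_channel`
// the same number of times in the same order, because the shared `channels`
// vector is matched up purely by position. Assumes u64 meets the trait's
// channel bounds.
#[allow(dead_code)]
fn demo_process_pair() {
    let mut comms = ProcessCommunicator::new_vector(2);
    let mut worker1 = comms.pop().unwrap(); // index 1
    let mut worker0 = comms.pop().unwrap(); // index 0
    // Both workers allocate "channel 0"; each receives senders to every peer.
    let (mut sends0, _recv0) = worker0.new_channel::<u64>();
    let (_sends1, mut recv1) = worker1.new_channel::<u64>();
    sends0[1].push(7); // worker 0 addresses worker 1
    assert_eq!(recv1.pull(), Some(7));
}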
// A communicator intended for binary channels (networking, pipes, shared memory)
pub struct BinaryCommunicator {
pub inner: ProcessCommunicator, // inner ProcessCommunicator (use for process-local channels)
pub index: u64, // index of this worker
pub peers: u64, // number of peer workers
pub graph: u64, // identifier for the current graph
pub allocated: u64, // indicates how many channels have been allocated (locally).
// for loading up state in the networking threads.
pub writers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>)>>, // (index, back-to-worker)
pub readers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>, Receiver<Vec<u8>>)>>, // (index, data-to-worker, back-from-worker)
pub senders: Vec<Sender<(MessageHeader, Vec<u8>)>> // for sending bytes!
}
impl BinaryCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ProcessCommunicator { &mut self.inner }
}
// A Communicator backed by Sender<Vec<u8>>/Receiver<Vec<u8>> pairs (e.g. networking, shared memory, files, pipes)
impl Communicator for BinaryCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut pushers: Vec<Box<Pushable<T>>> = Vec::new(); // built-up vector of Box<Pushable<T>> to return
// we'll need process-local channels as well (no self-loop binary connection in this design; perhaps should allow)
let inner_peers = self.inner.peers();
let (inner_sends, inner_recv) = self.inner.new_channel();
// prep a pushable for each endpoint, multiplied by inner_peers
for (index, writer) in self.writers.iter().enumerate() {
for _ in (0..inner_peers) {
let (s,r) = channel(); // generate a binary (Vec<u8>) channel pair of (back_to_worker, back_from_net)
let target_index = if index as u64 >= (self.index * inner_peers) { index as u64 + inner_peers } else { index as u64 };
println!("init'ing send channel: ({} {} {})", self.index, self.graph, self.allocated);
writer.send(((self.index, self.graph, self.allocated), s)).unwrap();
let header = MessageHeader {
graph: self.graph,
channel: self.allocated,
source: self.index,
target: target_index,
length: 0,
};
pushers.push(Box::new(BinaryPushable::new(header, self.senders[index].clone(), r)));
}
}
// splice inner_sends into the vector of pushables
for (index, writer) in inner_sends.into_iter().enumerate() {
pushers.insert((self.index * inner_peers) as usize + index, writer);
}
// prep a Box<Pullable<T>> using inner_recv and fresh registered pullables
let (send,recv) = channel(); // binary channel from binary listener to BinaryPullable<T>
let mut pullsends = Vec::new();
for reader in self.readers.iter() {
let (s,r) = channel();
pullsends.push(s);
println!("init'ing recv channel: ({} {} {})", self.index, self.graph, self.allocated);
reader.send(((self.index, self.graph, self.allocated), send.clone(), r)).unwrap();
}
let pullable = Box::new(BinaryPullable {
inner: inner_recv,
senders: pullsends,
receiver: recv,
stack: Default::default(),
});
self.allocated += 1;
return (pushers, pullable);
}
}
struct BinaryPushable<T: Columnar> {
header: MessageHeader,
sender: Sender<(MessageHeader, Vec<u8>)>, // targets for each remote destination
receiver: Receiver<Vec<u8>>, // source of empty binary vectors
phantom: PhantomData<T>,
buffer: Vec<u8>,
stack: <T as Columnar>::Stack,
}
impl<T: Columnar> BinaryPushable<T> {
pub fn new(header: MessageHeader, sender: Sender<(MessageHeader, Vec<u8>)>, receiver: Receiver<Vec<u8>>) -> BinaryPushable<T> {
BinaryPushable {
header: header,
sender: sender,
receiver: receiver,
phantom: PhantomData,
buffer: Vec::new(),
stack: Default::default(),
}
}
}
impl<T: Columnar + 'static> Pushable<T> for BinaryPushable<T> {
#[inline]
fn push(&mut self, data: T) {
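// Recycle a buffer the network thread has handed back, if any;
// otherwise start from a fresh allocation.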
let mut bytes = if let Some(buffer) = self.receiver.try_recv().ok() { buffer } else { Vec::new() };
bytes.clear();
self.stack.push(data);
self.stack.encode(&mut bytes).unwrap();
let mut header = self.header;
header.length = bytes.len() as u64;
self.sender.send((header, bytes)).ok();
}
}
struct BinaryPullable<T: Columnar> {
inner: Box<Pullable<T>>, // inner pullable (e.g. intra-process typed queue)
senders: Vec<Sender<Vec<u8>>>, // places to put used binary vectors
receiver: Receiver<Vec<u8>>, // source of serialized buffers
stack: <T as Columnar>::Stack,
}
impl<T: Columnar + 'static> Pullable<T> for BinaryPullable<T> {
#[inline]
fn pull(&mut self) -> Option<T> {
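// Process-local messages take priority; only when none are queued do we
// decode one serialized buffer, returning the spent allocation for reuse.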
if let Some(data) = self.inner.pull() { Some(data) }
else if let Some(bytes) = self.receiver.try_recv().ok() {
self.stack.decode(&mut &bytes[..]).unwrap();
self.senders[0].send(bytes).unwrap(); // TODO : Not clear where bytes came from; find out!
self.stack.pop()
}
else { None }
}
}
| peers | identifier_name |
allocator.rs | use std::rc::Rc;
use std::cell::RefCell;
use std::sync::{Arc, Mutex};
use std::any::Any;
use std::sync::mpsc::{Sender, Receiver, channel};
use std::collections::VecDeque;
use std::marker::PhantomData;
use columnar::{Columnar, ColumnarStack};
use communication::{Pushable, Pullable};
use networking::networking::MessageHeader;
use std::default::Default;
use drain::DrainExt;
// The Communicator trait presents the interface a worker has to the outside world.
// The worker can see its index, the total number of peers, and acquire channels to and from the other workers.
// There is an assumption that each worker performs the same channel allocation logic; things go wrong otherwise.
pub trait Communicator: 'static {
fn index(&self) -> u64; // number out of peers
fn peers(&self) -> u64; // number of peers
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>);
}
// TODO : Would be nice if Communicator had associated types for its Pushable and Pullable types,
// TODO : but they would have to be generic over T, with the current set-up. Might require HKT?
// impl<'a, C: Communicator + 'a> Communicator for &'a mut C {
// fn index(&self) -> u64 { (**self).index() }
// fn peers(&self) -> u64 { (**self).peers() }
// fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) { (**self).new_channel() }
// }
// The simplest communicator remains worker-local and just queues sent messages.
pub struct ThreadCommunicator;
impl Communicator for ThreadCommunicator {
fn index(&self) -> u64 { 0 }
fn peers(&self) -> u64 { 1 }
fn new_channel<T: 'static>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let shared = Rc::new(RefCell::new(VecDeque::<T>::new()));
return (vec![Box::new(shared.clone()) as Box<Pushable<T>>], Box::new(shared.clone()) as Box<Pullable<T>>)
}
}
// A specific Communicator for inter-thread intra-process communication
pub struct ProcessCommunicator {
inner: ThreadCommunicator, // inner ThreadCommunicator
index: u64, // number out of peers
peers: u64, // number of peer allocators (for typed channel allocation).
allocated: u64, // indicates how many have been allocated (locally).
channels: Arc<Mutex<Vec<Box<Any+Send>>>>, // Box<Any+Send> -> Box<Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>>
}
impl ProcessCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ThreadCommunicator { &mut self.inner }
pub fn new_vector(count: u64) -> Vec<ProcessCommunicator> {
let channels = Arc::new(Mutex::new(Vec::new()));
return (0..count).map(|index| ProcessCommunicator {
inner: ThreadCommunicator,
index: index,
peers: count,
allocated: 0,
channels: channels.clone(),
}).collect();
}
}
impl Communicator for ProcessCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut channels = self.channels.lock().ok().expect("mutex error?");
if self.allocated == channels.len() as u64 { // we need a new channel...
let mut senders = Vec::new();
let mut receivers = Vec::new();
for _ in (0..self.peers) {
let (s, r): (Sender<T>, Receiver<T>) = channel();
senders.push(s);
receivers.push(r);
}
let mut to_box = Vec::new();
for recv in receivers.drain_temp() {
to_box.push(Some((senders.clone(), recv)));
}
channels.push(Box::new(to_box));
}
match channels[self.allocated as usize].downcast_mut::<(Vec<Option<(Vec<Sender<T>>, Receiver<T>)>>)>() {
Some(ref mut vector) => {
self.allocated += 1;
let (mut send, recv) = vector[self.index as usize].take().unwrap();
let mut temp = Vec::new();
for s in send.drain_temp() { temp.push(Box::new(s) as Box<Pushable<T>>); }
return (temp, Box::new(recv) as Box<Pullable<T>>)
}
_ => { panic!("unable to cast channel correctly"); }
}
}
}
// A communicator intended for binary channels (networking, pipes, shared memory)
pub struct BinaryCommunicator {
pub inner: ProcessCommunicator, // inner ProcessCommunicator (use for process-local channels)
pub index: u64, // index of this worker
pub peers: u64, // number of peer workers
pub graph: u64, // identifier for the current graph
pub allocated: u64, // indicates how many channels have been allocated (locally).
// for loading up state in the networking threads.
pub writers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>)>>, // (index, back-to-worker)
pub readers: Vec<Sender<((u64, u64, u64), Sender<Vec<u8>>, Receiver<Vec<u8>>)>>, // (index, data-to-worker, back-from-worker)
pub senders: Vec<Sender<(MessageHeader, Vec<u8>)>> // for sending bytes!
}
impl BinaryCommunicator {
pub fn inner<'a>(&'a mut self) -> &'a mut ProcessCommunicator { &mut self.inner }
}
// A Communicator backed by Sender<Vec<u8>>/Receiver<Vec<u8>> pairs (e.g. networking, shared memory, files, pipes)
impl Communicator for BinaryCommunicator {
fn index(&self) -> u64 { self.index }
fn peers(&self) -> u64 { self.peers }
fn new_channel<T:Send+Columnar+Any>(&mut self) -> (Vec<Box<Pushable<T>>>, Box<Pullable<T>>) {
let mut pushers: Vec<Box<Pushable<T>>> = Vec::new(); // built-up vector of Box<Pushable<T>> to return
// we'll need process-local channels as well (no self-loop binary connection in this design; perhaps should allow)
let inner_peers = self.inner.peers();
let (inner_sends, inner_recv) = self.inner.new_channel();
// prep a pushable for each endpoint, multiplied by inner_peers
for (index, writer) in self.writers.iter().enumerate() {
for _ in (0..inner_peers) {
let (s,r) = channel(); // generate a binary (Vec<u8>) channel pair of (back_to_worker, back_from_net)
let target_index = if index as u64 >= (self.index * inner_peers) { index as u64 + inner_peers } else { index as u64 };
println!("init'ing send channel: ({} {} {})", self.index, self.graph, self.allocated);
writer.send(((self.index, self.graph, self.allocated), s)).unwrap();
let header = MessageHeader {
graph: self.graph,
channel: self.allocated,
source: self.index,
target: target_index,
length: 0,
};
pushers.push(Box::new(BinaryPushable::new(header, self.senders[index].clone(), r)));
}
}
// splice inner_sends into the vector of pushables
for (index, writer) in inner_sends.into_iter().enumerate() {
pushers.insert((self.index * inner_peers) as usize + index, writer);
}
// prep a Box<Pullable<T>> using inner_recv and fresh registered pullables
let (send,recv) = channel(); // binary channel from binary listener to BinaryPullable<T>
let mut pullsends = Vec::new();
for reader in self.readers.iter() {
let (s,r) = channel();
pullsends.push(s);
println!("init'ing recv channel: ({} {} {})", self.index, self.graph, self.allocated);
reader.send(((self.index, self.graph, self.allocated), send.clone(), r)).unwrap();
}
let pullable = Box::new(BinaryPullable {
inner: inner_recv,
senders: pullsends,
receiver: recv,
stack: Default::default(),
});
self.allocated += 1;
return (pushers, pullable);
}
}
struct BinaryPushable<T: Columnar> {
header: MessageHeader,
sender: Sender<(MessageHeader, Vec<u8>)>, // targets for each remote destination
receiver: Receiver<Vec<u8>>, // source of empty binary vectors
phantom: PhantomData<T>,
buffer: Vec<u8>,
stack: <T as Columnar>::Stack,
}
impl<T: Columnar> BinaryPushable<T> {
pub fn new(header: MessageHeader, sender: Sender<(MessageHeader, Vec<u8>)>, receiver: Receiver<Vec<u8>>) -> BinaryPushable<T> {
BinaryPushable {
            header: header,
            sender: sender,
            receiver: receiver,
            phantom: PhantomData,
            buffer: Vec::new(),
            stack: Default::default(),
        }
    }
}
impl<T:Columnar+'static> Pushable<T> for BinaryPushable<T> {
#[inline]
fn push(&mut self, data: T) {
let mut bytes = if let Some(buffer) = self.receiver.try_recv().ok() { buffer } else { Vec::new() };
bytes.clear();
self.stack.push(data);
self.stack.encode(&mut bytes).unwrap();
let mut header = self.header;
header.length = bytes.len() as u64;
self.sender.send((header, bytes)).ok();
}
}
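// --- illustrative sketch (added; not part of the original source) ---
// push() above recycles byte buffers returned by the writer thread before
// serializing. The same idea without Columnar, framing payloads as
// (length, bytes):
#[cfg(test)]
mod framed_push_sketch {
    use std::sync::mpsc::{channel, Receiver, Sender};

    struct FramedPusher {
        out: Sender<(u64, Vec<u8>)>, // frames to the writer thread
        recycled: Receiver<Vec<u8>>, // empty buffers coming back
    }

    impl FramedPusher {
        fn push(&mut self, payload: &[u8]) {
            // reuse a returned buffer when one is waiting, else allocate
            let mut buf = self.recycled.try_recv().unwrap_or_default();
            buf.clear();
            buf.extend_from_slice(payload);
            let len = buf.len() as u64;
            self.out.send((len, buf)).ok(); // writer gone => drop silently
        }
    }

    #[test]
    fn frames_and_recycles() {
        let (out_tx, out_rx) = channel();
        let (back_tx, back_rx) = channel();
        let mut p = FramedPusher { out: out_tx, recycled: back_rx };
        p.push(b"hello");
        let (len, buf) = out_rx.recv().unwrap();
        assert_eq!(len, 5);
        back_tx.send(buf).unwrap(); // hand the buffer back for reuse
        p.push(b"world");
        assert_eq!(out_rx.recv().unwrap().0, 5);
    }
}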
struct BinaryPullable<T: Columnar> {
inner: Box<Pullable<T>>, // inner pullable (e.g. intra-process typed queue)
senders: Vec<Sender<Vec<u8>>>, // places to put used binary vectors
receiver: Receiver<Vec<u8>>, // source of serialized buffers
stack: <T as Columnar>::Stack,
}
impl<T:Columnar+'static> Pullable<T> for BinaryPullable<T> {
#[inline]
fn pull(&mut self) -> Option<T> {
if let Some(data) = self.inner.pull() { Some(data) }
else if let Some(bytes) = self.receiver.try_recv().ok() {
self.stack.decode(&mut &bytes[..]).unwrap();
self.senders[0].send(bytes).unwrap(); // TODO : Not clear where bytes came from; find out!
self.stack.pop()
}
else { None }
}
}
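// --- illustrative sketch (added; not part of the original source) ---
// pull() above prefers the process-local typed queue and only then decodes a
// serialized frame. The same two-tier order with a VecDeque standing in for
// the inner pullable and utf8 strings standing in for Columnar decoding:
#[cfg(test)]
mod two_tier_pull_sketch {
    use std::collections::VecDeque;
    use std::sync::mpsc::{channel, Receiver};

    fn pull(local: &mut VecDeque<String>, network: &Receiver<Vec<u8>>) -> Option<String> {
        if let Some(data) = local.pop_front() {
            return Some(data); // typed, intra-process data wins
        }
        if let Ok(bytes) = network.try_recv() {
            return Some(String::from_utf8(bytes).expect("valid utf8"));
        }
        None
    }

    #[test]
    fn local_first_then_network() {
        let (tx, rx) = channel();
        let mut local = VecDeque::new();
        local.push_back("typed".to_string());
        tx.send(b"bytes".to_vec()).unwrap();
        assert_eq!(pull(&mut local, &rx).as_deref(), Some("typed"));
        assert_eq!(pull(&mut local, &rx).as_deref(), Some("bytes"));
        assert_eq!(pull(&mut local, &rx), None);
    }
}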
load_balancer.rs
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to endpoints in turns.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
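// --- illustrative sketch (added; not part of the original source) ---
// The `#[serde(default)]` on `policy` means an empty YAML document yields
// ROUND_ROBIN. A stand-alone mirror of the two types (requires the serde and
// serde_yaml crates; the module keeps the names from shadowing anything):
#[cfg(test)]
mod yaml_default_sketch {
    use serde::Deserialize;

    #[derive(Debug, Deserialize, PartialEq)]
    enum Policy {
        #[serde(rename = "ROUND_ROBIN")]
        RoundRobin,
        #[serde(rename = "RANDOM")]
        Random,
    }

    impl Default for Policy {
        fn default() -> Self {
            Policy::RoundRobin
        }
    }

    #[derive(Debug, Deserialize, PartialEq)]
    struct Config {
        #[serde(default)]
        policy: Policy,
    }

    #[test]
    fn missing_policy_defaults_to_round_robin() {
        let empty: Config = serde_yaml::from_str("{}").unwrap();
        assert_eq!(empty.policy, Policy::RoundRobin);
        let random: Config = serde_yaml::from_str("policy: RANDOM").unwrap();
        assert_eq!(random.policy, Policy::Random);
    }
}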
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
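// --- illustrative sketch (added; not part of the original source) ---
// The chooser is fetch_add modulo the endpoint count; Relaxed suffices
// because the counter orders nothing besides itself:
#[cfg(test)]
mod round_robin_counter_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};

    fn next_index(counter: &AtomicUsize, num_endpoints: usize) -> usize {
        counter.fetch_add(1, Ordering::Relaxed) % num_endpoints
    }

    #[test]
    fn cycles_through_endpoints() {
        let counter = AtomicUsize::new(0);
        let picks: Vec<usize> = (0..6).map(|_| next_index(&counter, 3)).collect();
        assert_eq!(picks, vec![0, 1, 2, 0, 1, 2]);
    }
}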
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let idx = (&mut thread_rng()).gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
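// --- illustrative sketch (added; not part of the original source) ---
// gen_range(0..n) is uniform, so over many packets each endpoint should see
// roughly the same share; a quick statistical sanity check:
#[cfg(test)]
mod random_choice_sketch {
    use rand::{thread_rng, Rng};

    #[test]
    fn picks_are_roughly_uniform() {
        let mut counts = [0usize; 3];
        let mut rng = thread_rng();
        for _ in 0..30_000 {
            counts[rng.gen_range(0..3)] += 1;
        }
        // each of the 3 endpoints expects ~10_000 picks
        assert!(counts.iter().all(|&c| c > 8_000));
    }
}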
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
#[test]
fn convert_proto_config() {
let test_cases = vec![
(
"RandomPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
            if let Some(expected) = expected {
                assert_eq!(expected, result.unwrap(), "{}", name);
            }
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq!= &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
}
load_balancer.rs
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to endpoints in turns.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let idx = (&mut thread_rng()).gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
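// --- illustrative sketch (added; not part of the original source) ---
// read() either rewrites the endpoint set or (for other filters) drops the
// packet by returning None. A simplified stand-in trait shows how such
// filters compose into a chain:
#[cfg(test)]
mod filter_chain_sketch {
    trait ReadFilter {
        fn read(&self, endpoints: Vec<String>) -> Option<Vec<String>>;
    }

    struct KeepFirst;
    impl ReadFilter for KeepFirst {
        fn read(&self, mut endpoints: Vec<String>) -> Option<Vec<String>> {
            endpoints.truncate(1); // a degenerate "load balancer"
            Some(endpoints)
        }
    }

    struct DropIfEmpty;
    impl ReadFilter for DropIfEmpty {
        fn read(&self, endpoints: Vec<String>) -> Option<Vec<String>> {
            if endpoints.is_empty() { None } else { Some(endpoints) }
        }
    }

    #[test]
    fn chain_runs_left_to_right() {
        let chain: Vec<Box<dyn ReadFilter>> = vec![Box::new(DropIfEmpty), Box::new(KeepFirst)];
        let mut eps = vec!["10.0.0.1:8080".to_string(), "10.0.0.2:8080".to_string()];
        for f in &chain {
            match f.read(eps) {
                Some(next) => eps = next,
                None => panic!("packet dropped"),
            }
        }
        assert_eq!(eps, vec!["10.0.0.1:8080".to_string()]);
    }
}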
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
#[test]
fn convert_proto_config() {
let test_cases = vec![
(
"RandomPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
if let Some(expected) = expected {
assert_eq!(expected, result.unwrap(), "{}", name);
}
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq!= &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
}
load_balancer.rs
/*
* Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::convert::TryFrom;
use std::sync::atomic::{AtomicUsize, Ordering};
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use crate::{config::UpstreamEndpoints, filters::prelude::*, map_proto_enum};
crate::include_proto!("quilkin.extensions.filters.load_balancer.v1alpha1");
use self::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::Policy as ProtoPolicy, LoadBalancer as ProtoConfig,
};
/// Policy represents how a [`LoadBalancerFilter`] distributes
/// packets across endpoints.
#[derive(Debug, Deserialize, Serialize, Eq, PartialEq)]
pub enum Policy {
/// Send packets to endpoints in turns.
#[serde(rename = "ROUND_ROBIN")]
RoundRobin,
/// Send packets to endpoints chosen at random.
#[serde(rename = "RANDOM")]
Random,
}
impl Default for Policy {
fn default() -> Self {
Policy::RoundRobin
}
}
/// Config represents configuration for a [`LoadBalancerFilter`].
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Config {
#[serde(default)]
policy: Policy,
}
impl TryFrom<ProtoConfig> for Config {
type Error = ConvertProtoConfigError;
fn try_from(p: ProtoConfig) -> Result<Self, Self::Error> {
let policy = p
.policy
.map(|policy| {
map_proto_enum!(
value = policy.value,
field = "policy",
proto_enum_type = ProtoPolicy,
target_enum_type = Policy,
variants = [RoundRobin, Random]
)
})
.transpose()?
.unwrap_or_else(Policy::default);
Ok(Self { policy })
}
}
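// --- illustrative sketch (added; not part of the original source) ---
// map_proto_enum! ultimately maps a raw protobuf i32 onto the typed enum and
// rejects unknown values. A hand-written equivalent (discriminant values 0/1
// are assumed here; the real ones come from the generated ProtoPolicy):
#[cfg(test)]
mod proto_enum_sketch {
    use std::convert::TryFrom;

    #[derive(Debug, PartialEq)]
    enum Policy {
        RoundRobin,
        Random,
    }

    impl TryFrom<i32> for Policy {
        type Error = String;
        fn try_from(value: i32) -> Result<Self, Self::Error> {
            match value {
                0 => Ok(Policy::RoundRobin),
                1 => Ok(Policy::Random),
                invalid => Err(format!("invalid value {} for field `policy`", invalid)),
            }
        }
    }

    #[test]
    fn rejects_unknown_discriminants() {
        assert_eq!(Policy::try_from(1), Ok(Policy::Random));
        assert!(Policy::try_from(42).is_err());
    }
}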
/// EndpointChooser chooses from a set of endpoints that a proxy is connected to.
trait EndpointChooser: Send + Sync {
/// choose_endpoints asks for the next endpoint(s) to use.
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints);
}
/// RoundRobinEndpointChooser chooses endpoints in round-robin order.
pub struct RoundRobinEndpointChooser {
next_endpoint: AtomicUsize,
}
impl RoundRobinEndpointChooser {
fn new() -> Self {
RoundRobinEndpointChooser {
next_endpoint: AtomicUsize::new(0),
}
}
}
impl EndpointChooser for RoundRobinEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
let count = self.next_endpoint.fetch_add(1, Ordering::Relaxed);
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let num_endpoints = endpoints.size();
endpoints.keep(count % num_endpoints)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// RandomEndpointChooser chooses endpoints in random order.
pub struct RandomEndpointChooser;
impl EndpointChooser for RandomEndpointChooser {
fn choose_endpoints(&self, endpoints: &mut UpstreamEndpoints) {
// Note: Unwrap is safe here because the index is guaranteed to be in range.
let idx = (&mut thread_rng()).gen_range(0..endpoints.size());
endpoints.keep(idx)
.expect("BUG: unwrap should have been safe because index into endpoints list should be in range");
}
}
/// Creates instances of LoadBalancerFilter.
#[derive(Default)]
pub struct LoadBalancerFilterFactory;
/// LoadBalancerFilter load balances packets over the upstream endpoints.
#[crate::filter("quilkin.extensions.filters.load_balancer.v1alpha1.LoadBalancer")]
struct LoadBalancerFilter {
endpoint_chooser: Box<dyn EndpointChooser>,
}
impl FilterFactory for LoadBalancerFilterFactory {
fn name(&self) -> &'static str {
LoadBalancerFilter::FILTER_NAME
}
fn create_filter(&self, args: CreateFilterArgs) -> Result<Box<dyn Filter>, Error> {
let config: Config = self
.require_config(args.config)?
.deserialize::<Config, ProtoConfig>(self.name())?;
let endpoint_chooser: Box<dyn EndpointChooser> = match config.policy {
Policy::RoundRobin => Box::new(RoundRobinEndpointChooser::new()),
Policy::Random => Box::new(RandomEndpointChooser),
};
Ok(Box::new(LoadBalancerFilter { endpoint_chooser }))
}
}
impl Filter for LoadBalancerFilter {
fn read(&self, mut ctx: ReadContext) -> Option<ReadResponse> {
self.endpoint_chooser.choose_endpoints(&mut ctx.endpoints);
Some(ctx.into())
}
}
#[cfg(test)]
mod tests {
use std::collections::HashSet;
use std::convert::TryFrom;
use std::net::SocketAddr;
use super::quilkin::extensions::filters::load_balancer::v1alpha1::{
load_balancer::{Policy as ProtoPolicy, PolicyValue},
LoadBalancer as ProtoConfig,
};
use super::{Config, Policy};
use crate::cluster::Endpoint;
use crate::config::Endpoints;
use crate::filters::{
extensions::load_balancer::LoadBalancerFilterFactory, CreateFilterArgs, Filter,
FilterFactory, ReadContext,
};
use prometheus::Registry;
fn create_filter(config: &str) -> Box<dyn Filter> {
let factory = LoadBalancerFilterFactory;
factory
.create_filter(CreateFilterArgs::fixed(
Registry::default(),
Some(&serde_yaml::from_str(config).unwrap()),
))
.unwrap()
}
fn get_response_addresses(
filter: &dyn Filter,
input_addresses: &[SocketAddr],
) -> Vec<SocketAddr> {
filter
.read(ReadContext::new(
Endpoints::new(
input_addresses
.iter()
.map(|addr| Endpoint::from_address(*addr))
.collect(),
)
.unwrap()
.into(),
"127.0.0.1:8080".parse().unwrap(),
vec![],
))
.unwrap()
.endpoints
.iter()
.map(|ep| ep.address)
.collect::<Vec<_>>()
}
    #[test]
    fn convert_proto_config() {
        let test_cases = vec![
            (
                "RandomPolicy",
                ProtoConfig {
                    policy: Some(PolicyValue {
value: ProtoPolicy::Random as i32,
}),
},
Some(Config {
policy: Policy::Random,
}),
),
(
"RoundRobinPolicy",
ProtoConfig {
policy: Some(PolicyValue {
value: ProtoPolicy::RoundRobin as i32,
}),
},
Some(Config {
policy: Policy::RoundRobin,
}),
),
(
"should fail when invalid policy is provided",
ProtoConfig {
policy: Some(PolicyValue { value: 42 }),
},
None,
),
(
"should use correct default values",
ProtoConfig { policy: None },
Some(Config {
policy: Policy::default(),
}),
),
];
for (name, proto_config, expected) in test_cases {
let result = Config::try_from(proto_config);
assert_eq!(
result.is_err(),
expected.is_none(),
"{}: error expectation does not match",
name
);
if let Some(expected) = expected {
assert_eq!(expected, result.unwrap(), "{}", name);
}
}
}
#[test]
fn round_robin_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: ROUND_ROBIN
";
let filter = create_filter(yaml);
// Check that we repeat the same addresses in sequence forever.
let expected_sequence = addresses.iter().map(|addr| vec![*addr]).collect::<Vec<_>>();
for _ in 0..10 {
assert_eq!(
expected_sequence,
(0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>()
);
}
}
#[test]
fn random_load_balancer_policy() {
let addresses = vec![
"127.0.0.1:8080".parse().unwrap(),
"127.0.0.2:8080".parse().unwrap(),
"127.0.0.3:8080".parse().unwrap(),
];
let yaml = "
policy: RANDOM
";
let filter = create_filter(yaml);
// Run a few selection rounds through the addresses.
let mut result_sequences = vec![];
for _ in 0..10 {
let sequence = (0..addresses.len())
.map(|_| get_response_addresses(filter.as_ref(), &addresses))
.collect::<Vec<_>>();
result_sequences.push(sequence);
}
// Check that every address was chosen at least once.
assert_eq!(
addresses.into_iter().collect::<HashSet<_>>(),
result_sequences
.clone()
.into_iter()
.flatten()
.flatten()
.collect::<HashSet<_>>(),
);
// Check that there is at least one different sequence of addresses.
assert!(
&result_sequences[1..]
.iter()
.any(|seq| seq!= &result_sequences[0]),
"the same sequence of addresses were chosen for random load balancer"
);
}
}
forall.rs
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
                if i == v.len()-1 {
                    return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
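// --- illustrative sketch (added; not part of the original source) ---
// Comb enumerates the ways to distribute n identical items over slots with
// the given capacities, most-significant slot first:
#[cfg(test)]
mod comb_sketch {
    use super::Comb;

    #[test]
    fn enumerates_bounded_distributions() {
        // 2 items over capacities [1, 2]: (1,1) first, then (0,2).
        let mut c = Comb::new(2, vec![1, 2]);
        assert_eq!(c.next(), Some(&vec![1, 1]));
        assert_eq!(c.next(), Some(&vec![0, 2]));
        assert_eq!(c.next(), None);
    }
}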
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next()!= None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
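// --- illustrative sketch (added; not part of the original source) ---
// Matches walks every way to route the multiplicities in v1 through the
// capacities in v2, one Comb per entry of v1. With unit weights it reduces
// to plain permutations:
#[cfg(test)]
mod matches_sketch {
    use super::Matches;

    #[test]
    fn unit_weights_give_permutations() {
        let mut m = Matches::new(vec![1, 1], vec![1, 1]);
        let first: Vec<Vec<usize>> = m.next().unwrap().iter().map(|c| c.state.clone()).collect();
        assert_eq!(first, vec![vec![1, 0], vec![0, 1]]);
        let second: Vec<Vec<usize>> = m.next().unwrap().iter().map(|c| c.state.clone()).collect();
        assert_eq!(second, vec![vec![0, 1], vec![1, 0]]);
        assert!(m.next().is_none());
    }
}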
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize, bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1]!= 0 && x[i2].state[j2]!= 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1]!= 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2]!= 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1!= u4 || u2!= u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1!= u3 || u2!= u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if!groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline|!perm_includes(&newline, oldline));
let l2 = lines.len();
if l1!= l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
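// --- illustrative sketch (added; not part of the original source) ---
// add_reduce_maximal keeps an antichain: dominated lines are evicted and a
// newcomer is inserted only if it is not itself dominated. The same
// bookkeeping on u64 bitmasks (mask containment standing in for the
// permutation-aware perm_includes):
#[cfg(test)]
mod antichain_sketch {
    fn insert_maximal(sets: &mut Vec<u64>, new: u64) {
        let before = sets.len();
        sets.retain(|&old| new & old != old); // drop masks contained in `new`
        let evicted = sets.len() != before;
        if evicted || sets.iter().all(|&old| old & new != new) {
            sets.push(new);
        }
    }

    #[test]
    fn keeps_only_maximal_masks() {
        let mut sets = vec![0b0011, 0b0100];
        insert_maximal(&mut sets, 0b0111); // supersedes both existing masks
        assert_eq!(sets, vec![0b0111]);
        insert_maximal(&mut sets, 0b0001); // dominated: not inserted
        assert_eq!(sets, vec![0b0111]);
    }
}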
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)|!(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len()!= len || unis.iter().all(|(xc,yc)|!(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if!h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if!seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
forall.rs
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
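// --- illustrative sketch (added; not part of the original source) ---
// transform() refills an existing Comb in place, which lets Matches::next
// below avoid allocating a fresh enumerator per step:
#[cfg(test)]
mod comb_transform_sketch {
    use super::Comb;

    #[test]
    fn refill_in_place() {
        let mut c = Comb::new(2, vec![2, 2]);
        assert_eq!(c.next(), Some(&vec![2, 0]));
        c.transform(3, vec![1usize, 2].into_iter()); // new budget and capacities
        assert_eq!(c.next(), Some(&vec![1, 2]));
    }
}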
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next()!= None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize, bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1]!= 0 && x[i2].state[j2]!= 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1]!= 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
                            if x[i2].state[j2] != 0 {
                                let u1 = c1[i1].0.clone() & c2[j1].0.clone();
                                let u2 = c1[i2].0.clone() & c2[j2].0.clone();
                                let u3 = c1[i1].0.clone() & c2[j2].0.clone();
                                let u4 = c1[i2].0.clone() & c2[j1].0.clone();
                                if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1 != u4 || u2 != u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1 != u3 || u2 != u4)) {
                                    oldbad = Some((i1,i2,j1,j2));
                                    continue 'outer;
                                }
                            }
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if!groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline|!perm_includes(&newline, oldline));
let l2 = lines.len();
if l1!= l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)|!(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len()!= len || unis.iter().all(|(xc,yc)|!(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if!h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if!seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
forall.rs
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
    fn new(n : usize, max : Vec<usize>) -> Self {
        let mut state = vec![0;max.len()];
        let mut res = n;
        let mut i = 0;
        while res > 0 {
            let cur = std::cmp::min(max[i],res);
            state[i] = cur;
            res -= cur;
            i += 1;
        }
        Comb {
            max, state, first:true
        }
    }
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next()!= None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
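// --- illustrative sketch (added; not part of the original source) ---
// Matches::next needs the finished Comb at j-1 immutably while rebuilding the
// Comb at j mutably; split_at_mut is the standard way to get two disjoint
// borrows out of one Vec:
#[cfg(test)]
mod split_at_mut_sketch {
    #[test]
    fn disjoint_borrows() {
        let mut values = vec![10, 20, 30];
        let j = 2;
        let (left, right) = values.split_at_mut(j);
        let prev = &left[j - 1]; // read-only: element j-1
        let cur = &mut right[0]; // mutable: element j
        *cur += *prev;
        assert_eq!(values, vec![10, 20, 50]);
    }
}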
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize, bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1]!= 0 && x[i2].state[j2]!= 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1]!= 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2]!= 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1!= u4 || u2!= u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1!= u3 || u2!= u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if!groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline|!perm_includes(&newline, oldline));
let l2 = lines.len();
if l1!= l2 || lines.iter().all(|oldline|!perm_includes(oldline,&newline)) {
lines.push(newline);
}
}
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc,yc)|!(xc.is_superset(x) && yc.is_superset(y)) );
if unis.len()!= len || unis.iter().all(|(xc,yc)|!(x.is_superset(xc) && y.is_superset(yc)) ) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if!h.contains_key(&g[i]){
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if!seen.contains(&newline){
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
forall.rs
use itertools::Itertools;
use std::collections::HashMap;
use std::collections::HashSet;
use crate::Line;
use crate::Constraint;
use crate::Problem;
use indicatif::ProgressStyle;
use indicatif::ProgressBar;
use chrono::prelude::*;
struct Comb{
max : Vec<usize>,
state : Vec<usize>,
first : bool
}
impl Comb {
fn new(n : usize, max : Vec<usize>) -> Self {
let mut state = vec![0;max.len()];
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(max[i],res);
state[i] = cur;
res -= cur;
i += 1;
}
Comb {
max, state, first:true
}
}
fn transform(&mut self, n : usize, max : impl Iterator<Item=usize>) {
let mut i = 0;
for x in max {
self.max[i] = x;
i += 1;
}
assert!(i == self.max.len());
let mut res = n;
let mut i = 0;
while res > 0 {
let cur = std::cmp::min(self.max[i],res);
self.state[i] = cur;
res -= cur;
i += 1;
}
for j in i..self.state.len() {
self.state[j] = 0;
}
self.first = true;
}
fn next(&mut self) -> Option<&Vec<usize>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
let v = &mut self.state;
let m = &mut self.max;
let mut i = 0;
loop {
if i == v.len()-1 {
return None;
}
if v[i] > 0 {
v[i+1] += 1;
v[i] -= 1;
if v[i+1] <= m[i+1] {
break;
}
}
i += 1;
}
let mut res = v[0..=i].iter().sum();
let mut j = 0;
while res > 0 {
let cur = std::cmp::min(m[j],res);
v[j] = cur;
res -= cur;
j += 1;
}
for k in j..=i {
v[k] = 0;
}
return Some(&self.state);
}
}
}
struct Matches {
state : Vec<Comb>,
first : bool,
v1 : Vec<usize>
}
impl Matches {
fn new(v1 : Vec<usize>, mut v2 : Vec<usize>) -> Self {
let mut s = vec![];
for &x in &v1 {
let mut c = Comb::new(x,v2.clone());
c.next();
for i in 0..v2.len() {
v2[i] -= c.state[i];
}
s.push(c);
}
Self {
v1, state : s, first : true
}
}
fn next(&mut self) -> Option<&Vec<Comb>> {
if self.first {
self.first = false;
Some(&self.state)
}else {
for i in (0..self.state.len()).rev() {
if self.state[i].next()!= None {
for j in i+1..self.state.len() {
let split = self.state.split_at_mut(j);
let p = &split.0[j-1];
let p2 = &mut split.1[0];
let pmax = &p.max;
let ps = &p.state;
let n = self.v1[j];
p2.transform(n,pmax.iter().zip(ps.iter()).map(|(m,x)|m-x));
//let v : Vec<_> = pmax.iter().zip(ps.iter()).map(|(m,x)|m-x).collect();
//self.state[j] = Comb::new(n,v);
p2.next();
}
return Some(&self.state);
}
}
None
}
}
}
fn count_map<BigNum>(v : &[BigNum]) -> HashMap<BigNum,usize> where BigNum : crate::bignum::BigNum{
let mut h = HashMap::new();
for n in v {
*h.entry(n.clone()).or_default() += 1;
}
h
}
#[inline(never)]
fn intersections<BigNum>(uni : BigNum, c1 : &[(BigNum,usize)], c2 : &[(BigNum,usize)], delta : usize, bits : usize) -> Vec<Line<BigNum>> where BigNum : crate::bignum::BigNum {
let v1 : Vec<_> = c1.iter().map(|(_,c)|*c).collect();
let v2 : Vec<_> = c2.iter().map(|(_,c)|*c).collect();
let mut m = Matches::new(v1,v2);
let mut r = vec![];
let mut oldbad : Option<(usize,usize,usize,usize)> = None;
'outer: while let Some(x) = m.next() {
if let Some((i1,i2,j1,j2)) = oldbad {
if x[i1].state[j1]!= 0 && x[i2].state[j2]!= 0 {
continue 'outer;
}
}
for i1 in 0..c1.len() {
for j1 in 0..c2.len() {
if x[i1].state[j1]!= 0 {
for i2 in i1+1..c1.len() {
for j2 in 0..c2.len() {
if x[i2].state[j2]!= 0 {
let u1 = c1[i1].0.clone() & c2[j1].0.clone();
let u2 = c1[i2].0.clone() & c2[j2].0.clone();
let u3 = c1[i1].0.clone() & c2[j2].0.clone();
let u4 = c1[i2].0.clone() & c2[j1].0.clone();
if (u4.is_superset(&u1) && u3.is_superset(&u2) && (u1!= u4 || u2!= u3)) || (u3.is_superset(&u1) && u4.is_superset(&u2) && (u1!= u3 || u2!= u4)) {
oldbad = Some((i1,i2,j1,j2));
continue 'outer;
}
}
}
}
}
}
}
let mut groups = Vec::with_capacity(delta);
groups.push(uni.clone());
for (i,(ga,_)) in c1.iter().enumerate() {
for (j,(gb,_)) in c2.iter().enumerate() {
for _ in 0..x[i].state[j] {
groups.push(ga.clone() & gb.clone());
}
}
}
if!groups.contains(&BigNum::zero()) {
r.push(Line::from_groups(delta, bits, groups.into_iter()).sorted());
}
}
r
}
#[inline(never)]
fn perm_includes<BigNum>(line : &Line<BigNum>, other : &Line<BigNum>) -> bool where BigNum : crate::bignum::BigNum {
let g1 : Vec<_> = line.groups().collect();
let g2 : Vec<_> = other.groups().collect();
let d = g1.len();
let mut g = contest_algorithms::graph::flow::FlowGraph::new(2*d+2,d*d);
for i in 1..=d {
g.add_edge(0, i, 1, 0, 0);
}
for i in d+1..=2*d {
g.add_edge(i, 2*d+1, 1, 0, 0);
}
for i in 0..d {
for j in 0..d {
if g1[i].is_superset(&g2[j]) {
g.add_edge(1+i, 1+d+j, 1, 0, 0);
}
}
}
g.dinic(0, 2*d+1).0 == d as i64
}
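// --- illustrative sketch (added; not part of the original source) ---
// perm_includes reduces "does some permutation pair every group of `line`
// with a group of `other` it contains?" to perfect bipartite matching on d
// rows and d columns. The dinic() call above can be replaced by Kuhn's
// augmenting paths; a self-contained version on a boolean adjacency matrix:
#[cfg(test)]
mod matching_sketch {
    fn kuhn(adj: &[Vec<bool>], u: usize, seen: &mut [bool], matched: &mut [Option<usize>]) -> bool {
        for v in 0..adj[u].len() {
            if adj[u][v] && !seen[v] {
                seen[v] = true;
                let free = match matched[v] {
                    None => true,
                    Some(w) => kuhn(adj, w, seen, matched),
                };
                if free {
                    matched[v] = Some(u);
                    return true;
                }
            }
        }
        false
    }

    fn has_perfect_matching(adj: &[Vec<bool>]) -> bool {
        let n = adj.len(); // square, as in perm_includes where both sides have d groups
        let mut matched = vec![None; n];
        (0..n).all(|u| {
            let mut seen = vec![false; n];
            kuhn(adj, u, &mut seen, &mut matched)
        })
    }

    #[test]
    fn finds_a_perfect_matching() {
        // rows: groups of `line`; columns: groups of `other`;
        // true = row group is a superset of the column group
        let adj = vec![vec![true, false], vec![true, true]];
        assert!(has_perfect_matching(&adj));
    }
}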
#[inline(never)]
fn add_reduce_maximal<BigNum>(lines : &mut Vec<Line<BigNum>>, newline : Line<BigNum>) where BigNum : crate::bignum::BigNum {
let l1 = lines.len();
lines.retain(|oldline|!perm_includes(&newline, oldline));
let l2 = lines.len();
if l1 != l2 || lines.iter().all(|oldline| !perm_includes(oldline, &newline)) {
lines.push(newline);
}
}
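// For each incomparable pair (x, y) from `u1` x `u2`, bucket the pair under
// its union `x | y`, keeping only minimal pairs per bucket: a new pair evicts
// component-wise supersets and is added unless a component-wise subset is
// already present.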
#[inline(never)]
fn find_good_unions<BigNum>(u1 : &[BigNum], u2 : &[BigNum]) -> HashMap<BigNum,Vec<(BigNum,BigNum)>> where BigNum : crate::bignum::BigNum {
let mut unions = HashMap::new();
for x in u1.iter() {
for y in u2.iter() {
if x.is_superset(y) || y.is_superset(x) {
continue;
}
let uni = x.clone() | y.clone();
let unis : &mut Vec<(BigNum,BigNum)> = unions.entry(uni).or_insert(vec![]);
let len = unis.len();
unis.retain(|(xc, yc)| !(xc.is_superset(x) && yc.is_superset(y)));
if unis.len() != len || unis.iter().all(|(xc, yc)| !(x.is_superset(xc) && y.is_superset(yc))) {
unis.push((x.clone(),y.clone()));
}
}
}
unions
}
pub fn forall<BigNum>(nc : &Constraint<BigNum>, problem : &Problem<BigNum>) -> Constraint<BigNum> where BigNum : crate::bignum::BigNum {
let mut nc = nc.clone();
let maplt = problem.map_label_text();
let set_to_string = |s:&BigNum|{
let r = s.one_bits().map(|elem|&maplt[&elem]).join("");
if r == "" {
String::from("_")
}else{
r
}
};
let make_right_closed = |g : BigNum|{g.clone()|problem.successors(g.one_bits().next().unwrap(),false)};
for line in &mut nc.lines {
*line = line.edited(|g|{make_right_closed(g)}).sorted();
}
let mut seen : HashSet<_> = nc.lines.iter().cloned().collect();
let lines = std::mem::replace(&mut nc.lines, vec![]);
for line in lines {
seen.insert(line.clone());
add_reduce_maximal(&mut nc.lines, line);
}
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
let mut pairs = HashSet::new();
loop {
let mut newc = nc.clone();
let size = nc.lines.len();
let lines = &nc.lines;
let mut without_one = vec![];
for line in &nc.lines {
let mut h = HashMap::new();
let g : Vec<_> = line.groups().collect();
for i in 0..g.len() {
if !h.contains_key(&g[i]) {
let v : Vec<_> = [&g[0..i],&g[i+1..g.len()]].concat();
let v : Vec<_> = count_map(&v).into_iter().sorted().collect();
h.insert(g[i].clone(),v);
}
}
without_one.push(h);
}
let mut line_groups = vec![];
for line in lines {
line_groups.push(line.groups().unique().collect::<Vec<_>>());
}
#[cfg(not(target_arch = "wasm32"))]
let pb = ProgressBar::new((size*size) as u64);
#[cfg(not(target_arch = "wasm32"))]
pb.set_style(ProgressStyle::default_bar()
.template("\n[elapsed: {elapsed_precise}] [{wide_bar:.green/red}] [eta: {eta_precise}]\n{msg}")
/*.progress_chars("#>-")*/);
for i in 0..lines.len() {
#[cfg(not(target_arch = "wasm32"))]
{
pb.set_position((i*i) as u64);
let est = pb.eta().as_secs();
let dest = chrono::Duration::seconds(est as i64);
let whenfinish = (Local::now() + dest).to_rfc2822();
pb.set_message(format!("[i: {}/{}] [new lines: {}] [eta: {}]",i,size,newc.lines.len(),whenfinish));
}
let mut candidates2 = vec![];
for j in 0..=i {
let mut candidates = vec![];
let pair = (lines[i].clone(),lines[j].clone());
if pairs.contains(&pair) || pairs.contains(&(pair.1.clone(),pair.0.clone())) {
continue;
}
pairs.insert(pair);
let u1 = &line_groups[i];
let u2 = &line_groups[j];
let unions = find_good_unions(u1,u2);
for (uni,v) in unions {
for (x,y) in v {
let c1 = &without_one[i][&x];
let c2 = &without_one[j][&y];
let lines = intersections(uni.clone(),c1,c2,nc.delta, nc.bits);
for newline in lines {
if !seen.contains(&newline) {
seen.insert(newline.clone());
add_reduce_maximal(&mut candidates, newline);
}
}
}
}
for newline in candidates {
add_reduce_maximal(&mut candidates2, newline);
}
}
for newline in candidates2 {
add_reduce_maximal(&mut newc.lines, newline);
}
}
#[cfg(not(target_arch = "wasm32"))]
pb.finish_and_clear();
if newc == nc { break; }
println!("new iteration...");
nc = newc;
{
println!("\n--- Constraints ---");
for line in &nc.lines {
let s1 = line.groups().map(|x|String::from("(")+&set_to_string(&x)+")").join(" ");
println!("{}",s1);
}
}
}
nc
}
policy_handler.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler handle it,
/// [`RequestTransform::Request`] should be returned, containing the modified request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
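// A minimal sketch (not from the original file) of a pass-through handler;
// `NoopHandler` is a hypothetical type used only for illustration:
//
// struct NoopHandler;
//
// #[async_trait]
// impl PolicyHandler for NoopHandler {
//     async fn handle_policy_request(&mut self, _request: PolicyRequest) -> Response {
//         todo!("answer the policy client")
//     }
//     async fn handle_setting_request(&mut self, _request: Request) -> Option<RequestTransform> {
//         None // no policy applies; forward the request unchanged
//     }
//     async fn handle_setting_response(&mut self, _response: SettingResponse) -> Option<ResponseTransform> {
//         None // no policy applies; forward the response unchanged
//     }
// }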
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
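// Usage sketch (hypothetical): store `create_handler::<MyHandler, _>` as the
// generator for some `MyHandler: Create + PolicyHandler` and call it with a
// `Context` to obtain a `BoxedHandler` future.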
/// `ClientProxy` provides common functionality, like messaging and persistence, to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
/// Write a policy info object to storage. The call does not return until the
/// storage agent has responded, and it reports whether the stored value was
/// actually updated.
pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
}
}
policy_handler.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler handle it,
/// [`RequestTransform::Request`] should be returned, containing the modified request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
/// `ClientProxy` provides common functionality, like messaging and persistence, to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
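// Note: unlike `read_policy` above, which panics on a storage mismatch,
// `write_policy` below reports failure as `PolicyError::WriteFailure`.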
/// Write a policy info object to storage. The call does not return until the
/// storage agent has responded, and it reports whether the stored value was
/// actually updated.
pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
}
}
policy_handler.rs
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request, Response as SettingResponse};
use crate::handler::setting_handler::{SettingHandlerResult, StorageFactory};
use crate::message::base::Audience;
use crate::policy::response::{Error as PolicyError, Response};
use crate::policy::{
BoxedHandler, Context, GenerateHandlerResult, HasPolicyType, PolicyInfo, PolicyType,
Request as PolicyRequest,
};
use crate::service;
use crate::storage::{self, StorageInfo};
use anyhow::Error;
use async_trait::async_trait;
use fuchsia_syslog::fx_log_err;
use fuchsia_trace as ftrace;
use futures::future::BoxFuture;
use settings_storage::device_storage::DeviceStorage;
use settings_storage::UpdateState;
use std::convert::{TryFrom, TryInto};
/// PolicyHandlers are in charge of applying and persisting policies set by clients.
#[async_trait]
pub trait PolicyHandler {
/// Called when a policy client makes a request on the policy API this handler controls.
async fn handle_policy_request(&mut self, request: PolicyRequest) -> Response;
/// Called when a setting request is intercepted for the setting this policy handler supervises.
///
/// If there are no policies or the request does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to consume the request and respond to the client directly, it should
/// return [`RequestTransform::Result`].
///
/// If this handler wants to modify the request and then let the setting handler handle it,
/// [`RequestTransform::Request`] should be returned, containing the modified request.
///
/// [`RequestTransform::Result`]: enum.RequestTransform.html
/// [`RequestTransform::Request`]: enum.RequestTransform.html
async fn handle_setting_request(&mut self, request: Request) -> Option<RequestTransform>;
/// Called when a setting response is intercepted from the setting this policy handler
/// supervises.
///
/// If there are no policies or the response does not need to be modified, `None` should be
/// returned.
///
/// If this handler wants to modify the response and still let the original audience handle it,
/// [`Response`] should be returned, containing the modified response.
///
/// [`Response`]: ResponseTransform::Response
async fn handle_setting_response(
&mut self,
response: SettingResponse,
) -> Option<ResponseTransform>;
}
/// `RequestTransform` is returned by a [`PolicyHandler`] in response to a setting request that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum RequestTransform {
/// A new, modified request that should be forwarded to the setting handler for processing.
Request(Request),
/// A result to return directly to the settings client.
Result(SettingHandlerResult),
}
/// `ResponseTransform` is returned by a [`PolicyHandler`] in response to a setting response that a
/// [`PolicyProxy`] intercepted. The presence of this value indicates that the policy handler has
/// decided to take action in order to apply policies.
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`PolicyProxy`]: ../policy_proxy/struct.PolicyProxy.html
///
#[derive(Clone, Debug, PartialEq)]
pub enum ResponseTransform {
/// A new, modified response that should be forwarded.
Response(SettingResponse),
}
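// Hypothetical illustration: a handler that clamps a changed setting could
// return `Some(ResponseTransform::Response(clamped))` from
// `handle_setting_response`, and `None` whenever no policy applies.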
/// Trait used to create policy handlers.
#[async_trait]
pub trait Create: Sized {
async fn create(handler: ClientProxy) -> Result<Self, Error>;
}
/// Creates a [`PolicyHandler`] from the given [`Context`].
///
/// [`PolicyHandler`]: trait.PolicyHandler.html
/// [`Context`]: ../base/struct.Context.html
pub(crate) fn create_handler<C, T: StorageFactory<Storage = DeviceStorage> + 'static>(
context: Context<T>,
) -> BoxFuture<'static, GenerateHandlerResult>
where
C: Create + PolicyHandler + Send + Sync + 'static,
{
Box::pin(async move {
let _ = &context;
let proxy = ClientProxy::new(context.service_messenger);
C::create(proxy).await.map(|handler| Box::new(handler) as BoxedHandler)
})
}
/// `ClientProxy` provides common functionality, like messaging and persistence, to policy handlers.
#[derive(Clone)]
pub struct ClientProxy {
service_messenger: service::message::Messenger,
}
impl ClientProxy {
/// Sends a setting request to the underlying setting proxy this policy handler controls.
pub(crate) fn send_setting_request(
&self,
setting_type: SettingType,
request: Request,
) -> service::message::Receptor {
self.service_messenger
.message(
HandlerPayload::Request(request).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send()
}
/// Requests the setting handler to rebroadcast a settings changed event to its listeners.
pub(crate) fn request_rebroadcast(&self, setting_type: SettingType) {
// Ignore the receptor result.
let _ = self
.service_messenger
.message(
HandlerPayload::Request(Request::Rebroadcast).into(),
Audience::Address(service::Address::Handler(setting_type)),
)
.send();
}
}
impl ClientProxy {
pub(crate) fn new(service_messenger: service::message::Messenger) -> Self {
Self { service_messenger }
}
/// The type `T` is any type that has a [`PolicyType`] associated with it and that can be
/// converted into a [`PolicyInfo`]. This is usually a variant of the `PolicyInfo` enum.
pub(crate) async fn read_policy<T: HasPolicyType + TryFrom<PolicyInfo>>(
&self,
id: ftrace::Id,
) -> T {
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Read(T::POLICY_TYPE.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Read(
StorageInfo::PolicyInfo(policy_info),
)) = payload
{
let policy_type: PolicyType = (&policy_info).into();
if let Ok(info) = policy_info.try_into() {
return info;
}
panic!(
"Mismatching type during read. Expected {:?}, but got {:?}",
T::POLICY_TYPE,
policy_type
);
} else {
panic!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
panic!("Error reading from storage: {:?}", err);
}
}
}
/// Write a policy info object to storage. The call does not return until the
/// storage agent has responded, and it reports whether the stored value was
/// actually updated.
pub(crate) async fn write_policy(
&self,
policy_info: PolicyInfo,
id: ftrace::Id,
) -> Result<UpdateState, PolicyError> {
let policy_type = (&policy_info).into();
let mut receptor = self
.service_messenger
.message(
storage::Payload::Request(storage::StorageRequest::Write(policy_info.into(), id))
.into(),
Audience::Address(service::Address::Storage),
)
.send();
match receptor.next_of::<storage::Payload>().await {
Ok((payload, _)) => {
if let storage::Payload::Response(storage::StorageResponse::Write(result)) = payload
{
return result.map_err(|e| {
fx_log_err!("Failed to write policy: {:?}", e);
PolicyError::WriteFailure(policy_type)
});
} else {
fx_log_err!("Incorrect response received from storage: {:?}", payload);
}
}
Err(err) => {
fx_log_err!("Error writing to storage: {:?}", err);
}
}
Err(PolicyError::WriteFailure(policy_type))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::base::SettingType;
use crate::handler::base::{Payload as HandlerPayload, Request};
use crate::message::base::MessengerType;
use crate::message::MessageHubUtil;
use crate::policy::PolicyType;
use crate::service;
use crate::tests::message_utils::verify_payload;
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_send_setting_request() {
let policy_type = PolicyType::Unknown;
let setting_request = Request::Get;
let target_setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut setting_proxy_receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(
policy_type.setting_type(),
)))
.await
.expect("setting proxy messenger created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
let _ = client_proxy.send_setting_request(target_setting_type, setting_request.clone());
verify_payload(
service::Payload::Setting(HandlerPayload::Request(setting_request)),
&mut setting_proxy_receptor,
None,
)
.await
}
#[fuchsia_async::run_until_stalled(test)]
async fn test_client_proxy_request_rebroadcast() {
let setting_type = SettingType::Unknown;
let service_delegate = service::MessageHub::create_hub();
let (_, mut receptor) = service_delegate
.create(MessengerType::Addressable(service::Address::Handler(setting_type)))
.await
.expect("service receptor created");
let client_proxy = ClientProxy {
service_messenger: service_delegate
.create(MessengerType::Unbound)
.await
.expect("messenger should be created")
.0,
};
client_proxy.request_rebroadcast(setting_type);
verify_payload(HandlerPayload::Request(Request::Rebroadcast).into(), &mut receptor, None)
.await
}
}
lib.rs
//! This is a platform-agnostic Rust driver for the ADS1013, ADS1014, ADS1015,
//! ADS1113, ADS1114, and ADS1115 ultra-small, low-power
//! analog-to-digital converters (ADC), based on the [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Set the operating mode to one-shot or continuous. See: [`into_continuous()`].
//! - Make a measurement in one-shot mode. See: [`read()`][read_os].
//! - Start continuous conversion mode. See: [`start()`].
//! - Read the last measurement made in continuous conversion mode. See: [`read()`][read_cont].
//! - Set the data rate. See: [`set_data_rate()`].
//! - Set the full-scale range (gain amplifier). See [`set_full_scale_range()`].
//! - Read whether a measurement is in progress. See: [`is_measurement_in_progress()`].
//! - Set the ALERT/RDY pin to be used as conversion-ready pin. See: [`use_alert_rdy_pin_as_ready()`].
//! - Comparator:
//! - Set the low and high thresholds. See: [`set_high_threshold_raw()`].
//! - Set the comparator mode. See: [`set_comparator_mode()`].
//! - Set the comparator polarity. See: [`set_comparator_polarity()`].
//! - Set the comparator latching. See: [`set_comparator_latching()`].
//! - Set the comparator queue. See: [`set_comparator_queue()`].
//! - Disable the comparator. See: [`disable_comparator()`].
//!
//! [`into_continuous()`]: struct.Ads1x1x.html#method.into_continuous
//! [read_os]: struct.Ads1x1x.html#method.read
//! [`start()`]: struct.Ads1x1x.html#method.start
//! [read_cont]: struct.Ads1x1x.html#impl-OneShot%3CAds1x1x%3CDI%2C%20IC%2C%20CONV%2C%20OneShot%3E%2C%20i16%2C%20CH%3E
//! [`set_data_rate()`]: struct.Ads1x1x.html#method.set_data_rate
//! [`set_full_scale_range()`]: struct.Ads1x1x.html#method.set_full_scale_range
//! [`is_measurement_in_progress()`]: struct.Ads1x1x.html#method.is_measurement_in_progress
//! [`set_high_threshold_raw()`]: struct.Ads1x1x.html#method.set_high_threshold_raw
//! [`set_comparator_mode()`]: struct.Ads1x1x.html#method.set_comparator_mode
//! [`set_comparator_polarity()`]: struct.Ads1x1x.html#method.set_comparator_polarity
//! [`set_comparator_latching()`]: struct.Ads1x1x.html#method.set_comparator_latching
//! [`set_comparator_queue()`]: struct.Ads1x1x.html#method.set_comparator_queue
//! [`disable_comparator()`]: struct.Ads1x1x.html#method.disable_comparator
//! [`use_alert_rdy_pin_as_ready()`]: struct.Ads1x1x.html#method.use_alert_rdy_pin_as_ready
//!
//! ## The devices
//!
//! The devices are precision, low power, 12/16-bit analog-to-digital
//! converters (ADC) that provide all features necessary to measure the most
//! common sensor signals in an ultra-small package. Depending on the device,
//! these integrate a programmable gain amplifier (PGA), voltage reference,
//! oscillator and high-accuracy temperature sensor.
//!
//! The devices can perform conversions at data rates up to 3300 samples per
//! second (SPS). The PGA offers input ranges from ±256 mV to ±6.144 V,
//! allowing both large and small signals to be measured with high resolution.
//! An input multiplexer (MUX) allows measuring two differential or four
//! single-ended inputs. The high-accuracy temperature sensor can be used for
//! system-level temperature monitoring or cold-junction compensation for
//! thermocouples.
//!
//! The devices operate either in continuous-conversion mode, or in a
//! single-shot mode that automatically powers down after a conversion.
//! Single-shot mode significantly reduces current consumption during idle
//! periods. Data is transferred through I2C.
//!
//! Here is a comparison of the characteristics of the devices:
//!
//! | Device | Resolution | Sample Rate | Channels | Multi-channel | Features |
//! |---------|------------|--------------|----------|---------------|-----------------|
//! | ADS1013 | 12-bit | Max 3300 SPS | 1 | N/A | |
//! | ADS1014 | 12-bit | Max 3300 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1015 | 12-bit | Max 3300 SPS | 4 | Multiplexed | Comparator, PGA |
//! | ADS1113 | 16-bit | Max 860 SPS | 1 | N/A | |
//! | ADS1114 | 16-bit | Max 860 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1115 | 16-bit | Max 860 SPS | 4 | Multiplexed | Comparator, PGA |
//!
//! Datasheets:
//! - [ADS101x](http://www.ti.com/lit/ds/symlink/ads1015.pdf)
//! - [ADS111x](http://www.ti.com/lit/ds/symlink/ads1115.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the appropriate device.
//! In the following examples an instance of the device ADS1013 will be created
//! as an example. Other devices can be created with similar methods like:
//! `Ads1x1x::new_ads1114(...)`.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Create a driver instance for the ADS1013
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! // do something...
//!
//! // get the I2C device back
//! let dev = adc.destroy_ads1013();
//! ```
//!
//! ### Create a driver instance for the ADS1013 with an alternative address (method 1)
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let (bit1, bit0) = (true, false); // last two bits of address
//! let address = SlaveAddr::Alternative(bit1, bit0);
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//! ### Create a driver instance for the ADS1013 with an alternative address (method 2)
//!
//! Using helper `SlaveAddr` creation method depending on the connection of
//! the `ADDR` pin.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! // `ADDR` pin connected to SDA results in the 0x4A effective address
//! let address = SlaveAddr::new_sda();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//!
//! ### Make a one-shot measurement
//! ```no_run
//! use ads1x1x::{channel, Ads1x1x, SlaveAddr};
//! use embedded_hal::adc::OneShot;
//! use linux_embedded_hal::I2cdev;
//! use nb::block;
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let mut adc = Ads1x1x::new_ads1013(dev, SlaveAddr::default());
//! let measurement = block!(adc.read(&mut channel::DifferentialA0A1)).unwrap();
//! println!("Measurement: {}", measurement);
//! let _dev = adc.destroy_ads1013(); // get I2C device back
//! ```
//!
//! ### Change into continuous conversion mode and read the last measurement
//!
//! Changing the mode may fail in case there was a communication error.
//! In this case, you can retrieve the unchanged device from the error type.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, ModeChangeError, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! match adc.into_continuous() {
//! Err(ModeChangeError::I2C(e, adc)) => /* mode change failed handling */ panic!(),
//! Ok(mut adc) => {
//! let measurement = adc.read().unwrap();
//! //...
//! }
//! }
//! ```
//!
//!
//! ### Set the data rate
//! For 12-bit devices, the available data rates are given by `DataRate12Bit`.
//! For 16-bit devices, the available data rates are given by `DataRate16Bit`.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, DataRate16Bit, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1115(dev, address);
//! adc.set_data_rate(DataRate16Bit::Sps860).unwrap();
//! ```
//!
//! ### Configure the comparator
//! Configure the comparator to assert when the voltage drops below -1.5V
//! or goes above 1.5V in at least two consecutive conversions. Then the
//! ALERT/RDY pin will be set high and it will be kept so until the
//! measurement is read or an appropriate SMBus alert response is sent by
//! the master.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{
//! Ads1x1x, SlaveAddr, ComparatorQueue, ComparatorPolarity,
//! ComparatorMode, ComparatorLatching, FullScaleRange
//! };
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1015(dev, address);
//! adc.set_comparator_queue(ComparatorQueue::Two).unwrap();
//! adc.set_comparator_polarity(ComparatorPolarity::ActiveHigh).unwrap();
//! adc.set_comparator_mode(ComparatorMode::Window).unwrap();
//! adc.set_full_scale_range(FullScaleRange::Within2_048V).unwrap();
//! adc.set_low_threshold_raw(-1500).unwrap();
//! adc.set_high_threshold_raw(1500).unwrap();
//! adc.set_comparator_latching(ComparatorLatching::Latching).unwrap();
//! ```
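//!
//! With the ±2.048 V range on a 12-bit device one count is 2.048 V / 2048 =
//! 1 mV, so the raw thresholds of ±1500 set above correspond to the ±1.5 V
//! trip points described here.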
#![doc(html_root_url = "https://docs.rs/ads1x1x/0.2.2")]
#![deny(unsafe_code)]
#![deny(missing_docs)]
#![no_std]
const DEVICE_BASE_ADDRESS: u8 = 0b100_1000;
struct Register;
impl Register {
const CONVERSION: u8 = 0x00;
const CONFIG: u8 = 0x01;
const LOW_TH: u8 = 0x02;
const HIGH_TH: u8 = 0x03;
}
struct BitFlags;
impl BitFlags {
const OS: u16 = 0b1000_0000_0000_0000;
const MUX2: u16 = 0b0100_0000_0000_0000;
const MUX1: u16 = 0b0010_0000_0000_0000;
const MUX0: u16 = 0b0001_0000_0000_0000;
const PGA2: u16 = 0b0000_1000_0000_0000;
const PGA1: u16 = 0b0000_0100_0000_0000;
const PGA0: u16 = 0b0000_0010_0000_0000;
const OP_MODE: u16 = 0b0000_0001_0000_0000;
const DR2: u16 = 0b0000_0000_1000_0000;
const DR1: u16 = 0b0000_0000_0100_0000;
const DR0: u16 = 0b0000_0000_0010_0000;
const COMP_MODE: u16 = 0b0000_0000_0001_0000;
const COMP_POL: u16 = 0b0000_0000_0000_1000;
const COMP_LAT: u16 = 0b0000_0000_0000_0100;
const COMP_QUE1: u16 = 0b0000_0000_0000_0010;
const COMP_QUE0: u16 = 0b0000_0000_0000_0001;
}
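// Bit masks for the 16-bit Config register, from the MSB (OS, bit 15) down to
// the LSB (COMP_QUE0, bit 0), mirroring the register map in the datasheets
// linked above.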
mod channels;
pub use crate::channels::{channel, ChannelSelection};
mod construction;
mod conversion;
pub use crate::conversion::{ConvertMeasurement, ConvertThreshold};
mod devices;
#[doc(hidden)]
pub mod ic;
#[doc(hidden)]
pub mod interface;
mod types;
use crate::types::Config;
pub use crate::types::{
mode, Ads1x1x, ComparatorLatching, ComparatorMode, ComparatorPolarity, ComparatorQueue,
DataRate12Bit, DataRate16Bit, DynamicOneShot, Error, FullScaleRange, ModeChangeError,
SlaveAddr,
};
mod private {
use super::{ic, interface, Ads1x1x};
pub trait Sealed {}
impl<I2C> Sealed for interface::I2cInterface<I2C> {}
impl<DI, IC, CONV, MODE> Sealed for Ads1x1x<DI, IC, CONV, MODE> {}
impl Sealed for ic::Resolution12Bit {}
impl Sealed for ic::Resolution16Bit {}
impl Sealed for ic::Ads1013 {}
impl Sealed for ic::Ads1113 {}
impl Sealed for ic::Ads1014 {}
impl Sealed for ic::Ads1114 {}
impl Sealed for ic::Ads1015 {}
impl Sealed for ic::Ads1115 {}
}
lib.rs
//! This is a platform-agnostic Rust driver for the ADS1013, ADS1014, ADS1015,
//! ADS1113, ADS1114, and ADS1115 ultra-small, low-power
//! analog-to-digital converters (ADC), based on the [`embedded-hal`] traits.
//!
//! [`embedded-hal`]: https://github.com/rust-embedded/embedded-hal
//!
//! This driver allows you to:
//! - Set the operating mode to one-shot or continuous. See: [`into_continuous()`].
//! - Make a measurement in one-shot mode. See: [`read()`][read_os].
//! - Start continuous conversion mode. See: [`start()`].
//! - Read the last measurement made in continuous conversion mode. See: [`read()`][read_cont].
//! - Set the data rate. See: [`set_data_rate()`].
//! - Set the full-scale range (gain amplifier). See [`set_full_scale_range()`].
//! - Read whether a measurement is in progress. See: [`is_measurement_in_progress()`].
//! - Set the ALERT/RDY pin to be used as conversion-ready pin. See: [`use_alert_rdy_pin_as_ready()`].
//! - Comparator:
//! - Set the low and high thresholds. See: [`set_high_threshold_raw()`].
//! - Set the comparator mode. See: [`set_comparator_mode()`].
//! - Set the comparator polarity. See: [`set_comparator_polarity()`].
//! - Set the comparator latching. See: [`set_comparator_latching()`].
//! - Set the comparator queue. See: [`set_comparator_queue()`].
//! - Disable the comparator. See: [`disable_comparator()`].
//!
//! [`into_continuous()`]: struct.Ads1x1x.html#method.into_continuous
//! [read_os]: struct.Ads1x1x.html#method.read
//! [`start()`]: struct.Ads1x1x.html#method.start
//! [read_cont]: struct.Ads1x1x.html#impl-OneShot%3CAds1x1x%3CDI%2C%20IC%2C%20CONV%2C%20OneShot%3E%2C%20i16%2C%20CH%3E
//! [`set_data_rate()`]: struct.Ads1x1x.html#method.set_data_rate
//! [`set_full_scale_range()`]: struct.Ads1x1x.html#method.set_full_scale_range
//! [`is_measurement_in_progress()`]: struct.Ads1x1x.html#method.is_measurement_in_progress
//! [`set_high_threshold_raw()`]: struct.Ads1x1x.html#method.set_high_threshold_raw
//! [`set_comparator_mode()`]: struct.Ads1x1x.html#method.set_comparator_mode
//! [`set_comparator_polarity()`]: struct.Ads1x1x.html#method.set_comparator_polarity
//! [`set_comparator_latching()`]: struct.Ads1x1x.html#method.set_comparator_latching
//! [`set_comparator_queue()`]: struct.Ads1x1x.html#method.set_comparator_queue
//! [`disable_comparator()`]: struct.Ads1x1x.html#method.disable_comparator
//! [`use_alert_rdy_pin_as_ready()`]: struct.Ads1x1x.html#method.use_alert_rdy_pin_as_ready
//!
//! ## The devices
//!
//! The devices are precision, low power, 12/16-bit analog-to-digital
//! converters (ADC) that provide all features necessary to measure the most
//! common sensor signals in an ultra-small package. Depending on the device,
//! these integrate a programmable gain amplifier (PGA), voltage reference,
//! oscillator and high-accuracy temperature sensor.
//!
//! The devices can perform conversions at data rates up to 3300 samples per
//! second (SPS). The PGA offers input ranges from ±256 mV to ±6.144 V,
//! allowing both large and small signals to be measured with high resolution.
//! An input multiplexer (MUX) allows measuring two differential or four
//! single-ended inputs. The high-accuracy temperature sensor can be used for
//! system-level temperature monitoring or cold-junction compensation for
//! thermocouples.
//!
//! The devices operate either in continuous-conversion mode, or in a
//! single-shot mode that automatically powers down after a conversion.
//! Single-shot mode significantly reduces current consumption during idle
//! periods. Data is transferred through I2C.
//!
//! Here is a comparison of the characteristics of the devices:
//!
//! | Device | Resolution | Sample Rate | Channels | Multi-channel | Features |
//! |---------|------------|--------------|----------|---------------|-----------------|
//! | ADS1013 | 12-bit | Max 3300 SPS | 1 | N/A | |
//! | ADS1014 | 12-bit | Max 3300 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1015 | 12-bit | Max 3300 SPS | 4 | Multiplexed | Comparator, PGA |
//! | ADS1113 | 16-bit | Max 860 SPS | 1 | N/A | |
//! | ADS1114 | 16-bit | Max 860 SPS | 1 | N/A | Comparator, PGA |
//! | ADS1115 | 16-bit | Max 860 SPS | 4 | Multiplexed | Comparator, PGA |
//!
//! Datasheets:
//! - [ADS101x](http://www.ti.com/lit/ds/symlink/ads1015.pdf)
//! - [ADS111x](http://www.ti.com/lit/ds/symlink/ads1115.pdf)
//!
//! ## Usage examples (see also examples folder)
//!
//! To use this driver, import this crate and an `embedded_hal` implementation,
//! then instantiate the appropriate device.
//! In the following examples an instance of the device ADS1013 will be created
//! as an example. Other devices can be created with similar methods like:
//! `Ads1x1x::new_ads1114(...)`.
//!
//! Please find additional examples using hardware in this repository: [driver-examples]
//!
//! [driver-examples]: https://github.com/eldruin/driver-examples
//!
//! ### Create a driver instance for the ADS1013
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! // do something...
//!
//! // get the I2C device back
//! let dev = adc.destroy_ads1013();
//! ```
//!
//! ### Create a driver instance for the ADS1013 with an alternative address (method 1)
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let (bit1, bit0) = (true, false); // last two bits of address
//! let address = SlaveAddr::Alternative(bit1, bit0);
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//! ### Create a driver instance for the ADS1013 with an alternative address (method 2)
//!
//! Using helper `SlaveAddr` creation method depending on the connection of
//! the `ADDR` pin.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! // `ADDR` pin connected to SDA results in the 0x4A effective address
//! let address = SlaveAddr::new_sda();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! ```
//!
//! ### Make a one-shot measurement
//! ```no_run
//! use ads1x1x::{channel, Ads1x1x, SlaveAddr};
//! use embedded_hal::adc::OneShot;
//! use linux_embedded_hal::I2cdev;
//! use nb::block;
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let mut adc = Ads1x1x::new_ads1013(dev, SlaveAddr::default());
//! let measurement = block!(adc.read(&mut channel::DifferentialA0A1)).unwrap();
//! println!("Measurement: {}", measurement);
//! let _dev = adc.destroy_ads1013(); // get I2C device back
//! ```
//!
//! ### Change into continuous conversion mode and read the last measurement
//!
//! Changing the mode may fail in case there was a communication error.
//! In this case, you can retrieve the unchanged device from the error type.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, ModeChangeError, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let adc = Ads1x1x::new_ads1013(dev, address);
//! match adc.into_continuous() {
//! Err(ModeChangeError::I2C(e, adc)) => /* mode change failed handling */ panic!(),
//! Ok(mut adc) => {
//! let measurement = adc.read().unwrap();
//! //...
//! }
//! }
//! ```
//!
//!
//! ### Set the data rate
//! For 12-bit devices, the available data rates are given by `DataRate12Bit`.
//! For 16-bit devices, the available data rates are given by `DataRate16Bit`.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{Ads1x1x, DataRate16Bit, SlaveAddr};
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1115(dev, address);
//! adc.set_data_rate(DataRate16Bit::Sps860).unwrap();
//! ```
//!
//! ### Configure the comparator
//! Configure the comparator to assert when the voltage drops below -1.5V
//! or goes above 1.5V in at least two consecutive conversions. Then the
//! ALERT/RDY pin will be set high and it will be kept so until the
//! measurement is read or an appropriate SMBus alert response is sent by
//! the master.
//!
//! ```no_run
//! use linux_embedded_hal::I2cdev;
//! use ads1x1x::{
//! Ads1x1x, SlaveAddr, ComparatorQueue, ComparatorPolarity,
//! ComparatorMode, ComparatorLatching, FullScaleRange
//! };
//!
//! let dev = I2cdev::new("/dev/i2c-1").unwrap();
//! let address = SlaveAddr::default();
//! let mut adc = Ads1x1x::new_ads1015(dev, address);
//! adc.set_comparator_queue(ComparatorQueue::Two).unwrap();
//! adc.set_comparator_polarity(ComparatorPolarity::ActiveHigh).unwrap();
//! adc.set_comparator_mode(ComparatorMode::Window).unwrap();
//! adc.set_full_scale_range(FullScaleRange::Within2_048V).unwrap();
//! adc.set_low_threshold_raw(-1500).unwrap();
//! adc.set_high_threshold_raw(1500).unwrap();
//! adc.set_comparator_latching(ComparatorLatching::Latching).unwrap();
//! ```
#![doc(html_root_url = "https://docs.rs/ads1x1x/0.2.2")]
#![deny(unsafe_code)]
#![deny(missing_docs)]
#![no_std]
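// 0b100_1000 = 0x48: the base slave address, used when the ADDR pin is wired
// to GND; other ADDR wirings adjust the two lowest bits (see `SlaveAddr`).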
const DEVICE_BASE_ADDRESS: u8 = 0b100_1000;
struct Register;
impl Register {
const CONVERSION: u8 = 0x00;
const CONFIG: u8 = 0x01;
const LOW_TH: u8 = 0x02;
const HIGH_TH: u8 = 0x03;
}
struct BitFlags;
impl BitFlags {
const OS: u16 = 0b1000_0000_0000_0000;
const MUX2: u16 = 0b0100_0000_0000_0000;
const MUX1: u16 = 0b0010_0000_0000_0000;
const MUX0: u16 = 0b0001_0000_0000_0000;
const PGA2: u16 = 0b0000_1000_0000_0000;
const PGA1: u16 = 0b0000_0100_0000_0000;
const PGA0: u16 = 0b0000_0010_0000_0000;
const OP_MODE: u16 = 0b0000_0001_0000_0000;
const DR2: u16 = 0b0000_0000_1000_0000;
const DR1: u16 = 0b0000_0000_0100_0000;
const DR0: u16 = 0b0000_0000_0010_0000;
const COMP_MODE: u16 = 0b0000_0000_0001_0000;
const COMP_POL: u16 = 0b0000_0000_0000_1000;
const COMP_LAT: u16 = 0b0000_0000_0000_0100;
const COMP_QUE1: u16 = 0b0000_0000_0000_0010;
const COMP_QUE0: u16 = 0b0000_0000_0000_0001;
}
mod channels;
pub use crate::channels::{channel, ChannelSelection};
mod construction;
mod conversion;
pub use crate::conversion::{ConvertMeasurement, ConvertThreshold};
mod devices;
#[doc(hidden)]
pub mod ic;
#[doc(hidden)]
pub mod interface;
mod types;
use crate::types::Config;
pub use crate::types::{
mode, Ads1x1x, ComparatorLatching, ComparatorMode, ComparatorPolarity, ComparatorQueue,
DataRate12Bit, DataRate16Bit, DynamicOneShot, Error, FullScaleRange, ModeChangeError,
SlaveAddr,
};
mod private {
use super::{ic, interface, Ads1x1x};
pub trait Sealed {}
impl<I2C> Sealed for interface::I2cInterface<I2C> {}
impl<DI, IC, CONV, MODE> Sealed for Ads1x1x<DI, IC, CONV, MODE> {}
impl Sealed for ic::Resolution12Bit {}
impl Sealed for ic::Resolution16Bit {}
impl Sealed for ic::Ads1013 {}
impl Sealed for ic::Ads1113 {}
impl Sealed for ic::Ads1014 {}
impl Sealed for ic::Ads1114 {}
impl Sealed for ic::Ads1015 {}
impl Sealed for ic::Ads1115 {}
}
lib.rs
#![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
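// e.g. balance = 100, liquid = 20, length = 10 blocks
// => locked = 80, per_block = 8 units unlocked per block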
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
		/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
		/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
		/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
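// Worked genesis example (illustrative numbers only): an entry
// `balances: vec![(ALICE, 1_200)]` together with
// `vesting: vec![(ALICE, 100, 1_000, 200)]` locks 1_200 - 200 = 1_000 units,
// so the build closure above derives `per_block = 1_000 / 1_000 = 1`,
// releasing one unit per block from block 100 onwards.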
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
		/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
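	// Example (illustrative, assuming `locked_at` unlocks linearly as configured
	// at genesis): with the schedule sketched above, at block 600 some 500 units
	// remain locked, so `vesting_balance` reports `min(free_balance, 500)`; once
	// the schedule completes it returns zero.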
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
	pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
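// Example of the bookkeeping above (illustrative): dropping an unconsumed
// `PositiveImbalance::new(10)` bumps `TotalIssuance` by 10, and dropping a
// `NegativeImbalance::new(10)` shrinks it by 10; offsetting the two first nets
// to zero, because `offset` uses `mem::forget` to skip both `Drop` impls.
//
// let (pos, neg) = (PositiveImbalance::<T>::new(10), NegativeImbalance::<T>::new(10));
// let _ = pos.offset(neg); // Ok(PositiveImbalance(0)): issuance unchanged on drop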
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
			.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
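	// Example (illustrative): a lock created with reasons
	// `WithdrawReasons::except(WithdrawReason::Transfer)` never blocks a plain
	// transfer here, because the `!l.reasons.intersects(reasons)` arm treats
	// locks that do not cover the requested reason as satisfied.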
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
		if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
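	// Example flow (illustrative): transferring 100 KTON with
	// `TransferFee = 1 RING` debits 1 RING plus 100 KTON from the sender,
	// credits 100 KTON to `dest`, and routes the 1 RING fee to
	// `TransferPayment::on_unbalanced`; a self-transfer (transactor == dest)
	// skips the whole block and leaves every balance untouched.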
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
		if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
			.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
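// Minimal usage sketch (hypothetical test against the `mock` runtime declared
// above; `ALICE`, `BOB`, and `Origin` are assumed to be provided by `mock`):
//
// #[test]
// fn transfer_moves_kton_and_charges_ring_fee() {
// 	// Kton::transfer(Origin::signed(ALICE), BOB, 100).unwrap();
// 	// assert_eq!(Kton::free_balance(&BOB), 100);
// }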
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
		/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
		/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
		/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
		/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
	}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
			.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
		if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
		if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
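	// Example (illustrative): slashing 150 from an account with 100 free and
	// 30 reserved takes all 100 free, then all 30 reserved, and returns a
	// `NegativeImbalance` of 130 alongside the unslashable remainder of 20.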
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
			.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
		/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
		/// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
		/// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
		/// This will alter `FreeBalance` and `ReservedBalance` in storage. It will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
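	// Example (illustrative): with `TotalIssuance = 50`, `burn(80)` clamps to 50
	// (issuance floors at zero) and returns a `PositiveImbalance` of 50, while
	// `issue` saturates at `Balance::max_value()` in the symmetric direction.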
	fn free_balance(who: &T::AccountId) -> Self::Balance {
		<FreeBalance<T>>::get(who)
	}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
			.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
		if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
		if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
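	// Example (illustrative): `make_free_balance_be(&who, 40)` on an account
	// holding 100 free units returns `SignedImbalance::Negative(60)`; when that
	// imbalance is dropped unconsumed, `TotalIssuance` shrinks by the 60 units
	// that ceased to exist.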
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
			.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
lib.rs | #![cfg_attr(not(feature = "std"), no_std)]
#[allow(unused)]
#[cfg(all(feature = "std", test))]
mod mock;
#[cfg(all(feature = "std", test))]
mod tests;
#[cfg(not(feature = "std"))]
use rstd::borrow::ToOwned;
use rstd::{cmp, fmt::Debug, mem, prelude::*, result};
use sr_primitives::{
traits::{
Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, SaturatedConversion, Saturating, StaticLookup, Zero,
},
weights::SimpleDispatchInfo,
};
use support::{
decl_event, decl_module, decl_storage,
dispatch::Result,
traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, SignedImbalance, UpdateBalanceOutcome},
StorageMap, StorageValue,
};
use system::{ensure_root, ensure_signed};
use darwinia_support::{BalanceLock, LockIdentifier, LockableCurrency, WithdrawLock, WithdrawReason, WithdrawReasons};
use imbalances::{NegativeImbalance, PositiveImbalance};
use ring::{imbalances::NegativeImbalance as NegativeImbalanceRing, Balance, VestingSchedule};
pub trait Trait: ring::Trait {
/// The overarching event type.
type Event: From<Event<Self>> + Into<<Self as system::Trait>::Event>;
}
decl_event!(
pub enum Event<T> where
<T as system::Trait>::AccountId,
{
/// Transfer succeeded (from, to, value, fees).
Transfer(AccountId, AccountId, Balance, Balance),
}
);
decl_storage! {
trait Store for Module<T: Trait> as Kton {
/// The total units issued in the system.
pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig<T>| {
config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n)
}): T::Balance;
/// Information regarding the vesting of a given account.
pub Vesting get(fn vesting) build(|config: &GenesisConfig<T>| {
// Generate initial vesting configuration
// * who - Account which we are generating vesting configuration for
// * begin - Block when the account will start to vest
// * length - Number of blocks from `begin` until fully vested
// * liquid - Number of units which can be spent before vesting begins
config.vesting.iter().filter_map(|&(ref who, begin, length, liquid)| {
let length = <T::Balance as From<T::BlockNumber>>::from(length);
config.balances.iter()
.find(|&&(ref w, _)| w == who)
.map(|&(_, balance)| {
// Total genesis `balance` minus `liquid` equals funds locked for vesting
let locked = balance.saturating_sub(liquid);
// Number of units unlocked per block after `begin`
let per_block = locked / length.max(sr_primitives::traits::One::one());
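// e.g. with balance = 1_000, liquid = 100 and length = 300 blocks:
// locked = 900 and per_block = 3, so the account unlocks 3 units per
// block after `begin` and is fully vested 300 blocks later.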
(who.clone(), VestingSchedule {
locked: locked,
per_block: per_block,
starting_block: begin
})
})
}).collect::<Vec<_>>()
}): map T::AccountId => Option<VestingSchedule<T::Balance, T::BlockNumber>>;
/// The 'free' balance of a given account.
///
/// This is the only balance that matters in terms of most operations on tokens. It
/// alone is used to determine the balance when in the contract execution environment. When this
/// balance falls below the value of `ExistentialDeposit`, then the 'current account' is
/// deleted: specifically `FreeBalance`. Further, the `OnFreeBalanceZero` callback
/// is invoked, giving a chance to external modules to clean up data associated with
/// the deleted account.
///
/// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub FreeBalance get(fn free_balance) build(|config: &GenesisConfig<T>| config.balances.clone()):
map T::AccountId => T::Balance;
/// The amount of the balance of a given account that is externally reserved; this can still get
/// slashed, but gets slashed last of all.
///
/// This balance is a'reserve' balance that other subsystems use in order to set aside tokens
/// that are still 'owned' by the account holder, but which are suspendable.
///
/// When this balance falls below the value of `ExistentialDeposit`, then this'reserve account'
/// is deleted: specifically, `ReservedBalance`.
///
/// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets
/// collapsed to zero if it ever becomes less than `ExistentialDeposit`.)
pub ReservedBalance get(fn reserved_balance): map T::AccountId => T::Balance;
pub Locks get(fn locks): map T::AccountId => Vec<BalanceLock<T::Balance, T::Moment>>;
}
add_extra_genesis {
config(balances): Vec<(T::AccountId, T::Balance)>;
config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, T::Balance)>;
// ^^ begin, length, amount liquid at genesis
}
}
decl_module! {
pub struct Module<T: Trait> for enum Call where origin: T::Origin {
fn deposit_event() = default;
/// Transfer some liquid free balance to another account.
///
/// `transfer` will set the `FreeBalance` of the sender and receiver.
/// It will decrease the total issuance of the system by the `TransferFee`.
/// If the sender's account is below the existential deposit as a result
/// of the transfer, the account will be reaped.
///
/// The dispatch origin for this call must be `Signed` by the transactor.
///
/// # <weight>
/// - Dependent on arguments but not critical, given proper implementations for
/// input config types. See related functions below.
/// - It contains a limited number of reads and writes internally and no complex computation.
///
/// Related functions:
///
/// - `ensure_can_withdraw` is always called internally but has a bounded complexity.
/// - Transferring balances to accounts that did not exist before will cause
/// `T::OnNewAccount::on_new_account` to be called.
/// - Removing enough funds from an account will trigger
/// `T::DustRemoval::on_unbalanced` and `T::OnFreeBalanceZero::on_free_balance_zero`.
/// - `transfer_keep_alive` works the same way as `transfer`, but has an additional
/// check that the transfer will not kill the origin account.
///
/// # </weight>
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn transfer(
origin,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
let transactor = ensure_signed(origin)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?;
}
/// Set the balances of a given account.
///
/// This will alter `FreeBalance` and `ReservedBalance` in storage. it will
/// also decrease the total issuance of the system (`TotalIssuance`).
/// If the new free or reserved balance is below the existential deposit,
/// it will reset the account nonce (`system::AccountNonce`).
///
/// The dispatch origin for this call is `root`.
///
/// # <weight>
/// - Independent of the arguments.
/// - Contains a limited number of reads and writes.
/// # </weight>
#[weight = SimpleDispatchInfo::FixedOperational(50_000)]
fn set_balance(
origin,
who: <T::Lookup as StaticLookup>::Source,
#[compact] new_free: T::Balance,
#[compact] new_reserved: T::Balance
) {
ensure_root(origin)?;
let who = T::Lookup::lookup(who)?;
let current_free = <FreeBalance<T>>::get(&who);
if new_free > current_free {
mem::drop(PositiveImbalance::<T>::new(new_free - current_free));
} else if new_free < current_free {
mem::drop(NegativeImbalance::<T>::new(current_free - new_free));
}
Self::set_free_balance(&who, new_free);
let current_reserved = <ReservedBalance<T>>::get(&who);
if new_reserved > current_reserved {
mem::drop(PositiveImbalance::<T>::new(new_reserved - current_reserved));
} else if new_reserved < current_reserved {
mem::drop(NegativeImbalance::<T>::new(current_reserved - new_reserved));
}
Self::set_reserved_balance(&who, new_reserved);
}
/// Exactly as `transfer`, except the origin must be root and the source account may be
/// specified.
#[weight = SimpleDispatchInfo::FixedNormal(1_000_000)]
pub fn force_transfer(
origin,
source: <T::Lookup as StaticLookup>::Source,
dest: <T::Lookup as StaticLookup>::Source,
#[compact] value: T::Balance
) {
ensure_root(origin)?;
let source = T::Lookup::lookup(source)?;
let dest = T::Lookup::lookup(dest)?;
<Self as Currency<_>>::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?;
}
}
}
impl<T: Trait> Module<T> {
// PUBLIC IMMUTABLES
/// Get the amount that is currently being vested and cannot be transferred out of this account.
pub fn vesting_balance(who: &T::AccountId) -> T::Balance {
if let Some(v) = Self::vesting(who) {
Self::free_balance(who).min(v.locked_at(<system::Module<T>>::block_number()))
} else {
Zero::zero()
}
}
// PRIVATE MUTABLES
/// Set the reserved balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_reserved_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
<ReservedBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
/// Set the free balance of an account to some new value. Will enforce `ExistentialDeposit`
/// law, annulling the account as needed.
///
/// Doesn't do any preparatory work for creating a new account, so should only be used when it
/// is known that the account already exists.
///
/// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that
/// the caller will do this.
fn set_free_balance(who: &T::AccountId, balance: T::Balance) -> UpdateBalanceOutcome {
// Commented out for now - but consider it instructive.
// assert!(!Self::total_balance(who).is_zero());
<FreeBalance<T>>::insert(who, balance);
UpdateBalanceOutcome::Updated
}
}
// wrapping these imbalances in a private module is necessary to ensure absolute privacy
// of the inner member.
mod imbalances {
use rstd::mem;
use crate::{result, Imbalance, Saturating, StorageValue, Trait, Zero};
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been created without any equal and opposite accounting.
#[must_use]
pub struct PositiveImbalance<T: Trait>(T::Balance);
impl<T: Trait> PositiveImbalance<T> {
/// Create a new positive imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
PositiveImbalance(amount)
}
}
/// Opaque, move-only struct with private fields that serves as a token denoting that
/// funds have been destroyed without any equal and opposite accounting.
#[must_use]
pub struct NegativeImbalance<T: Trait>(T::Balance);
impl<T: Trait> NegativeImbalance<T> {
/// Create a new negative imbalance from a balance.
pub fn new(amount: T::Balance) -> Self {
NegativeImbalance(amount)
}
}
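// NOTE: every consuming combinator below (`split`, `merge`, `subsume`,
// `offset`) calls `mem::forget` on the values it consumes, so the `Drop`
// impls at the bottom of this module adjust `TotalIssuance` exactly once
// per unit of imbalance. For example, offsetting a +10 positive imbalance
// against a -4 negative one yields `Ok(+6)`; only that remainder is
// eventually dropped and settled against the total issuance.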
impl<T: Trait> Imbalance<T::Balance> for PositiveImbalance<T> {
type Opposite = NegativeImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(NegativeImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Imbalance<T::Balance> for NegativeImbalance<T> {
type Opposite = PositiveImbalance<T>;
fn zero() -> Self {
Self(Zero::zero())
}
fn drop_zero(self) -> result::Result<(), Self> {
if self.0.is_zero() {
Ok(())
} else {
Err(self)
}
}
fn split(self, amount: T::Balance) -> (Self, Self) {
let first = self.0.min(amount);
let second = self.0 - first;
mem::forget(self);
(Self(first), Self(second))
}
fn merge(mut self, other: Self) -> Self {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
self
}
fn subsume(&mut self, other: Self) {
self.0 = self.0.saturating_add(other.0);
mem::forget(other);
}
fn offset(self, other: Self::Opposite) -> result::Result<Self, Self::Opposite> {
let (a, b) = (self.0, other.0);
mem::forget((self, other));
if a >= b {
Ok(Self(a - b))
} else {
Err(PositiveImbalance::new(b - a))
}
}
fn peek(&self) -> T::Balance {
self.0
}
}
impl<T: Trait> Drop for PositiveImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_add(self.0));
}
}
impl<T: Trait> Drop for NegativeImbalance<T> {
/// Basic drop handler will just square up the total issuance.
fn drop(&mut self) {
<super::TotalIssuance<T>>::mutate(|v| *v = v.saturating_sub(self.0));
}
}
}
impl<T: Trait> Currency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Balance = T::Balance;
type PositiveImbalance = PositiveImbalance<T>;
type NegativeImbalance = NegativeImbalance<T>;
fn total_balance(who: &T::AccountId) -> Self::Balance {
Self::free_balance(who) + Self::reserved_balance(who)
}
fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool {
Self::free_balance(who) >= value
}
fn total_issuance() -> Self::Balance {
<TotalIssuance<T>>::get()
}
fn minimum_balance() -> Self::Balance {
Zero::zero()
}
fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_sub(&amount).unwrap_or_else(|| {
amount = *issued;
Zero::zero()
});
});
PositiveImbalance::new(amount)
}
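// e.g. if total issuance is 100, `burn(150)` clamps: issuance drops to 0
// and the returned imbalance accounts only for the 100 that actually existed.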
fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance {
<TotalIssuance<T>>::mutate(|issued| {
*issued = issued.checked_add(&amount).unwrap_or_else(|| {
amount = Self::Balance::max_value() - *issued;
Self::Balance::max_value()
})
});
NegativeImbalance::new(amount)
}
fn free_balance(who: &T::AccountId) -> Self::Balance {
<FreeBalance<T>>::get(who)
}
// # <weight>
// Despite iterating over a list of locks, they are limited by the number of
// lock IDs, which means the number of runtime modules that intend to use and create locks.
// # </weight>
fn ensure_can_withdraw(
who: &T::AccountId,
_amount: T::Balance,
reasons: WithdrawReasons,
new_balance: T::Balance,
) -> Result {
if reasons.intersects(WithdrawReason::Reserve | WithdrawReason::Transfer)
&& Self::vesting_balance(who) > new_balance
{
return Err("vesting balance too high to send value");
}
let locks = Self::locks(who);
if locks.is_empty() {
return Ok(());
}
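// A lock only blocks this withdrawal if it covers one of the requested
// `reasons` AND its `withdraw_lock` forbids `new_balance` at the current
// moment; any other lock is ignored by the `all(..)` check below.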
let now = <timestamp::Module<T>>::now();
if locks
.into_iter()
.all(|l| l.withdraw_lock.can_withdraw(now, new_balance) || !l.reasons.intersects(reasons))
{
Ok(())
} else {
Err("account liquidity restrictions prevent withdrawal")
}
}
fn transfer(
transactor: &T::AccountId,
dest: &T::AccountId,
value: Self::Balance,
_existence_requirement: ExistenceRequirement,
) -> Result {
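// KTON transfers charge their fee in RING (the chain's native token): the
// fee is withdrawn from the sender's RING free balance below, while `value`
// moves between KTON free balances.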
let fee = <T as ring::Trait>::TransferFee::get();
let new_from_ring = <ring::FreeBalance<T>>::get(transactor)
.checked_sub(&fee)
.ok_or("Transfer Fee - NOT ENOUGH RING")?;
<ring::Module<T>>::ensure_can_withdraw(transactor, fee, WithdrawReason::Fee.into(), new_from_ring)?;
let new_from_kton = Self::free_balance(transactor)
.checked_sub(&value)
.ok_or("balance too low to send value")?;
Self::ensure_can_withdraw(transactor, value, WithdrawReason::Transfer.into(), new_from_kton)?;
let new_to_kton = Self::free_balance(dest)
.checked_add(&value)
.ok_or("destination balance too high to receive value")?;
if transactor != dest {
if new_from_ring < <ring::Module<T> as Currency<<T as system::Trait>::AccountId>>::minimum_balance() {
return Err("transfer would kill account");
}
<ring::Module<T>>::set_free_balance(transactor, new_from_ring);
Self::set_free_balance(transactor, new_from_kton);
Self::set_free_balance(dest, new_to_kton);
<T as ring::Trait>::TransferPayment::on_unbalanced(NegativeImbalanceRing::new(fee));
Self::deposit_event(RawEvent::Transfer(
transactor.to_owned(),
dest.to_owned(),
value.saturated_into(),
fee.saturated_into(),
));
}
Ok(())
}
fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) {
let free_balance = Self::free_balance(who);
let free_slash = cmp::min(free_balance, value);
Self::set_free_balance(who, free_balance - free_slash);
let remaining_slash = value - free_slash;
// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn
// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having
// to draw from reserved funds, however we err on the side of punishment if things are inconsistent
// or `can_slash` wasn't used appropriately.
if !remaining_slash.is_zero() {
let reserved_balance = Self::reserved_balance(who);
let reserved_slash = cmp::min(reserved_balance, remaining_slash);
Self::set_reserved_balance(who, reserved_balance - reserved_slash);
(
NegativeImbalance::new(free_slash + reserved_slash),
remaining_slash - reserved_slash,
)
} else {
(NegativeImbalance::new(value), Zero::zero())
}
}
fn deposit_into_existing(
who: &T::AccountId,
value: Self::Balance,
) -> result::Result<Self::PositiveImbalance, &'static str> {
if Self::total_balance(who).is_zero() {
return Err("beneficiary account must pre-exist");
}
Self::set_free_balance(who, Self::free_balance(who) + value);
Ok(PositiveImbalance::new(value))
}
fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance {
let (imbalance, _) = Self::make_free_balance_be(who, Self::free_balance(who) + value);
if let SignedImbalance::Positive(p) = imbalance {
p
} else {
// Impossible, but be defensive.
Self::PositiveImbalance::zero()
}
}
fn withdraw(
who: &T::AccountId,
value: Self::Balance,
reasons: WithdrawReasons,
_liveness: ExistenceRequirement,
) -> result::Result<Self::NegativeImbalance, &'static str> {
let old_balance = Self::free_balance(who);
if let Some(new_balance) = old_balance.checked_sub(&value) {
Self::ensure_can_withdraw(who, value, reasons, new_balance)?;
Self::set_free_balance(who, new_balance);
Ok(NegativeImbalance::new(value))
} else {
Err("too few free funds in account")
}
}
fn make_free_balance_be(
who: &T::AccountId,
balance: Self::Balance,
) -> (
SignedImbalance<Self::Balance, Self::PositiveImbalance>,
UpdateBalanceOutcome,
) {
let original = Self::free_balance(who);
let imbalance = if original <= balance {
SignedImbalance::Positive(PositiveImbalance::new(balance - original))
} else {
SignedImbalance::Negative(NegativeImbalance::new(original - balance))
};
let outcome = {
Self::set_free_balance(who, balance);
UpdateBalanceOutcome::Updated
};
(imbalance, outcome)
}
}
impl<T: Trait> LockableCurrency<T::AccountId> for Module<T>
where
T::Balance: MaybeSerializeDeserialize + Debug,
{
type Moment = T::Moment;
fn set_lock(
id: LockIdentifier,
who: &T::AccountId,
withdraw_lock: WithdrawLock<Self::Balance, Self::Moment>,
reasons: WithdrawReasons,
) {
let mut new_lock = Some(BalanceLock {
id,
withdraw_lock,
reasons,
});
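// Scan the existing locks: a lock with the same `id` is replaced in place
// (`new_lock.take()` leaves `None` behind), every other lock is kept. If
// nothing was replaced, `new_lock` is still `Some` and gets appended below.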
let mut locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) })
.collect::<Vec<_>>();
if let Some(lock) = new_lock {
locks.push(lock)
}
<Locks<T>>::insert(who, locks);
}
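// Illustrative call site (the lock identifier and the `withdraw_lock` value
// are hypothetical -- the concrete `WithdrawLock` variants live in
// `darwinia_support`):
//
// <Module<T> as LockableCurrency<_>>::set_lock(
// *b"stakelok", &who, withdraw_lock, WithdrawReasons::all());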
fn remove_lock(id: LockIdentifier, who: &T::AccountId) {
let locks = Self::locks(who)
.into_iter()
.filter_map(|l| if l.id != id { Some(l) } else { None })
.collect::<Vec<_>>();
<Locks<T>>::insert(who, locks);
}
}
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, handle)`, the element to check and the
/// element that was directly clicked on.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
/// Besides the containers that you pass to [`dragula`](crate::dragula()),
/// or the containers you dynamically add, you can also use this closure to
/// specify any sort of logic that defines what is a container
/// for this particular [`Drake`](crate::Drake) instance.
///
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container, in other words, the signature is `(el, handle)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
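// Each boxed Rust closure is converted to a JS function via the crate's
// `closure::to_js_*_ret` helpers -- presumably thin wrappers that leak a
// `wasm_bindgen` `Closure` as a `JsValue` so the dragula JS side can keep
// calling it for the lifetime of the Drake instance.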
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue {
self.invalid_func.clone()
}
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, handle)`, the element to check and the
/// element that was directly clicked on.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
/// Besides the containers that you pass to [`dragula`](crate::dragula()),
/// or the containers you dynamically add, you can also use this closure to
/// specify any sort of logic that defines what is a container
/// for this particular [`Drake`](crate::Drake) instance.
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container, in other words, the signature is `(el, handle)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue {
self.invalid_func.clone()
}
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
mod.rs | use crate::closure;
use wasm_bindgen::prelude::*;
/// Since the `copy` option can be either a function or a boolean, this enum
/// encapsulates the possible values for the copy option.
///
/// The closure signature is `(el, handle)`, the element to check and the
/// element that was directly clicked on.
pub enum CopyValue {
Bool(bool),
Func(Box<dyn FnMut(JsValue, JsValue) -> bool>),
}
impl From<CopyValue> for JsValue {
fn from(copy: CopyValue) -> JsValue {
match copy {
CopyValue::Bool(copy) => JsValue::from(copy),
CopyValue::Func(copy) => closure::to_js_2_ret(copy),
}
}
}
/// The axis to be considered when determining the location an element will be
/// placed when dropped.
///
/// When an element is dropped onto a container, it will be placed near the
/// point where the mouse was released. If the `direction` is `Vertical`,
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is `Horizontal`, the X axis will be considered.
pub enum Direction {
Vertical,
Horizontal,
}
impl ToString for Direction {
fn to_string(&self) -> String {
const VERTICAL: &str = "vertical";
const HORIZONTAL: &str = "horizontal";
match self {
Direction::Vertical => String::from(VERTICAL),
Direction::Horizontal => String::from(HORIZONTAL),
}
}
}
/// Used to pass options when activating Dragula
///
/// When passed to the [`dragula_options`](crate::dragula_options) function,
/// this struct can be used to specify options to control the behaviour of the
/// drag-and-drop functionality.
///
/// For example:
/// ```no_run
/// use dragula::*;
/// use dragula::options::CopyValue;
/// use web_sys::Element;
/// # use wasm_bindgen::JsValue;
///
/// # let element = JsValue::TRUE;
/// //--snip--
///
/// let options = Options {
/// invalid: Box::new(|el, _handle| {
/// Element::from(el).tag_name() == String::from("A")
/// }),
/// copy: CopyValue::Bool(true),
/// copy_sort_source: true,
/// remove_on_spill: true,
/// slide_factor_x: 10,
/// slide_factor_y: 10,
/// ..Options::default()
/// };
///
/// let drake = dragula_options(&[element], options);
///
/// //--snip--
/// ```
pub struct Options {
/// Besides the containers that you pass to [`dragula`](crate::dragula()),
/// or the containers you dynamically add, you can also use this closure to
/// specify any sort of logic that defines what is a container
/// for this particular [`Drake`](crate::Drake) instance.
///
/// This closure will be invoked with the element that is being checked for
/// whether it is a container.
pub is_container: Box<dyn FnMut(JsValue) -> bool>,
/// You can define a `moves` closure which will be invoked with `(el, source,
/// handle, sibling)` whenever an element is clicked. If this closure returns
/// `false`, a drag event won't begin, and the event won't be prevented
/// either. The `handle` element will be the original click target, which
/// comes in handy to test if that element is an expected _"drag handle"_.
pub moves: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can set `accepts` to a closure with the following signature: `(el,
/// target, source, sibling)`. It'll be called to make sure that an element
/// `el`, that came from container `source`, can be dropped on container
/// `target` before a `sibling` element. The `sibling` can be `null`, which
/// would mean that the element would be placed as the last element in the
/// container. Note that if [`copy`](Options::copy) is set to `true`, `el` will be
/// set to the copy, instead of the originally dragged element.
pub accepts: Box<dyn FnMut(JsValue, JsValue, JsValue, JsValue) -> bool>,
/// You can provide an `invalid` closure with a `(el, handle)` signature.
/// This closure should return `true` for elements that shouldn't trigger a
/// drag. The `handle` argument is the element that was clicked, while `el`
/// is the item that would be dragged.
pub invalid: Box<dyn FnMut(JsValue, JsValue) -> bool>,
/// If `copy` is set to `true` _(or a closure that returns `true`)_, items
/// will be copied rather than moved. This implies the following differences:
///
/// Event | Move | Copy
/// ----------|------------------------------------------|---------------------------------------------
/// `drag` | Element will be concealed from `source` | Nothing happens
/// `drop` | Element will be moved into `target` | Element will be cloned into `target`
/// `remove` | Element will be removed from DOM | Nothing happens
/// `cancel` | Element will stay in `source` | Nothing happens
///
/// If a closure is passed, it'll be called whenever an element starts being
/// dragged in order to decide whether it should follow `copy` behavior or
/// not. This closure will be passed the element to be dragged as well as
/// its source container, in other words, the signature is `(el, handle)`.
///
/// `false` by default.
pub copy: CopyValue,
/// If [`copy`](Options::copy) is set to `true` _(or a closure that
/// returns `true`)_ and `copy_sort_source` is `true` as well, users will
/// be able to sort elements in `copy`-source containers.
///
/// `false` by default.
pub copy_sort_source: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `revert_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are moved back to the source element where
/// the drag event began, rather than stay at the _drop position previewed
/// by the feedback shadow_.
///
/// `false` by default.
pub revert_on_spill: bool,
/// By default, spilling an element outside of any containers will move the
/// element back to the _drop position previewed by the feedback shadow_.
/// Setting `remove_on_spill` to `true` will ensure elements dropped outside
/// of any approved containers are removed from the DOM. Note that `remove`
/// events won't fire if [`copy`](Options::copy) is set to `true`.
///
/// `false` by default.
pub remove_on_spill: bool,
/// When an element is dropped onto a container, it'll be placed near the
/// point where the mouse was released. If the `direction` is
/// [`Vertical`](Direction::Vertical),
/// the default value, the Y axis will be considered. Otherwise, if the
/// `direction` is [`Horizontal`](Direction::Horizontal),
/// the X axis will be considered.
///
/// [`Vertical`](Direction::Vertical), by default.
pub direction: Direction,
/// The DOM element where the mirror element displayed while dragging will
/// be appended to.
///
/// `document.body` by default.
pub mirror_container: JsValue,
/// When this option is enabled, if the user clicks on an input element the
/// drag won't start until their mouse pointer exits the input. This
/// translates into the user being able to select text in inputs contained
/// inside draggable elements, and still drag the element by moving their
/// mouse outside of the input -- so you get the best of both worlds.
///
/// `true` by default.
pub ignore_input_text_selection: bool,
/// The amount of horizontal movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_x: i32,
/// The amount of vertical movement (in pixels) for a click to be
/// considered a drag
///
/// `0` by default.
pub slide_factor_y: i32,
}
impl Default for Options {
fn default() -> Self {
Self {
is_container: Box::new(|_| false),
moves: Box::new(|_, _, _, _| true),
accepts: Box::new(|_, _, _, _| true),
invalid: Box::new(|_, _| false),
copy: CopyValue::Bool(false),
copy_sort_source: false,
revert_on_spill: false,
remove_on_spill: false,
direction: Direction::Vertical,
// Will default to document.body (avoiding web_sys dependency)
mirror_container: JsValue::UNDEFINED,
ignore_input_text_selection: true,
slide_factor_x: 0,
slide_factor_y: 0,
}
}
}
#[doc(hidden)]
#[wasm_bindgen]
pub struct OptionsImpl {
is_container_func: JsValue,
moves_func: JsValue,
accepts_func: JsValue,
invalid_func: JsValue,
copy_func_or_bool: JsValue,
#[wasm_bindgen(js_name = copySortSource)]
pub copy_sort_source: bool,
#[wasm_bindgen(js_name = revertOnSpill)]
pub revert_on_spill: bool,
#[wasm_bindgen(js_name = removeOnSpill)]
pub remove_on_spill: bool,
direction: String,
mirror_container_elem: JsValue,
#[wasm_bindgen(js_name = ignoreInputTextSelection)]
pub ignore_input_text_selection: bool,
#[wasm_bindgen(js_name = slideFactorX)]
pub slide_factor_x: i32,
#[wasm_bindgen(js_name = slideFactorY)]
pub slide_factor_y: i32,
}
impl From<Options> for OptionsImpl {
fn from(options: Options) -> Self {
OptionsImpl {
is_container_func: closure::to_js_1_ret(options.is_container),
moves_func: closure::to_js_4_ret(options.moves),
accepts_func: closure::to_js_4_ret(options.accepts),
invalid_func: closure::to_js_2_ret(options.invalid),
copy_func_or_bool: JsValue::from(options.copy),
mirror_container_elem: options.mirror_container,
copy_sort_source: options.copy_sort_source,
revert_on_spill: options.revert_on_spill,
remove_on_spill: options.remove_on_spill,
direction: options.direction.to_string(),
ignore_input_text_selection: options.ignore_input_text_selection,
slide_factor_x: options.slide_factor_x,
slide_factor_y: options.slide_factor_y,
}
}
}
impl Default for OptionsImpl {
fn default() -> Self {
OptionsImpl::from(Options::default())
}
}
#[wasm_bindgen]
#[doc(hidden)]
impl OptionsImpl {
#[wasm_bindgen(getter = isContainer)]
pub fn is_container_func(&self) -> JsValue {
self.is_container_func.clone()
}
#[wasm_bindgen(setter = isContainer)]
pub fn set_is_container_func(&mut self, val: JsValue) {
self.is_container_func = val;
}
#[wasm_bindgen(getter = moves)]
pub fn moves_func(&self) -> JsValue {
self.moves_func.clone()
}
#[wasm_bindgen(setter = moves)]
pub fn set_moves_func(&mut self, val: JsValue) {
self.moves_func = val;
}
#[wasm_bindgen(getter = accepts)]
pub fn accepts_func(&self) -> JsValue {
self.accepts_func.clone()
}
#[wasm_bindgen(setter = accepts)]
pub fn set_accepts_func(&mut self, val: JsValue) {
self.accepts_func = val;
}
#[wasm_bindgen(getter = invalid)]
pub fn invalid_func(&self) -> JsValue {
self.invalid_func.clone()
}
#[wasm_bindgen(setter = invalid)]
pub fn set_invalid_func(&mut self, val: JsValue) {
self.invalid_func = val;
}
#[wasm_bindgen(getter = copy)]
pub fn copy_func_or_bool(&self) -> JsValue {
self.copy_func_or_bool.clone()
}
#[wasm_bindgen(setter = copy)]
pub fn set_copy_func_or_bool(&mut self, val: JsValue) {
self.copy_func_or_bool = val;
}
#[wasm_bindgen(getter = mirrorContainer)]
pub fn mirror_container_elem(&self) -> JsValue {
self.mirror_container_elem.clone()
}
#[wasm_bindgen(setter = mirrorContainer)]
pub fn set_mirror_container_elem(&mut self, val: JsValue) {
self.mirror_container_elem = val;
}
#[wasm_bindgen(getter)]
pub fn direction(&self) -> String {
self.direction.clone()
}
#[wasm_bindgen(setter)]
pub fn set_direction(&mut self, val: String) {
self.direction = val;
}
}
#[cfg(test)]
mod test;
retransmit_stage.rs | solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp},
solana_streamer::sendmmsg::{multi_target_send, SendPktsError},
std::{
collections::{BTreeSet, HashMap, HashSet},
net::UdpSocket,
ops::{AddAssign, DerefMut},
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const MAX_DUPLICATE_COUNT: usize = 2;
const DEFAULT_LRU_SIZE: usize = 10_000;
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);
#[derive(Default)]
struct RetransmitSlotStats {
num_shreds: usize,
num_nodes: usize,
}
impl AddAssign for RetransmitSlotStats {
fn add_assign(&mut self, other: Self) {
*self = Self {
num_shreds: self.num_shreds + other.num_shreds,
num_nodes: self.num_nodes + other.num_nodes,
}
}
}
#[derive(Default)]
struct RetransmitStats {
since: Option<Instant>,
num_nodes: AtomicUsize,
num_addrs_failed: AtomicUsize,
num_shreds: usize,
num_shreds_skipped: AtomicUsize,
total_batches: usize,
total_time: u64,
epoch_fetch: u64,
epoch_cache_update: u64,
retransmit_total: AtomicU64,
compute_turbine_peers_total: AtomicU64,
slot_stats: HashMap<Slot, RetransmitSlotStats>,
unknown_shred_slot_leader: AtomicUsize,
}
impl RetransmitStats {
fn maybe_submit(
&mut self,
root_bank: &Bank,
working_bank: &Bank,
cluster_info: &ClusterInfo,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
) {
const SUBMIT_CADENCE: Duration = Duration::from_secs(2);
let elapsed = self.since.as_ref().map(Instant::elapsed);
if elapsed.unwrap_or(Duration::MAX) < SUBMIT_CADENCE {
return;
}
let num_peers = cluster_nodes_cache
.get(root_bank.slot(), root_bank, working_bank, cluster_info)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
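// A shred ID is forwarded at most MAX_DUPLICATE_COUNT times with distinct
// payload hashes (so a duplicate produced by a misbehaving leader can still
// propagate and be detected); a payload hash already seen, or anything past
// that limit, is dropped.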
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn check_if_first_shred_received(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100 {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
true
} else {
false
}
}
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
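// Once per second the dedup cache is dropped and the hasher re-seeded; this
// bounds the cache's memory and keeps the packet-hash function unpredictable
// to external senders.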
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>,
sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
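// Greedily drain whatever else is already queued so the whole backlog is
// processed as a single batch.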
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
let num_nodes = retransmit_shred(&shred, socket);
(shred.slot(), num_nodes)
})
.fold(
HashMap::<Slot, RetransmitSlotStats>::new,
|mut acc, (slot, num_nodes)| {
let stats = acc.entry(slot).or_default();
stats.num_nodes += num_nodes;
stats.num_shreds += 1;
acc
},
)
.reduce(HashMap::new, merge)
});
stats.slot_stats = merge(std::mem::take(&mut stats.slot_stats), slot_stats);
timer_start.stop();
stats.total_time += timer_start.as_us();
stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache);
Ok(())
}
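// --- Editor's illustrative sketch (not part of the original source) ---
// The nested `merge` above folds the smaller map into the larger one, which
// bounds hash-map insertions by the size of the smaller side — useful when
// one per-batch stats map dwarfs the other. A standalone reproduction with a
// usage example:
#[cfg(test)]
mod merge_sketch {
    use std::{collections::HashMap, ops::AddAssign};

    fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
    where
        K: Eq + std::hash::Hash,
        V: Default + AddAssign,
    {
        if acc.len() < other.len() {
            return merge(other, acc); // recurse once so we iterate the smaller map
        }
        for (key, value) in other {
            *acc.entry(key).or_default() += value;
        }
        acc
    }

    #[test]
    fn merge_adds_per_key() {
        let a: HashMap<u64, usize> = [(1, 2), (2, 3)].into_iter().collect();
        let b: HashMap<u64, usize> = [(2, 1)].into_iter().collect();
        let merged = merge(a, b);
        assert_eq!((merged[&1], merged[&2]), (2, 4));
    }
}
// --- end sketch ---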
/// Service to retransmit messages from the leader or layer 1 to relevant peer nodes.
/// See `cluster_info` for network layer definitions.
/// # Arguments
/// * `sockets` - Sockets to read from.
/// * `bank_forks` - The BankForks structure, used to fetch the working and root banks
/// * `leader_schedule_cache` - The leader schedule, used to look up slot leaders when verifying shreds
/// * `cluster_info` - Cluster peer info; kept updated and populated by the bank and via gossip
/// * `shreds_receiver` - Receive channel for shreds to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
cluster_info: Arc<ClusterInfo>,
shreds_receiver: Receiver<Vec<Shred>>,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
) -> JoinHandle<()> {
let cluster_nodes_cache = ClusterNodesCache::<RetransmitStage>::new(
CLUSTER_NODES_CACHE_NUM_EPOCH_CAP,
CLUSTER_NODES_CACHE_TTL,
);
let mut hasher_reset_ts = Instant::now();
let mut stats = RetransmitStats::default();
let shreds_received = Mutex::new((LruCache::new(DEFAULT_LRU_SIZE), PacketHasher::default()));
let first_shreds_received = Mutex::<BTreeSet<Slot>>::default();
let num_threads = get_thread_count().min(8).max(sockets.len());
let thread_pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("retransmit-{}", i))
.build()
.unwrap();
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
match retransmit(
&thread_pool,
&bank_forks,
&leader_schedule_cache,
&cluster_info,
&shreds_receiver,
&sockets,
&mut stats,
&cluster_nodes_cache,
&mut hasher_reset_ts,
&shreds_received,
&max_slots,
&first_shreds_received,
rpc_subscriptions.as_deref(),
) {
Ok(()) => (),
Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
}
}
trace!("exiting retransmitter");
})
.unwrap()
}
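// --- Editor's illustrative sketch (assumes the `rayon` crate) ---
// The fold/reduce pattern used inside `retransmit`: each worker accumulates
// into a private per-thread map, and the partial maps are combined pairwise
// afterwards, so no shared stats map is contended in the hot loop. The real
// code additionally picks a UDP socket per worker via
// `thread_pool.current_thread_index() % sockets.len()` to avoid socket locks.
fn per_slot_counts(items: Vec<(u64, usize)>) -> std::collections::HashMap<u64, usize> {
    use rayon::prelude::*;
    use std::collections::HashMap;
    items
        .into_par_iter()
        .with_min_len(4) // amortize task overhead over small work items
        .fold(HashMap::new, |mut acc: HashMap<u64, usize>, (slot, n)| {
            *acc.entry(slot).or_default() += n;
            acc
        })
        .reduce(HashMap::new, |mut a, b| {
            for (k, v) in b {
                *a.entry(k).or_default() += v;
            }
            a
        })
}
// --- end sketch ---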
pub struct RetransmitStage {
retransmit_thread_handle: JoinHandle<()>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
}
impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
ancestor_hashes_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<PacketBatch>>,
exit: Arc<AtomicBool>,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
duplicate_slots_sender: Sender<Slot>,
ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver,
) -> Self {
let (retransmit_sender, retransmit_receiver) = unbounded();
let retransmit_thread_handle = retransmitter(
retransmit_sockets,
bank_forks.clone(),
leader_schedule_cache.clone(),
cluster_info.clone(),
retransmit_receiver,
max_slots,
rpc_subscriptions,
);
let cluster_slots_service = ClusterSlotsService::new(
blockstore.clone(),
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
cluster_slots_update_receiver,
exit.clone(),
);
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let repair_info = RepairInfo {
bank_forks,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
cluster_info,
cluster_slots,
};
let window_service = WindowService::new(
blockstore,
verified_receiver,
retransmit_sender,
repair_socket,
ancestor_hashes_socket,
exit,
repair_info,
leader_schedule_cache,
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache_clone,
id,
last_root,
shred_version,
);
rv && is_connected
},
verified_vote_receiver,
| {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
} | conditional_block |
retransmit_stage.rs | solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp},
solana_streamer::sendmmsg::{multi_target_send, SendPktsError},
std::{
collections::{BTreeSet, HashMap, HashSet},
net::UdpSocket,
ops::{AddAssign, DerefMut},
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const MAX_DUPLICATE_COUNT: usize = 2;
const DEFAULT_LRU_SIZE: usize = 10_000;
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);
#[derive(Default)]
struct RetransmitSlotStats {
num_shreds: usize,
num_nodes: usize,
}
impl AddAssign for RetransmitSlotStats {
fn add_assign(&mut self, other: Self) {
*self = Self {
num_shreds: self.num_shreds + other.num_shreds,
num_nodes: self.num_nodes + other.num_nodes,
}
}
}
#[derive(Default)]
struct RetransmitStats {
since: Option<Instant>,
num_nodes: AtomicUsize,
num_addrs_failed: AtomicUsize,
num_shreds: usize,
num_shreds_skipped: AtomicUsize,
total_batches: usize,
total_time: u64,
epoch_fetch: u64,
epoch_cache_update: u64,
retransmit_total: AtomicU64,
compute_turbine_peers_total: AtomicU64,
slot_stats: HashMap<Slot, RetransmitSlotStats>,
unknown_shred_slot_leader: AtomicUsize,
}
impl RetransmitStats {
fn maybe_submit(
&mut self,
root_bank: &Bank,
working_bank: &Bank,
cluster_info: &ClusterInfo,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
) {
const SUBMIT_CADENCE: Duration = Duration::from_secs(2);
let elapsed = self.since.as_ref().map(Instant::elapsed);
if elapsed.unwrap_or(Duration::MAX) < SUBMIT_CADENCE {
return;
}
let num_peers = cluster_nodes_cache
.get(root_bank.slot(), root_bank, working_bank, cluster_info)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
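// --- Editor's illustrative sketch (std-only, not part of the original) ---
// `maybe_submit` above drains its accumulator with `std::mem::replace`: the
// stats struct is swapped for a fresh one (with `since` reset) in a single
// move, and the old values are reported from the returned snapshot. A
// minimal version of that pattern:
struct Counters {
    since: Option<std::time::Instant>,
    events: usize,
}

impl Counters {
    // Returns the drained event count when the cadence has elapsed; like the
    // original, a `since` of `None` flushes immediately on the first call.
    fn maybe_flush(&mut self, cadence: std::time::Duration) -> Option<usize> {
        let elapsed = self.since.map(|since| since.elapsed());
        if elapsed.unwrap_or(std::time::Duration::MAX) < cadence {
            return None; // too soon; keep accumulating
        }
        let drained = std::mem::replace(
            self,
            Self { since: Some(std::time::Instant::now()), events: 0 },
        );
        Some(drained.events)
    }
}
// --- end sketch ---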
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn | (
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100 {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
true
} else {
false
}
}
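// --- Editor's illustrative sketch (std-only, not part of the original) ---
// `BTreeSet::split_off(&k)` returns the elements >= k, so assigning the
// result back to the set — as done above once it grows past 100 entries —
// drops every slot at or below the current root in a single call:
#[cfg(test)]
mod prune_sketch {
    use std::collections::BTreeSet;

    #[test]
    fn split_off_drops_rooted_slots() {
        let mut first_shreds: BTreeSet<u64> = (1..=10).collect();
        let root = 4u64;
        first_shreds = first_shreds.split_off(&(root + 1));
        let kept: Vec<u64> = first_shreds.into_iter().collect();
        assert_eq!(kept, vec![5, 6, 7, 8, 9, 10]);
    }
}
// --- end sketch ---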
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit(
thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>,
sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
let num_nodes = retransmit_shred(&shred, socket);
(shred.slot(), num_nodes)
})
.fold(
HashMap::<Slot, RetransmitSlotStats>::new,
|mut acc, (slot, num_nodes)| {
let stats = acc.entry(slot).or_default();
stats.num_nodes += num_nodes;
stats.num_shreds += 1;
acc
},
)
.reduce(HashMap::new, merge)
});
stats.slot_stats = merge(std::mem::take(&mut stats.slot_stats), slot_stats);
timer_start.stop();
stats.total_time += timer_start.as_us();
stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache);
Ok(())
}
/// Service to retransmit messages from the leader or layer 1 to relevant peer nodes.
/// See `cluster_info` for network layer definitions.
/// # Arguments
/// * `sockets` - Sockets to read from.
/// * `bank_forks` - The BankForks structure, used to fetch the working and root banks
/// * `leader_schedule_cache` - The leader schedule, used to look up slot leaders when verifying shreds
/// * `cluster_info` - Cluster peer info; kept updated and populated by the bank and via gossip
/// * `shreds_receiver` - Receive channel for shreds to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
cluster_info: Arc<ClusterInfo>,
shreds_receiver: Receiver<Vec<Shred>>,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
) -> JoinHandle<()> {
let cluster_nodes_cache = ClusterNodesCache::<RetransmitStage>::new(
CLUSTER_NODES_CACHE_NUM_EPOCH_CAP,
CLUSTER_NODES_CACHE_TTL,
);
let mut hasher_reset_ts = Instant::now();
let mut stats = RetransmitStats::default();
let shreds_received = Mutex::new((LruCache::new(DEFAULT_LRU_SIZE), PacketHasher::default()));
let first_shreds_received = Mutex::<BTreeSet<Slot>>::default();
let num_threads = get_thread_count().min(8).max(sockets.len());
let thread_pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("retransmit-{}", i))
.build()
.unwrap();
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
match retransmit(
&thread_pool,
&bank_forks,
&leader_schedule_cache,
&cluster_info,
&shreds_receiver,
&sockets,
&mut stats,
&cluster_nodes_cache,
&mut hasher_reset_ts,
&shreds_received,
&max_slots,
&first_shreds_received,
rpc_subscriptions.as_deref(),
) {
Ok(()) => (),
Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
}
}
trace!("exiting retransmitter");
})
.unwrap()
}
pub struct RetransmitStage {
retransmit_thread_handle: JoinHandle<()>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
}
impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
ancestor_hashes_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<PacketBatch>>,
exit: Arc<AtomicBool>,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
duplicate_slots_sender: Sender<Slot>,
ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver,
) -> Self {
let (retransmit_sender, retransmit_receiver) = unbounded();
let retransmit_thread_handle = retransmitter(
retransmit_sockets,
bank_forks.clone(),
leader_schedule_cache.clone(),
cluster_info.clone(),
retransmit_receiver,
max_slots,
rpc_subscriptions,
);
let cluster_slots_service = ClusterSlotsService::new(
blockstore.clone(),
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
cluster_slots_update_receiver,
exit.clone(),
);
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let repair_info = RepairInfo {
bank_forks,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
cluster_info,
cluster_slots,
};
let window_service = WindowService::new(
blockstore,
verified_receiver,
retransmit_sender,
repair_socket,
ancestor_hashes_socket,
exit,
repair_info,
leader_schedule_cache,
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache_clone,
id,
last_root,
shred_version,
);
rv && is_connected
},
verified_vote_receiver,
| check_if_first_shred_received | identifier_name |
retransmit_stage.rs |
solana_runtime::{bank::Bank, bank_forks::BankForks},
solana_sdk::{clock::Slot, epoch_schedule::EpochSchedule, pubkey::Pubkey, timing::timestamp},
solana_streamer::sendmmsg::{multi_target_send, SendPktsError},
std::{
collections::{BTreeSet, HashMap, HashSet},
net::UdpSocket,
ops::{AddAssign, DerefMut},
sync::{
atomic::{AtomicBool, AtomicU64, AtomicUsize, Ordering},
Arc, Mutex, RwLock,
},
thread::{self, Builder, JoinHandle},
time::{Duration, Instant},
},
};
const MAX_DUPLICATE_COUNT: usize = 2;
const DEFAULT_LRU_SIZE: usize = 10_000;
const CLUSTER_NODES_CACHE_NUM_EPOCH_CAP: usize = 8;
const CLUSTER_NODES_CACHE_TTL: Duration = Duration::from_secs(5);
#[derive(Default)]
struct RetransmitSlotStats {
num_shreds: usize,
num_nodes: usize,
}
impl AddAssign for RetransmitSlotStats {
fn add_assign(&mut self, other: Self) {
*self = Self {
num_shreds: self.num_shreds + other.num_shreds,
num_nodes: self.num_nodes + other.num_nodes,
}
}
}
#[derive(Default)]
struct RetransmitStats {
since: Option<Instant>,
num_nodes: AtomicUsize,
num_addrs_failed: AtomicUsize,
num_shreds: usize,
num_shreds_skipped: AtomicUsize,
total_batches: usize,
total_time: u64,
epoch_fetch: u64,
epoch_cache_update: u64,
retransmit_total: AtomicU64,
compute_turbine_peers_total: AtomicU64,
slot_stats: HashMap<Slot, RetransmitSlotStats>,
unknown_shred_slot_leader: AtomicUsize,
}
impl RetransmitStats {
fn maybe_submit(
&mut self,
root_bank: &Bank,
working_bank: &Bank,
cluster_info: &ClusterInfo,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
) {
const SUBMIT_CADENCE: Duration = Duration::from_secs(2);
let elapsed = self.since.as_ref().map(Instant::elapsed);
if elapsed.unwrap_or(Duration::MAX) < SUBMIT_CADENCE {
return;
}
let num_peers = cluster_nodes_cache
.get(root_bank.slot(), root_bank, working_bank, cluster_info)
.num_peers();
let stats = std::mem::replace(
self,
Self {
since: Some(Instant::now()),
..Self::default()
},
);
datapoint_info!("retransmit-num_nodes", ("count", num_peers, i64));
datapoint_info!(
"retransmit-stage",
("total_time", stats.total_time, i64),
("epoch_fetch", stats.epoch_fetch, i64),
("epoch_cache_update", stats.epoch_cache_update, i64),
("total_batches", stats.total_batches, i64),
("num_nodes", stats.num_nodes.into_inner(), i64),
("num_addrs_failed", stats.num_addrs_failed.into_inner(), i64),
("num_shreds", stats.num_shreds, i64),
(
"num_shreds_skipped",
stats.num_shreds_skipped.into_inner(),
i64
),
("retransmit_total", stats.retransmit_total.into_inner(), i64),
(
"compute_turbine",
stats.compute_turbine_peers_total.into_inner(),
i64
),
(
"unknown_shred_slot_leader",
stats.unknown_shred_slot_leader.into_inner(),
i64
),
);
for (slot, stats) in stats.slot_stats {
datapoint_info!(
"retransmit-stage-slot-stats",
("slot", slot, i64),
("num_shreds", stats.num_shreds, i64),
("num_nodes", stats.num_nodes, i64),
);
}
}
}
// Map of shred (slot, index, type) => list of hash values seen for that key.
type ShredFilter = LruCache<ShredId, Vec<u64>>;
type ShredFilterAndHasher = (ShredFilter, PacketHasher);
// Returns true if shred is already received and should skip retransmit.
fn should_skip_retransmit(shred: &Shred, shreds_received: &Mutex<ShredFilterAndHasher>) -> bool {
let key = shred.id();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
match cache.get_mut(&key) {
Some(sent) if sent.len() >= MAX_DUPLICATE_COUNT => true,
Some(sent) => {
let hash = hasher.hash_shred(shred);
if sent.contains(&hash) {
true
} else {
sent.push(hash);
false
}
}
None => {
let hash = hasher.hash_shred(shred);
cache.put(key, vec![hash]);
false
}
}
}
// Returns true if this is the first time receiving a shred for `shred_slot`.
fn check_if_first_shred_received(
shred_slot: Slot,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
root_bank: &Bank,
) -> bool {
if shred_slot <= root_bank.slot() {
return false;
}
let mut first_shreds_received_locked = first_shreds_received.lock().unwrap();
if first_shreds_received_locked.insert(shred_slot) {
datapoint_info!("retransmit-first-shred", ("slot", shred_slot, i64));
if first_shreds_received_locked.len() > 100 {
*first_shreds_received_locked =
first_shreds_received_locked.split_off(&(root_bank.slot() + 1));
}
true
} else {
false
}
}
fn maybe_reset_shreds_received_cache(
shreds_received: &Mutex<ShredFilterAndHasher>,
hasher_reset_ts: &mut Instant,
) {
const UPDATE_INTERVAL: Duration = Duration::from_secs(1);
if hasher_reset_ts.elapsed() >= UPDATE_INTERVAL {
*hasher_reset_ts = Instant::now();
let mut shreds_received = shreds_received.lock().unwrap();
let (cache, hasher) = shreds_received.deref_mut();
cache.clear();
hasher.reset();
}
}
#[allow(clippy::too_many_arguments)]
fn retransmit( | sockets: &[UdpSocket],
stats: &mut RetransmitStats,
cluster_nodes_cache: &ClusterNodesCache<RetransmitStage>,
hasher_reset_ts: &mut Instant,
shreds_received: &Mutex<ShredFilterAndHasher>,
max_slots: &MaxSlots,
first_shreds_received: &Mutex<BTreeSet<Slot>>,
rpc_subscriptions: Option<&RpcSubscriptions>,
) -> Result<(), RecvTimeoutError> {
const RECV_TIMEOUT: Duration = Duration::from_secs(1);
let mut shreds = shreds_receiver.recv_timeout(RECV_TIMEOUT)?;
let mut timer_start = Measure::start("retransmit");
shreds.extend(shreds_receiver.try_iter().flatten());
stats.num_shreds += shreds.len();
stats.total_batches += 1;
let mut epoch_fetch = Measure::start("retransmit_epoch_fetch");
let (working_bank, root_bank) = {
let bank_forks = bank_forks.read().unwrap();
(bank_forks.working_bank(), bank_forks.root_bank())
};
epoch_fetch.stop();
stats.epoch_fetch += epoch_fetch.as_us();
let mut epoch_cache_update = Measure::start("retransmit_epoch_cache_update");
maybe_reset_shreds_received_cache(shreds_received, hasher_reset_ts);
epoch_cache_update.stop();
stats.epoch_cache_update += epoch_cache_update.as_us();
let socket_addr_space = cluster_info.socket_addr_space();
let retransmit_shred = |shred: &Shred, socket: &UdpSocket| {
if should_skip_retransmit(shred, shreds_received) {
stats.num_shreds_skipped.fetch_add(1, Ordering::Relaxed);
return 0;
}
let shred_slot = shred.slot();
max_slots
.retransmit
.fetch_max(shred_slot, Ordering::Relaxed);
if let Some(rpc_subscriptions) = rpc_subscriptions {
if check_if_first_shred_received(shred_slot, first_shreds_received, &root_bank) {
rpc_subscriptions.notify_slot_update(SlotUpdate::FirstShredReceived {
slot: shred_slot,
timestamp: timestamp(),
});
}
}
let mut compute_turbine_peers = Measure::start("turbine_start");
// TODO: consider using root-bank here for leader lookup!
// Shreds' signatures should be verified before they reach here, and if
// the leader is unknown they should fail signature check. So here we
// should expect to know the slot leader and otherwise skip the shred.
let slot_leader =
match leader_schedule_cache.slot_leader_at(shred_slot, Some(&working_bank)) {
Some(pubkey) => pubkey,
None => {
stats
.unknown_shred_slot_leader
.fetch_add(1, Ordering::Relaxed);
return 0;
}
};
let cluster_nodes =
cluster_nodes_cache.get(shred_slot, &root_bank, &working_bank, cluster_info);
let addrs: Vec<_> = cluster_nodes
.get_retransmit_addrs(slot_leader, shred, &root_bank, DATA_PLANE_FANOUT)
.into_iter()
.filter(|addr| ContactInfo::is_valid_address(addr, socket_addr_space))
.collect();
compute_turbine_peers.stop();
stats
.compute_turbine_peers_total
.fetch_add(compute_turbine_peers.as_us(), Ordering::Relaxed);
let mut retransmit_time = Measure::start("retransmit_to");
let num_nodes = match multi_target_send(socket, &shred.payload, &addrs) {
Ok(()) => addrs.len(),
Err(SendPktsError::IoError(ioerr, num_failed)) => {
stats
.num_addrs_failed
.fetch_add(num_failed, Ordering::Relaxed);
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
addrs.len(),
);
addrs.len() - num_failed
}
};
retransmit_time.stop();
stats.num_nodes.fetch_add(num_nodes, Ordering::Relaxed);
stats
.retransmit_total
.fetch_add(retransmit_time.as_us(), Ordering::Relaxed);
num_nodes
};
fn merge<K, V>(mut acc: HashMap<K, V>, other: HashMap<K, V>) -> HashMap<K, V>
where
K: Eq + std::hash::Hash,
V: Default + AddAssign,
{
if acc.len() < other.len() {
return merge(other, acc);
}
for (key, value) in other {
*acc.entry(key).or_default() += value;
}
acc
}
let slot_stats = thread_pool.install(|| {
shreds
.into_par_iter()
.with_min_len(4)
.map(|shred| {
let index = thread_pool.current_thread_index().unwrap();
let socket = &sockets[index % sockets.len()];
let num_nodes = retransmit_shred(&shred, socket);
(shred.slot(), num_nodes)
})
.fold(
HashMap::<Slot, RetransmitSlotStats>::new,
|mut acc, (slot, num_nodes)| {
let stats = acc.entry(slot).or_default();
stats.num_nodes += num_nodes;
stats.num_shreds += 1;
acc
},
)
.reduce(HashMap::new, merge)
});
stats.slot_stats = merge(std::mem::take(&mut stats.slot_stats), slot_stats);
timer_start.stop();
stats.total_time += timer_start.as_us();
stats.maybe_submit(&root_bank, &working_bank, cluster_info, cluster_nodes_cache);
Ok(())
}
/// Service to retransmit messages from the leader or layer 1 to relevant peer nodes.
/// See `cluster_info` for network layer definitions.
/// # Arguments
/// * `sockets` - Sockets to read from.
/// * `bank_forks` - The BankForks structure, used to fetch the working and root banks
/// * `leader_schedule_cache` - The leader schedule, used to look up slot leaders when verifying shreds
/// * `cluster_info` - Cluster peer info; kept updated and populated by the bank and via gossip
/// * `shreds_receiver` - Receive channel for shreds to be retransmitted to all the layer 1 nodes.
pub fn retransmitter(
sockets: Arc<Vec<UdpSocket>>,
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
cluster_info: Arc<ClusterInfo>,
shreds_receiver: Receiver<Vec<Shred>>,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
) -> JoinHandle<()> {
let cluster_nodes_cache = ClusterNodesCache::<RetransmitStage>::new(
CLUSTER_NODES_CACHE_NUM_EPOCH_CAP,
CLUSTER_NODES_CACHE_TTL,
);
let mut hasher_reset_ts = Instant::now();
let mut stats = RetransmitStats::default();
let shreds_received = Mutex::new((LruCache::new(DEFAULT_LRU_SIZE), PacketHasher::default()));
let first_shreds_received = Mutex::<BTreeSet<Slot>>::default();
let num_threads = get_thread_count().min(8).max(sockets.len());
let thread_pool = ThreadPoolBuilder::new()
.num_threads(num_threads)
.thread_name(|i| format!("retransmit-{}", i))
.build()
.unwrap();
Builder::new()
.name("solana-retransmitter".to_string())
.spawn(move || {
trace!("retransmitter started");
loop {
match retransmit(
&thread_pool,
&bank_forks,
&leader_schedule_cache,
&cluster_info,
&shreds_receiver,
&sockets,
&mut stats,
&cluster_nodes_cache,
&mut hasher_reset_ts,
&shreds_received,
&max_slots,
&first_shreds_received,
rpc_subscriptions.as_deref(),
) {
Ok(()) => (),
Err(RecvTimeoutError::Timeout) => (),
Err(RecvTimeoutError::Disconnected) => break,
}
}
trace!("exiting retransmitter");
})
.unwrap()
}
pub struct RetransmitStage {
retransmit_thread_handle: JoinHandle<()>,
window_service: WindowService,
cluster_slots_service: ClusterSlotsService,
}
impl RetransmitStage {
#[allow(clippy::new_ret_no_self)]
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
bank_forks: Arc<RwLock<BankForks>>,
leader_schedule_cache: Arc<LeaderScheduleCache>,
blockstore: Arc<Blockstore>,
cluster_info: Arc<ClusterInfo>,
retransmit_sockets: Arc<Vec<UdpSocket>>,
repair_socket: Arc<UdpSocket>,
ancestor_hashes_socket: Arc<UdpSocket>,
verified_receiver: Receiver<Vec<PacketBatch>>,
exit: Arc<AtomicBool>,
cluster_slots_update_receiver: ClusterSlotsUpdateReceiver,
epoch_schedule: EpochSchedule,
cfg: Option<Arc<AtomicBool>>,
shred_version: u16,
cluster_slots: Arc<ClusterSlots>,
duplicate_slots_reset_sender: DuplicateSlotsResetSender,
verified_vote_receiver: VerifiedVoteReceiver,
repair_validators: Option<HashSet<Pubkey>>,
completed_data_sets_sender: CompletedDataSetsSender,
max_slots: Arc<MaxSlots>,
rpc_subscriptions: Option<Arc<RpcSubscriptions>>,
duplicate_slots_sender: Sender<Slot>,
ancestor_hashes_replay_update_receiver: AncestorHashesReplayUpdateReceiver,
) -> Self {
let (retransmit_sender, retransmit_receiver) = unbounded();
let retransmit_thread_handle = retransmitter(
retransmit_sockets,
bank_forks.clone(),
leader_schedule_cache.clone(),
cluster_info.clone(),
retransmit_receiver,
max_slots,
rpc_subscriptions,
);
let cluster_slots_service = ClusterSlotsService::new(
blockstore.clone(),
cluster_slots.clone(),
bank_forks.clone(),
cluster_info.clone(),
cluster_slots_update_receiver,
exit.clone(),
);
let leader_schedule_cache_clone = leader_schedule_cache.clone();
let repair_info = RepairInfo {
bank_forks,
epoch_schedule,
duplicate_slots_reset_sender,
repair_validators,
cluster_info,
cluster_slots,
};
let window_service = WindowService::new(
blockstore,
verified_receiver,
retransmit_sender,
repair_socket,
ancestor_hashes_socket,
exit,
repair_info,
leader_schedule_cache,
move |id, shred, working_bank, last_root| {
let is_connected = cfg
.as_ref()
.map(|x| x.load(Ordering::Relaxed))
.unwrap_or(true);
let rv = should_retransmit_and_persist(
shred,
working_bank,
&leader_schedule_cache_clone,
id,
last_root,
shred_version,
);
rv && is_connected
},
verified_vote_receiver,
| thread_pool: &ThreadPool,
bank_forks: &RwLock<BankForks>,
leader_schedule_cache: &LeaderScheduleCache,
cluster_info: &ClusterInfo,
shreds_receiver: &Receiver<Vec<Shred>>, | random_line_split |
tbs.rs | //! hash functions for DNSSec operations
use super::rdata::{sig, DNSSECRData, SIG};
use crate::error::*;
use crate::rr::dnssec::Algorithm;
use crate::rr::{DNSClass, Name, RData, Record, RecordType};
use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode};
/// Data To Be Signed.
pub struct TBS(Vec<u8>);
impl<'a> From<&'a [u8]> for TBS {
fn from(slice: &'a [u8]) -> Self {
Self(slice.to_owned())
}
}
impl AsRef<[u8]> for TBS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // a panic here indicates a coding error
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature becomes valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
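// --- Editor's illustrative sketch (std-only, not part of the original) ---
// The RDATA length is only known after the RDATA is serialized, which is why
// `rrset_tbs` renders each record's RDATA into a scratch buffer before
// emitting the u16 length and then the bytes. The `emit_*` layer of
// `BinEncoder` reduces to plain `Vec` appends in this simplified stand-in:
fn emit_length_prefixed(out: &mut Vec<u8>, render_rdata: impl Fn(&mut Vec<u8>)) {
    let mut scratch = Vec::new();
    render_rdata(&mut scratch); // serialize the RDATA first
    out.extend_from_slice(&(scratch.len() as u16).to_be_bytes()); // RDLENGTH
    out.extend_from_slice(&scratch); // RDATA
}
// --- end sketch ---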
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn | (
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> {
// To calculate the name:
// let rrsig_labels = the value of the RRSIG Labels field
//
// let fqdn = RRset's fully qualified domain name in
// canonical form
//
// let fqdn_labels = Label count of the fqdn above.
let fqdn_labels = name.num_labels();
// if rrsig_labels = fqdn_labels,
// name = fqdn
if fqdn_labels == num_labels {
return Ok(name.clone());
}
// if rrsig_labels < fqdn_labels,
// name = "*." | the rightmost rrsig_label labels of the
// fqdn
if num_labels < fqdn_labels {
let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap();
let rightmost = name.trim_to(num_labels as usize);
if !rightmost.is_root() {
star_name = star_name.append_name(&rightmost)?;
return Ok(star_name);
}
return Ok(star_name);
}
//
// if rrsig_labels > fqdn_labels
// the RRSIG RR did not pass the necessary validation
// checks and MUST NOT be used to authenticate this
// RRset.
Err(format!("could not determine name from {}", name).into())
}
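// --- Editor's illustrative sketch (std-only, not part of the original) ---
// A string-based model of the RFC 4035 rule implemented above; dotted
// strings stand in for trust-dns's `Name` type, which is an assumption made
// for the sake of a self-contained example.
fn rfc4035_name(fqdn: &str, rrsig_labels: usize) -> Option<String> {
    let labels: Vec<&str> = fqdn.split('.').filter(|label| !label.is_empty()).collect();
    if rrsig_labels == labels.len() {
        Some(fqdn.to_string()) // label counts match: use the fqdn unchanged
    } else if rrsig_labels < labels.len() {
        // keep the rightmost rrsig_labels labels, prepend the wildcard label
        let tail = labels[labels.len() - rrsig_labels..].join(".");
        Some(format!("*.{}.", tail))
    } else {
        None // rrsig_labels > fqdn_labels: the RRSIG fails validation
    }
}
// e.g. rfc4035_name("host.example.com.", 2) == Some("*.example.com.".to_string())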
| rrset_tbs_with_sig | identifier_name |
tbs.rs | //! hash functions for DNSSec operations
use super::rdata::{sig, DNSSECRData, SIG};
use crate::error::*;
use crate::rr::dnssec::Algorithm;
use crate::rr::{DNSClass, Name, RData, Record, RecordType};
use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode};
/// Data To Be Signed.
pub struct TBS(Vec<u8>);
impl<'a> From<&'a [u8]> for TBS {
fn from(slice: &'a [u8]) -> Self {
Self(slice.to_owned())
}
}
impl AsRef<[u8]> for TBS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // a panic here indicates a coding error
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature becomes valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> | let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> {
// To calculate the name:
// let rrsig_labels = the value of the RRSIG Labels field
//
// let fqdn = RRset's fully qualified domain name in
// canonical form
//
// let fqdn_labels = Label count of the fqdn above.
let fqdn_labels = name.num_labels();
// if rrsig_labels = fqdn_labels,
// name = fqdn
if fqdn_labels == num_labels {
return Ok(name.clone());
}
// if rrsig_labels < fqdn_labels,
// name = "*." | the rightmost rrsig_label labels of the
// fqdn
if num_labels < fqdn_labels {
let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap();
let rightmost = name.trim_to(num_labels as usize);
if !rightmost.is_root() {
star_name = star_name.append_name(&rightmost)?;
return Ok(star_name);
}
return Ok(star_name);
}
//
// if rrsig_labels > fqdn_labels
// the RRSIG RR did not pass the necessary validation
// checks and MUST NOT be used to authenticate this
// RRset.
Err(format!("could not determine name from {}", name).into())
}
| {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations... | identifier_body |
tbs.rs | //! hash functions for DNSSec operations
use super::rdata::{sig, DNSSECRData, SIG};
use crate::error::*;
use crate::rr::dnssec::Algorithm;
use crate::rr::{DNSClass, Name, RData, Record, RecordType};
use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode};
/// Data To Be Signed.
pub struct TBS(Vec<u8>);
impl<'a> From<&'a [u8]> for TBS {
fn from(slice: &'a [u8]) -> Self {
Self(slice.to_owned())
}
}
impl AsRef<[u8]> for TBS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // a panic here indicates a coding error
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - Original TTL is the TTL as specified in the SOA zones RRSet associated record
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature becomes valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in the RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new(); | {
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("could not determine name from {}", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
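// A minimal, runnable sketch of calling `rrset_tbs` directly (the values below
// are arbitrary and chosen only for illustration; with an empty record slice
// the TBS reduces to the canonical pre-SIG RDATA emitted by `sig::emit_pre_sig`):
#[cfg(test)]
mod rrset_tbs_example {
    use super::*;

    #[test]
    fn empty_rrset_serializes_only_rrsig_rdata() {
        let name = Name::from_ascii("example.com.").unwrap();
        let tbs = rrset_tbs(
            &name,
            DNSClass::IN,
            2,                    // Labels field: matches the fqdn's label count
            RecordType::A,        // type covered
            Algorithm::RSASHA256, // arbitrary algorithm for the example
            3600,                 // original TTL
            0,                    // signature expiration
            0,                    // signature inception
            0,                    // key tag
            &name,                // signer's name
            &[],                  // no records in the RRset
        )
        .expect("serialization succeeds");
        // The pre-SIG RDATA alone is non-empty.
        assert!(!tbs.as_ref().is_empty());
    }
}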
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> {
// To calculate the name:
// let rrsig_labels = the value of the RRSIG Labels field
//
// let fqdn = RRset's fully qualified domain name in
// canonical form
//
// let fqdn_labels = Label count of the fqdn above.
let fqdn_labels = name.num_labels();
// if rrsig_labels = fqdn_labels,
// name = fqdn
if fqdn_labels == num_labels {
return Ok(name.clone());
}
// if rrsig_labels < fqdn_labels,
// name = "*." | the rightmost rrsig_label labels of the
// fqdn
if num_labels < fqdn_labels {
let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap();
let rightmost = name.trim_to(num_labels as usize);
if !rightmost.is_root() {
star_name = star_name.append_name(&rightmost)?;
return Ok(star_name);
}
return Ok(star_name);
}
//
// if rrsig_labels > fqdn_labels
// the RRSIG RR did not pass the necessary validation
// checks and MUST NOT be used to authenticate this
// RRset.
Err(format!("could not determine name from {}", name).into())
} | random_line_split |
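// Worked illustration of the RFC 4035 name rules implemented by
// `determine_name` above (a sketch assuming trust-dns' `Name::from_ascii`
// constructor; label counts exclude the root):
#[cfg(test)]
mod determine_name_example {
    use super::*;

    #[test]
    fn wildcard_and_exact_names() {
        let fqdn = Name::from_ascii("www.example.com.").unwrap();
        // rrsig_labels == fqdn_labels: the fqdn is used unchanged.
        assert_eq!(determine_name(&fqdn, 3).unwrap(), fqdn);
        // rrsig_labels < fqdn_labels: the RRset was synthesized from a
        // wildcard, so the name becomes "*." | rightmost two labels.
        let expected = Name::from_ascii("*.example.com.").unwrap();
        assert_eq!(determine_name(&fqdn, 2).unwrap(), expected);
        // rrsig_labels > fqdn_labels: validation failure.
        assert!(determine_name(&fqdn, 4).is_err());
    }
}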
|
tbs.rs | //! hash functions for DNSSec operations
use super::rdata::{sig, DNSSECRData, SIG};
use crate::error::*;
use crate::rr::dnssec::Algorithm;
use crate::rr::{DNSClass, Name, RData, Record, RecordType};
use crate::serialize::binary::{BinEncodable, BinEncoder, EncodeMode};
/// Data To Be Signed.
pub struct TBS(Vec<u8>);
impl<'a> From<&'a [u8]> for TBS {
fn from(slice: &'a [u8]) -> Self {
Self(slice.to_owned())
}
}
impl AsRef<[u8]> for TBS {
fn as_ref(&self) -> &[u8] {
self.0.as_ref()
}
}
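// Quick demonstration of the `From<&[u8]>` and `AsRef<[u8]>` conveniences
// above; the conversion simply copies the slice into an owned buffer:
#[cfg(test)]
mod tbs_conversion_example {
    use super::TBS;

    #[test]
    fn slice_round_trip() {
        let bytes = [0xde, 0xad, 0xbe, 0xef];
        let tbs = TBS::from(&bytes[..]);
        // The owned buffer is byte-for-byte the input slice.
        assert_eq!(tbs.as_ref(), &bytes[..]);
    }
}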
/// Returns the to-be-signed serialization of the given message.
pub fn message_tbs<M: BinEncodable>(message: &M, pre_sig0: &SIG) -> ProtoResult<TBS> {
// TODO: should perform the serialization and sign block by block to reduce the max memory
// usage, though at 4k max, this is probably unnecessary... For AXFR and large zones, it's
// more important
let mut buf: Vec<u8> = Vec::with_capacity(512);
let mut buf2: Vec<u8> = Vec::with_capacity(512);
{
let mut encoder: BinEncoder<'_> = BinEncoder::with_mode(&mut buf, EncodeMode::Normal);
assert!(sig::emit_pre_sig(
&mut encoder,
pre_sig0.type_covered(),
pre_sig0.algorithm(),
pre_sig0.num_labels(),
pre_sig0.original_ttl(),
pre_sig0.sig_expiration(),
pre_sig0.sig_inception(),
pre_sig0.key_tag(),
pre_sig0.signer_name(),
)
.is_ok());
// need a separate encoder here, as the encoding references absolute positions
// inside the buffer. If the buffer already contains the sig0 RDATA, offsets
// are wrong and the signature won't match.
let mut encoder2: BinEncoder<'_> = BinEncoder::with_mode(&mut buf2, EncodeMode::Signing);
message.emit(&mut encoder2).unwrap(); // coding error if this panics (I think?)
}
buf.append(&mut buf2);
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set.
///
/// # Arguments
///
/// * `name` - RRset record name
/// * `dns_class` - DNSClass, i.e. IN, of the records
/// * `num_labels` - number of labels in the name, needed to deal with `*.example.com`
/// * `type_covered` - RecordType of the RRSet being hashed
/// * `algorithm` - The Algorithm type used for the hashing
/// * `original_ttl` - the original TTL of the RRset, i.e. the value carried in the RRSIG Original TTL field
/// * `sig_expiration` - the epoch seconds of when this hashed signature will expire
/// * `sig_inception` - the epoch seconds of when this hashed signature becomes valid
/// * `signer_name` - label of the entity responsible for signing this hash
/// * `records` - RRSet to hash
///
/// # Returns
///
/// the binary hash of the specified RRSet and associated information
// FIXME: OMG, there are a ton of asserts in here...
#[allow(clippy::too_many_arguments)]
pub fn rrset_tbs(
name: &Name,
dns_class: DNSClass,
num_labels: u8,
type_covered: RecordType,
algorithm: Algorithm,
original_ttl: u32,
sig_expiration: u32,
sig_inception: u32,
key_tag: u16,
signer_name: &Name,
records: &[Record],
) -> ProtoResult<TBS> {
// TODO: change this to a BTreeSet so that it's preordered, no sort necessary
let mut rrset: Vec<&Record> = Vec::new();
// collect only the records for this rrset
for record in records {
if dns_class == record.dns_class()
&& type_covered == record.rr_type()
&& name == record.name()
{
rrset.push(record);
}
}
// put records in canonical order
rrset.sort();
let name = determine_name(name, num_labels)?;
// TODO: rather than buffering here, use the Signer/Verifier? might mean fewer allocations...
let mut buf: Vec<u8> = Vec::new();
{
let mut encoder: BinEncoder<'_> = BinEncoder::new(&mut buf);
encoder.set_canonical_names(true);
// signed_data = RRSIG_RDATA | RR(1) | RR(2)... where
//
// "|" denotes concatenation
//
// RRSIG_RDATA is the wire format of the RRSIG RDATA fields
// with the Signature field excluded and the Signer's Name
// in canonical form.
assert!(sig::emit_pre_sig(
&mut encoder,
type_covered,
algorithm,
name.num_labels(),
original_ttl,
sig_expiration,
sig_inception,
key_tag,
signer_name,
)
.is_ok());
// construct the rrset signing data
for record in rrset {
// RR(i) = name | type | class | OrigTTL | RDATA length | RDATA
//
// name is calculated according to the function in RFC 4035
assert!(name
.to_lowercase()
.emit_as_canonical(&mut encoder, true)
.is_ok());
//
// type is the RRset type and all RRs in the class
assert!(type_covered.emit(&mut encoder).is_ok());
//
// class is the RRset's class
assert!(dns_class.emit(&mut encoder).is_ok());
//
// OrigTTL is the value from the RRSIG Original TTL field
assert!(encoder.emit_u32(original_ttl).is_ok());
//
// RDATA length
// TODO: add support to the encoder to set a marker to go back and write the length
let mut rdata_buf = Vec::new();
{
let mut rdata_encoder = BinEncoder::new(&mut rdata_buf);
rdata_encoder.set_canonical_names(true);
if let Some(rdata) = record.data() {
assert!(rdata.emit(&mut rdata_encoder).is_ok());
}
}
assert!(encoder.emit_u16(rdata_buf.len() as u16).is_ok());
//
// All names in the RDATA field are in canonical form (set above)
assert!(encoder.emit_vec(&rdata_buf).is_ok());
}
}
Ok(TBS(buf))
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the RRSIG record.
///
/// # Arguments
///
/// * `rrsig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_rrsig(rrsig: &Record, records: &[Record]) -> ProtoResult<TBS> {
if let Some(RData::DNSSEC(DNSSECRData::SIG(ref sig))) = rrsig.data() {
rrset_tbs_with_sig(rrsig.name(), rrsig.dns_class(), sig, records)
} else {
Err(format!("RRSIG record for {} does not contain SIG RDATA", rrsig.name()).into())
}
}
/// Returns the to-be-signed serialization of the given record set using the information
/// provided from the SIG record.
///
/// # Arguments
///
/// * `name` - labels of the record to sign
/// * `dns_class` - DNSClass of the RRSet, i.e. IN
/// * `sig` - SIG or RRSIG record, which was produced from the RRSet
/// * `records` - RRSet records to sign with the information in the `rrsig`
///
/// # Return
///
/// binary hash of the RRSet with the information from the RRSIG record
pub fn rrset_tbs_with_sig(
name: &Name,
dns_class: DNSClass,
sig: &SIG,
records: &[Record],
) -> ProtoResult<TBS> {
rrset_tbs(
name,
dns_class,
sig.num_labels(),
sig.type_covered(),
sig.algorithm(),
sig.original_ttl(),
sig.sig_expiration(),
sig.sig_inception(),
sig.key_tag(),
sig.signer_name(),
records,
)
}
/// [RFC 4035](https://tools.ietf.org/html/rfc4035), DNSSEC Protocol Modifications, March 2005
///
/// ```text
///
/// 5.3.2. Reconstructing the Signed Data
/// ...
/// To calculate the name:
/// let rrsig_labels = the value of the RRSIG Labels field
///
/// let fqdn = RRset's fully qualified domain name in
/// canonical form
///
/// let fqdn_labels = Label count of the fqdn above.
///
/// if rrsig_labels = fqdn_labels,
/// name = fqdn
///
/// if rrsig_labels < fqdn_labels,
/// name = "*." | the rightmost rrsig_label labels of the
/// fqdn
///
/// if rrsig_labels > fqdn_labels
/// the RRSIG RR did not pass the necessary validation
/// checks and MUST NOT be used to authenticate this
/// RRset.
///
/// The canonical forms for names and RRsets are defined in [RFC4034].
/// ```
pub fn determine_name(name: &Name, num_labels: u8) -> Result<Name, ProtoError> {
// To calculate the name:
// let rrsig_labels = the value of the RRSIG Labels field
//
// let fqdn = RRset's fully qualified domain name in
// canonical form
//
// let fqdn_labels = Label count of the fqdn above.
let fqdn_labels = name.num_labels();
// if rrsig_labels = fqdn_labels,
// name = fqdn
if fqdn_labels == num_labels |
// if rrsig_labels < fqdn_labels,
// name = "*." | the rightmost rrsig_label labels of the
// fqdn
if num_labels < fqdn_labels {
let mut star_name: Name = Name::from_labels(vec![b"*" as &[u8]]).unwrap();
let rightmost = name.trim_to(num_labels as usize);
if !rightmost.is_root() {
star_name = star_name.append_name(&rightmost)?;
return Ok(star_name);
}
return Ok(star_name);
}
//
// if rrsig_labels > fqdn_labels
// the RRSIG RR did not pass the necessary validation
// checks and MUST NOT be used to authenticate this
// RRset.
Err(format!("could not determine name from {}", name).into())
}
| {
return Ok(name.clone());
} | conditional_block |
mod.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use aes::cipher::generic_array::GenericArray;
use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher};
use fuchsia_inspect::{self as inspect, Property};
use fuchsia_inspect_derive::{AttachError, Inspect};
use lru_cache::LruCache;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::convert::{TryFrom, TryInto};
use std::{fs, io, path};
use tracing::{debug, warn};
use crate::advertisement::bloom_filter;
mod error;
pub mod keys;
pub mod packets;
pub use error::Error;
/// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ModelId(u32);
impl TryFrom<u32> for ModelId {
type Error = Error;
fn try_from(src: u32) -> Result<Self, Self::Error> {
// u24::MAX
if src > 0xffffff {
return Err(Error::InvalidModelId(src));
}
Ok(Self(src))
}
}
impl From<ModelId> for [u8; 3] {
fn from(src: ModelId) -> [u8; 3] {
let mut bytes = [0; 3];
bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]);
bytes
}
}
/// A key used during the Fast Pair Pairing Procedure.
/// This key is a temporary value that lives for the lifetime of a procedure.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SharedSecret([u8; 16]);
impl SharedSecret {
pub fn new(bytes: [u8; 16]) -> Self {
Self(bytes)
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0
}
/// Decrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the decrypted payload.
pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.decrypt_block(&mut block);
block.into()
}
/// Encrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the encrypted payload.
pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.encrypt_block(&mut block);
block.into()
}
}
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length in bytes of the bloom-filtered key data, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
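// Worked example of the 0bLLLLTTTT header byte documented in
// `service_data_internal` above: a 4-byte filter with the "show UI" type
// nibble packs to 0x40, matching the payload checked in the tests below.
#[cfg(test)]
mod header_byte_example {
    #[test]
    fn packs_length_and_type_nibbles() {
        let filter_len: u8 = 4; // length in bytes of the bloom-filtered key data
        let show_ui: u8 = 0b0000; // type nibble requesting the UI notification
        assert_eq!((filter_len << 4) | show_ui, 0x40);
    }
}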
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn invalid_model_id_conversion_is_error() {
let invalid_id = 0x1ffabcd;
assert_matches!(ModelId::try_from(invalid_id), Err(_));
}
#[test]
fn empty_account_key_list_service_data() {
let empty = AccountKeyList::with_capacity_and_keys(1, vec![]);
let service_data = empty.service_data().expect("can build service data");
let expected = [0x00];
assert_eq!(service_data, expected);
}
#[test]
fn oversized_service_data_is_error() {
// Building an AccountKeyList of 11 elements will result in an oversized service data.
// In the future, this test will be obsolete as the AccountKeyList will be bounded in its
// construction.
let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect();
let oversized = AccountKeyList::with_capacity_and_keys(15, keys);
let result = oversized.service_data();
assert_matches!(result, Err(Error::InternalError(_)));
}
#[test]
fn account_key_list_service_data() {
let example_key = AccountKey::new([1; 16]);
let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]);
let salt = 0x14;
// Because the service data is generated with a random salt value, we test the internal
// method with a controlled salt value so that the test is deterministic.
let service_data = keys.service_data_internal(salt).expect("can build service_data");
let expected = [
0x40, // Length = 4, Show UI indication
0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list
0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14)
];
assert_eq!(service_data, expected);
}
/// Tests AES-128 encryption & decryption using an Account Key as the Secret Key.
/// The contents of this test case are pulled from the GFPS specification.
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption
#[test]
fn aes_128_encryption_roundtrip() {
let message = [
0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
0x97, 0xEA,
];
let account_key = AccountKey::new([
0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
0x32, 0x1D,
]);
let encrypted = account_key.shared_secret().encrypt(&message);
let expected = [
0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0,
0x9E, 0x9C,
];
assert_eq!(encrypted, expected);
let decrypted = account_key.shared_secret().decrypt(&encrypted);
assert_eq!(decrypted, message);
}
#[test]
fn account_key_lru_eviction() | // Marking a key as used should "refresh" the key's position. It is no longer the LRU key
// that will be evicted.
let account_key2 = AccountKey::new([2; 16]);
assert_matches!(list.mark_used(&account_key2), Ok(_));
// Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore).
let next_key = AccountKey::new([max + 2; 16]);
list.save(next_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&next_key));
assert!(list.keys.contains_key(&account_key2));
}
#[test]
fn mark_used_nonexistent_key_is_error() {
let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]);
let key = AccountKey::new([1; 16]);
assert_matches!(list.mark_used(&key), Err(_));
}
#[fuchsia::test]
fn load_keys_from_nonexistent_file() {
const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json";
expect_keys_at_path(EXAMPLE_FILEPATH, vec![]);
}
#[fuchsia::test]
fn commit_and_load_keys_to_and_from_a_file() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
let example_keys = vec![key1, key2, key3];
let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone());
keys.store().expect("can store Account Keys");
expect_keys_at_path(keys.path(), example_keys);
}
#[fuchsia::test]
fn lru_eviction_from_storage() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
// New collection with maximum capacity of 2 keys.
let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key1.clone());
expect_keys_at_path(keys.path(), vec![key1.clone()]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key2.clone());
expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]);
// Because `key1` already exists in the collection, we expect a cache "refresh" so the key
// ordering in storage should change.
keys.save(key1.clone());
// e.g. the LRU order should change whereby `key2` is now the LRU.
expect_keys_at_path(keys.path(), vec![key2, key1.clone()]);
// The collection is at max capacity so `key2` (LRU) should be evicted. Local storage
// should be updated.
keys.save(key3.clone());
expect_keys_at_path(keys.path(), vec![key1, key3]);
}
}
| {
let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]);
let max: u8 = MAX_ACCOUNT_KEYS as u8;
for i in 1..max + 1 {
let key = AccountKey::new([i; 16]);
list.save(key.clone());
assert_eq!(list.keys().len(), i as usize);
assert!(list.keys.contains_key(&key));
}
// Adding a new key results in the eviction of the LRU key.
assert_eq!(list.keys().len(), max as usize);
let new_key = AccountKey::new([max + 1; 16]);
list.save(new_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&new_key));
// LRU Key is no longer stored.
let first_key = AccountKey::new([1; 16]);
assert!(!list.keys.contains_key(&first_key));
| identifier_body |
mod.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use aes::cipher::generic_array::GenericArray;
use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher};
use fuchsia_inspect::{self as inspect, Property};
use fuchsia_inspect_derive::{AttachError, Inspect};
use lru_cache::LruCache;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::convert::{TryFrom, TryInto};
use std::{fs, io, path};
use tracing::{debug, warn};
use crate::advertisement::bloom_filter;
mod error;
pub mod keys;
pub mod packets;
pub use error::Error;
/// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ModelId(u32);
impl TryFrom<u32> for ModelId {
type Error = Error;
fn try_from(src: u32) -> Result<Self, Self::Error> {
// u24::MAX
if src > 0xffffff {
return Err(Error::InvalidModelId(src));
}
Ok(Self(src))
}
}
impl From<ModelId> for [u8; 3] {
fn from(src: ModelId) -> [u8; 3] {
let mut bytes = [0; 3];
bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]);
bytes
}
}
/// A key used during the Fast Pair Pairing Procedure.
/// This key is a temporary value that lives for the lifetime of a procedure.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SharedSecret([u8; 16]);
impl SharedSecret {
pub fn new(bytes: [u8; 16]) -> Self {
Self(bytes)
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0
}
/// Decrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the decrypted payload.
pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.decrypt_block(&mut block);
block.into()
}
/// Encrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the encrypted payload.
pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.encrypt_block(&mut block);
block.into()
} | /// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length in bytes of the bloom-filtered key data, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
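// Round-trip sketch of the persistence format handled by `KeyList`:
// `serde_json` renders the keys as a JSON array of 16-byte arrays, and
// `KeyList::load` accepts any `io::Read` over those bytes.
#[cfg(test)]
mod keylist_serde_example {
    use super::*;

    #[test]
    fn json_round_trip() {
        let original = KeyList(vec![AccountKey::new([7; 16])]);
        let encoded = serde_json::to_vec(&original).expect("serializes");
        let decoded = KeyList::load(&encoded[..]).expect("parses");
        assert_eq!(decoded.0, original.0);
    }
}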
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn invalid_model_id_conversion_is_error() {
let invalid_id = 0x1ffabcd;
assert_matches!(ModelId::try_from(invalid_id), Err(_));
}
#[test]
fn empty_account_key_list_service_data() {
let empty = AccountKeyList::with_capacity_and_keys(1, vec![]);
let service_data = empty.service_data().expect("can build service data");
let expected = [0x00];
assert_eq!(service_data, expected);
}
#[test]
fn oversized_service_data_is_error() {
// Building an AccountKeyList of 11 elements will result in an oversized service data.
// In the future, this test will be obsolete as the AccountKeyList will be bounded in its
// construction.
let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect();
let oversized = AccountKeyList::with_capacity_and_keys(15, keys);
let result = oversized.service_data();
assert_matches!(result, Err(Error::InternalError(_)));
}
#[test]
fn account_key_list_service_data() {
let example_key = AccountKey::new([1; 16]);
let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]);
let salt = 0x14;
// Because the service data is generated with a random salt value, we test the internal
// method with a controlled salt value so that the test is deterministic.
let service_data = keys.service_data_internal(salt).expect("can build service_data");
let expected = [
0x40, // Length = 4, Show UI indication
0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list
0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14)
];
assert_eq!(service_data, expected);
}
/// Tests AES-128 encryption & decryption using an Account Key as the Secret Key.
/// The contents of this test case are pulled from the GFPS specification.
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption
#[test]
fn aes_128_encryption_roundtrip() {
let message = [
0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
0x97, 0xEA,
];
let account_key = AccountKey::new([
0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
0x32, 0x1D,
]);
let encrypted = account_key.shared_secret().encrypt(&message);
let expected = [
0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0,
0x9E, 0x9C,
];
assert_eq!(encrypted, expected);
let decrypted = account_key.shared_secret().decrypt(&encrypted);
assert_eq!(decrypted, message);
}
#[test]
fn account_key_lru_eviction() {
let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]);
let max: u8 = MAX_ACCOUNT_KEYS as u8;
for i in 1..max + 1 {
let key = AccountKey::new([i; 16]);
list.save(key.clone());
assert_eq!(list.keys().len(), i as usize);
assert!(list.keys.contains_key(&key));
}
// Adding a new key results in the eviction of the LRU key.
assert_eq!(list.keys().len(), max as usize);
let new_key = AccountKey::new([max + 1; 16]);
list.save(new_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&new_key));
// LRU Key is no longer stored.
let first_key = AccountKey::new([1; 16]);
assert!(!list.keys.contains_key(&first_key));
// Marking a key as used should "refresh" the key's position. It is no longer the LRU key
// that will be evicted.
let account_key2 = AccountKey::new([2; 16]);
assert_matches!(list.mark_used(&account_key2), Ok(_));
// Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore).
let next_key = AccountKey::new([max + 2; 16]);
list.save(next_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&next_key));
assert!(list.keys.contains_key(&account_key2));
}
#[test]
fn mark_used_nonexistent_key_is_error() {
let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]);
let key = AccountKey::new([1; 16]);
assert_matches!(list.mark_used(&key), Err(_));
}
#[fuchsia::test]
fn load_keys_from_nonexistent_file() {
const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json";
expect_keys_at_path(EXAMPLE_FILEPATH, vec![]);
}
#[fuchsia::test]
fn commit_and_load_keys_to_and_from_a_file() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
let example_keys = vec![key1, key2, key3];
let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone());
keys.store().expect("can store Account Keys");
expect_keys_at_path(keys.path(), example_keys);
}
#[fuchsia::test]
fn lru_eviction_from_storage() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
// New collection with maximum capacity of 2 keys.
let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key1.clone());
expect_keys_at_path(keys.path(), vec![key1.clone()]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key2.clone());
expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]);
// Because `key1` already exists in the collection, we expect a cache "refresh" so the key
// ordering in storage should change.
keys.save(key1.clone());
// e.g. the LRU order should change whereby `key2` is now the LRU.
expect_keys_at_path(keys.path(), vec![key2, key1.clone()]);
// The collection is at max capacity so `key2` (LRU) should be evicted. Local storage
// should be updated.
keys.save(key3.clone());
expect_keys_at_path(keys.path(), vec![key1, key3]);
}
} | }
| random_line_split |
mod.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use aes::cipher::generic_array::GenericArray;
use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher};
use fuchsia_inspect::{self as inspect, Property};
use fuchsia_inspect_derive::{AttachError, Inspect};
use lru_cache::LruCache;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::convert::{TryFrom, TryInto};
use std::{fs, io, path};
use tracing::{debug, warn};
use crate::advertisement::bloom_filter;
mod error;
pub mod keys;
pub mod packets;
pub use error::Error;
/// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ModelId(u32);
impl TryFrom<u32> for ModelId {
type Error = Error;
fn try_from(src: u32) -> Result<Self, Self::Error> {
// u24::MAX
if src > 0xffffff |
Ok(Self(src))
}
}
impl From<ModelId> for [u8; 3] {
fn from(src: ModelId) -> [u8; 3] {
let mut bytes = [0; 3];
bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]);
bytes
}
}
/// A key used during the Fast Pair Pairing Procedure.
/// This key is a temporary value that lives for the lifetime of a procedure.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SharedSecret([u8; 16]);
impl SharedSecret {
pub fn new(bytes: [u8; 16]) -> Self {
Self(bytes)
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0
}
/// Decrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the decrypted payload.
pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.decrypt_block(&mut block);
block.into()
}
/// Encrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the encrypted payload.
pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.encrypt_block(&mut block);
block.into()
}
}
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length in bytes of the bloom-filtered key data, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
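// Sketch of consuming the advertisement payload produced by `service_data`
// above (layout: header byte | filter bytes | salt descriptor | salt); the
// key bytes are arbitrary example values.
#[cfg(test)]
mod service_data_layout_example {
    use super::*;

    #[test]
    fn trailer_carries_descriptor_and_salt() {
        let keys = AccountKeyList::with_capacity_and_keys(5, vec![AccountKey::new([9; 16])]);
        let data = keys.service_data().expect("can build service data");
        // The top nibble of the header byte is the filter length in bytes.
        let filter_len = (data[0] >> 4) as usize;
        assert_eq!(data.len(), 1 + filter_len + 2);
        // The fixed salt descriptor (0x11) precedes the random salt byte.
        assert_eq!(data[1 + filter_len], 0x11);
    }
}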
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn invalid_model_id_conversion_is_error() {
let invalid_id = 0x1ffabcd;
assert_matches!(ModelId::try_from(invalid_id), Err(_));
}
#[test]
fn empty_account_key_list_service_data() {
let empty = AccountKeyList::with_capacity_and_keys(1, vec![]);
let service_data = empty.service_data().expect("can build service data");
let expected = [0x00];
assert_eq!(service_data, expected);
}
#[test]
fn oversized_service_data_is_error() {
// Building an AccountKeyList of 11 elements will result in an oversized service data.
// In the future, this test will be obsolete as the AccountKeyList will be bounded in its
// construction.
let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect();
let oversized = AccountKeyList::with_capacity_and_keys(15, keys);
let result = oversized.service_data();
assert_matches!(result, Err(Error::InternalError(_)));
}
#[test]
fn account_key_list_service_data() {
let example_key = AccountKey::new([1; 16]);
let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]);
let salt = 0x14;
// Because the service data is generated with a random salt value, we test the internal
// method with a controlled salt value so that the test is deterministic.
let service_data = keys.service_data_internal(salt).expect("can build service_data");
let expected = [
0x40, // Length = 4, Show UI indication
0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list
0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14)
];
assert_eq!(service_data, expected);
}
/// Tests AES-128 encryption & decryption using an Account Key as the Secret Key.
/// The contents of this test case are pulled from the GFPS specification.
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption
#[test]
fn aes_128_encryption_roundtrip() {
let message = [
0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
0x97, 0xEA,
];
let account_key = AccountKey::new([
0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
0x32, 0x1D,
]);
let encrypted = account_key.shared_secret().encrypt(&message);
let expected = [
0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0,
0x9E, 0x9C,
];
assert_eq!(encrypted, expected);
let decrypted = account_key.shared_secret().decrypt(&encrypted);
assert_eq!(decrypted, message);
}
#[test]
fn account_key_lru_eviction() {
let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]);
let max: u8 = MAX_ACCOUNT_KEYS as u8;
for i in 1..max + 1 {
let key = AccountKey::new([i; 16]);
list.save(key.clone());
assert_eq!(list.keys().len(), i as usize);
assert!(list.keys.contains_key(&key));
}
// Adding a new key results in the eviction of the LRU key.
assert_eq!(list.keys().len(), max as usize);
let new_key = AccountKey::new([max + 1; 16]);
list.save(new_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&new_key));
// LRU Key is no longer stored.
let first_key = AccountKey::new([1; 16]);
assert!(!list.keys.contains_key(&first_key));
// Marking a key as used should "refresh" the key's position. It is no longer the LRU key
// that will be evicted.
let account_key2 = AccountKey::new([2; 16]);
assert_matches!(list.mark_used(&account_key2), Ok(_));
// Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore).
let next_key = AccountKey::new([max + 2; 16]);
list.save(next_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&next_key));
assert!(list.keys.contains_key(&account_key2));
}
#[test]
fn mark_used_nonexistent_key_is_error() {
let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]);
let key = AccountKey::new([1; 16]);
assert_matches!(list.mark_used(&key), Err(_));
}
#[fuchsia::test]
fn load_keys_from_nonexistent_file() {
const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json";
expect_keys_at_path(EXAMPLE_FILEPATH, vec![]);
}
#[fuchsia::test]
fn commit_and_load_keys_to_and_from_a_file() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
let example_keys = vec![key1, key2, key3];
let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone());
keys.store().expect("can store Account Keys");
expect_keys_at_path(keys.path(), example_keys);
}
#[fuchsia::test]
fn lru_eviction_from_storage() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
// New collection with maximum capacity of 2 keys.
let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key1.clone());
expect_keys_at_path(keys.path(), vec![key1.clone()]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key2.clone());
expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]);
// Because `key1` already exists in the collection, we expect a cache "refresh" so the key
// ordering in storage should change.
keys.save(key1.clone());
// e.g. the LRU order should change whereby `key2` is now the LRU.
expect_keys_at_path(keys.path(), vec![key2, key1.clone()]);
// The collection is at max capacity so `key2` (LRU) should be evicted. Local storage
// should be updated.
keys.save(key3.clone());
expect_keys_at_path(keys.path(), vec![key1, key3]);
}
}
| {
return Err(Error::InvalidModelId(src));
} | conditional_block |
mod.rs | // Copyright 2022 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use aes::cipher::generic_array::GenericArray;
use aes::{Aes128, BlockDecrypt, BlockEncrypt, NewBlockCipher};
use fuchsia_inspect::{self as inspect, Property};
use fuchsia_inspect_derive::{AttachError, Inspect};
use lru_cache::LruCache;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::convert::{TryFrom, TryInto};
use std::{fs, io, path};
use tracing::{debug, warn};
use crate::advertisement::bloom_filter;
mod error;
pub mod keys;
pub mod packets;
pub use error::Error;
/// Represents the 24-bit Model ID assigned to a Fast Pair device upon registration.
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ModelId(u32);
impl TryFrom<u32> for ModelId {
type Error = Error;
fn try_from(src: u32) -> Result<Self, Self::Error> {
// u24::MAX
if src > 0xffffff {
return Err(Error::InvalidModelId(src));
}
Ok(Self(src))
}
}
impl From<ModelId> for [u8; 3] {
fn from(src: ModelId) -> [u8; 3] {
let mut bytes = [0; 3];
bytes[..3].copy_from_slice(&src.0.to_be_bytes()[1..]);
bytes
}
}
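// Sketch (added for illustration; not part of the original source): the
// conversion above keeps the three low-order bytes of the u32 in big-endian
// order. A minimal check of that behavior, assuming the types above:
#[cfg(test)]
mod model_id_byte_order_sketch {
use super::*;
use std::convert::TryFrom;
#[test]
fn keeps_low_three_bytes_big_endian() {
let id = ModelId::try_from(0x123456).expect("fits in 24 bits");
let bytes: [u8; 3] = id.into();
assert_eq!(bytes, [0x12, 0x34, 0x56]);
}
}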
/// A key used during the Fast Pair Pairing Procedure.
/// This key is a temporary value that lives for the lifetime of a procedure.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct SharedSecret([u8; 16]);
impl SharedSecret {
pub fn new(bytes: [u8; 16]) -> Self {
Self(bytes)
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0
}
/// Decrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the decrypted payload.
pub fn decrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.decrypt_block(&mut block);
block.into()
}
/// Encrypts the provided `message` buffer with the AccountKey using AES-128.
/// Returns the encrypted payload.
pub fn encrypt(&self, message: &[u8; 16]) -> [u8; 16] {
let cipher = Aes128::new(GenericArray::from_slice(self.as_bytes()));
let mut block = GenericArray::clone_from_slice(message);
cipher.encrypt_block(&mut block);
block.into()
}
}
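// Note on the intended property (added for illustration): `encrypt` and
// `decrypt` each operate on a single 16-byte AES-128 block, so for any key
// `k` and 16-byte message `m`, `k.decrypt(&k.encrypt(&m)) == m`. The
// spec-derived test `aes_128_encryption_roundtrip` below exercises exactly
// this round trip.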
/// A long-lived key that allows the Provider to be recognized as belonging to a certain user
/// account.
#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)]
pub struct AccountKey(SharedSecret);
impl AccountKey {
pub fn new(bytes: [u8; 16]) -> Self {
Self(SharedSecret::new(bytes))
}
pub fn as_bytes(&self) -> &[u8; 16] {
&self.0.as_bytes()
}
pub fn shared_secret(&self) -> &SharedSecret {
&self.0
}
}
impl From<&SharedSecret> for AccountKey {
fn from(src: &SharedSecret) -> AccountKey {
AccountKey(src.clone())
}
}
/// The maximum number of Account Keys that can be managed by the Fast Pair server. Account Keys
/// will be evicted in an LRU manner as described in the GFPS specification.
/// This limit is chosen as the minimum required by any implementation and provides ample space
/// in the LE advertisement packet.
/// See https://developers.google.com/nearby/fast-pair/specifications/configuration#AccountKeyList
/// for more details.
const MAX_ACCOUNT_KEYS: usize = 5;
/// Manages the set of saved Account Keys.
///
/// By default, the maximum number of keys that will be saved is `MAX_ACCOUNT_KEYS`. When full, the
/// `AccountKeyList` will evict the least recently used Account Key.
///
/// Account Keys are written to isolated persistent storage and are maintained across reboots. The
/// set of saved keys will only be erased on device factory resets.
/// To avoid writing to persistent storage too often, only new Account Keys are written to storage.
/// Writes for existing keys will result in cache "hits" (e.g. LRU ordering updated) but will not be
/// updated in the backing storage file.
pub struct AccountKeyList {
/// The set of saved Account Keys. Keys are evicted in an LRU manner. There is no cache value
/// as we only care about maintaining the keys.
keys: LruCache<AccountKey, ()>,
/// The file path pointing to the isolated persistent storage which saves the Account Keys.
path: path::PathBuf,
/// The number of keys currently saved in the AccountKeyList.
account_key_count: inspect::UintProperty,
}
impl Inspect for &mut AccountKeyList {
fn iattach(self, parent: &inspect::Node, _name: impl AsRef<str>) -> Result<(), AttachError> {
self.account_key_count = parent.create_uint("account_key_count", self.keys.len() as u64);
Ok(())
}
}
impl AccountKeyList {
/// Attempts to load the current set of saved Account Keys from isolated persistent storage.
/// Returns the updated AccountKeyList of keys on success, Error otherwise.
pub fn load() -> Result<Self, Error> {
Self::load_from_path(Self::PERSISTED_ACCOUNT_KEYS_FILEPATH)
}
/// Builds an AccountKey list with the provided `keys`.
/// A random test file path is used to avoid concurrently running tests from reading/writing
/// from/to the same file.
#[cfg(test)]
pub fn with_capacity_and_keys(capacity: usize, keys: Vec<AccountKey>) -> Self {
let mut cache = LruCache::new(capacity);
keys.into_iter().for_each(|k| {
let _ = cache.insert(k, ());
});
let val = rand::thread_rng().gen::<u64>();
let path = format!("data/test_account_keys{}.json", val);
Self { keys: cache, path: path::PathBuf::from(path), account_key_count: Default::default() }
}
#[cfg(test)]
pub fn path(&self) -> String {
self.path.clone().into_os_string().into_string().expect("valid path string")
}
fn update_inspect(&self) {
self.account_key_count.set(self.keys.len() as u64);
}
/// Returns an Iterator over the saved Account Keys.
/// Note: Access via Iterator does not modify LRU state.
pub fn keys(&self) -> impl Iterator<Item = &AccountKey> + ExactSizeIterator {
self.keys.iter().map(|(k, _)| k)
}
/// Marks the provided `key` as used in the LRU cache.
/// Returns Error if the key does not exist in the cache.
pub fn mark_used(&mut self, key: &AccountKey) -> Result<(), Error> {
self.keys.get_mut(&key).map(|_| ()).ok_or(Error::internal("no key to mark as used"))
}
/// Save an Account Key to the persisted set of keys.
pub fn save(&mut self, key: AccountKey) {
// If the `key` already exists, it will be updated in the LRU cache. If the cache is
// full, the least-recently used (LRU) key will be evicted.
if self.keys.insert(key, ()).is_some() {
debug!("Account Key already saved");
}
// Store the updated set of keys in persistent storage.
if let Err(e) = self.store() {
warn!("Couldn't update key list in isolated persistent storage: {:?}", e);
}
self.update_inspect();
}
/// Returns the service data payload associated with the current set of Account Keys.
pub fn service_data(&self) -> Result<Vec<u8>, Error> {
if self.keys.is_empty() {
return Ok(vec![0x0]);
}
let salt = rand::thread_rng().gen::<u8>();
self.service_data_internal(salt)
}
fn service_data_internal(&self, salt: u8) -> Result<Vec<u8>, Error> {
let account_keys_bytes = bloom_filter(self.keys(), salt)?;
let mut result = Vec::new();
// First byte is 0bLLLLTTTT, where L = length of the account key list, T = Type (0b0000 to
// show UI notification, 0b0010 to hide it). The maximum amount of account key data that can
// be represented is 15 bytes (u4::MAX).
let length: u8 = match account_keys_bytes.len().try_into() {
Ok(len) if len <= 15 => len,
_ => return Err(Error::internal("Account key data too large")),
};
// For now, we will always request to show the UI notification (TTTT = 0b0000).
result.push(length << 4);
// Next n bytes are the Bloom-filtered Account Key list.
result.extend(account_keys_bytes);
// The descriptor value associated with the Salt section of the LE advertisement payload.
// Formatted as 0bLLLLTTTT, where L (Length) = 0b0001 and T (Type) = 0b0001. Both are fixed.
const SALT_DESCRIPTOR: u8 = 0x11;
result.push(SALT_DESCRIPTOR);
// Final byte is the Salt value.
result.push(salt);
Ok(result)
}
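// Worked example (added for illustration): with a 4-byte bloom filter the
// header byte is (4 << 4) | 0b0000 = 0x40, i.e. length 4 in the high nibble
// and "show UI" in the low nibble. With salt 0x14 the payload then ends in
// [0x11, 0x14], matching `account_key_list_service_data` in the tests below.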
// Default file path for Account Keys written to isolated persistent storage.
const PERSISTED_ACCOUNT_KEYS_FILEPATH: &'static str = "/data/account_keys.json";
/// Attempts to read and parse the contents of the persistent storage at the provided `path`.
/// Returns an `AccountKeyList` on success, Error otherwise.
fn load_from_path<P: AsRef<path::Path>>(path: P) -> Result<Self, Error> {
let mut this = Self {
keys: LruCache::new(MAX_ACCOUNT_KEYS),
path: path::PathBuf::from(path.as_ref()),
account_key_count: Default::default(),
};
this.load_internal()?;
Ok(this)
}
/// Attempts to update the locally-saved set of keys from persistent storage.
/// Returns Error if the storage file is unable to be opened.
fn load_internal(&mut self) -> Result<(), Error> {
match fs::File::open(&self.path) {
Ok(file) => {
// Build the LRU cache from the contents of the file. Because keys are stored in
// LRU order, we build the cache in the same order to preserve LRU status.
debug!("Reading Account Keys from existing file");
let key_list = KeyList::load(file)?;
key_list.0.into_iter().for_each(|k| {
let _ = self.keys.insert(k, ());
});
Ok(())
}
Err(error) if error.kind() == io::ErrorKind::NotFound => {
debug!("Persistent storage file not found");
Ok(())
}
Err(e) => Err(Error::key_storage(e, "couldn't load key storage file")),
}
}
/// Commits the current set of Account Keys to isolated persistent storage.
/// Keys are stored in LRU order.
fn store(&self) -> Result<(), Error> {
let path = path::Path::new(&self.path);
let file_name = path.file_name().ok_or(Error::key_storage(
io::ErrorKind::InvalidInput.into(),
"couldn't build file name from path",
))?;
let file_path = path.with_file_name(file_name.to_os_string());
let file = fs::File::create(&file_path)
.map_err(|e| Error::key_storage(e, "couldn't create file"))?;
let values = KeyList(self.keys().cloned().collect());
serde_json::to_writer(file, &values)?;
Ok(())
}
}
/// Convenience type for the serialization and deserialization of Account Keys.
#[derive(Serialize, Deserialize)]
struct KeyList(Vec<AccountKey>);
impl KeyList {
fn load<R: io::Read>(reader: R) -> Result<Self, Error> {
serde_json::from_reader(reader).map_err(Into::into)
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use assert_matches::assert_matches;
/// Loads the set of saved Account Keys from storage and verifies that it's equal to the
/// provided `expected_keys`.
#[track_caller]
pub(crate) fn expect_keys_at_path<P: AsRef<path::Path>>(
path: P,
expected_keys: Vec<AccountKey>,
) {
let read_keys = AccountKeyList::load_from_path(path).expect("can read from file");
assert_eq!(read_keys.keys().cloned().collect::<Vec<_>>(), expected_keys);
}
#[test]
fn model_id_from_u32() {
let normal_id = 0x1234;
let id = ModelId::try_from(normal_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x12, 0x34]);
let zero_id = 0;
let id = ModelId::try_from(zero_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0x00, 0x00, 0x00]);
let max_id = 0xffffff;
let id = ModelId::try_from(max_id).expect("valid id");
let id_bytes: [u8; 3] = id.into();
assert_eq!(id_bytes, [0xff, 0xff, 0xff]);
}
#[test]
fn | () {
let invalid_id = 0x1ffabcd;
assert_matches!(ModelId::try_from(invalid_id), Err(_));
}
#[test]
fn empty_account_key_list_service_data() {
let empty = AccountKeyList::with_capacity_and_keys(1, vec![]);
let service_data = empty.service_data().expect("can build service data");
let expected = [0x00];
assert_eq!(service_data, expected);
}
#[test]
fn oversized_service_data_is_error() {
// Building an AccountKeyList of 11 elements will result in an oversized service data.
// In the future, this test will be obsolete as the AccountKeyList will be bounded in its
// construction.
let keys = (0..11_u8).map(|i| AccountKey::new([i; 16])).collect();
let oversized = AccountKeyList::with_capacity_and_keys(15, keys);
let result = oversized.service_data();
assert_matches!(result, Err(Error::InternalError(_)));
}
#[test]
fn account_key_list_service_data() {
let example_key = AccountKey::new([1; 16]);
let keys = AccountKeyList::with_capacity_and_keys(10, vec![example_key]);
let salt = 0x14;
// Because the service data is generated with a random salt value, we test the internal
// method with a controlled salt value so that the test is deterministic.
let service_data = keys.service_data_internal(salt).expect("can build service_data");
let expected = [
0x40, // Length = 4, Show UI indication
0x04, 0x33, 0x00, 0x88, // Bloom filter applied to the Account key list
0x11, 0x14, // Salt descriptor (0x11), Fixed salt value (0x14)
];
assert_eq!(service_data, expected);
}
/// Tests AES-128 encryption & decryption using an Account Key as the Secret Key.
/// The contents of this test case are pulled from the GFPS specification.
/// See https://developers.google.com/nearby/fast-pair/specifications/appendix/testcases#aes_encryption
#[test]
fn aes_128_encryption_roundtrip() {
let message = [
0xF3, 0x0F, 0x4E, 0x78, 0x6C, 0x59, 0xA7, 0xBB, 0xF3, 0x87, 0x3B, 0x5A, 0x49, 0xBA,
0x97, 0xEA,
];
let account_key = AccountKey::new([
0xA0, 0xBA, 0xF0, 0xBB, 0x95, 0x1F, 0xF7, 0xB6, 0xCF, 0x5E, 0x3F, 0x45, 0x61, 0xC3,
0x32, 0x1D,
]);
let encrypted = account_key.shared_secret().encrypt(&message);
let expected = [
0xAC, 0x9A, 0x16, 0xF0, 0x95, 0x3A, 0x3F, 0x22, 0x3D, 0xD1, 0x0C, 0xF5, 0x36, 0xE0,
0x9E, 0x9C,
];
assert_eq!(encrypted, expected);
let decrypted = account_key.shared_secret().decrypt(&encrypted);
assert_eq!(decrypted, message);
}
#[test]
fn account_key_lru_eviction() {
let mut list = AccountKeyList::with_capacity_and_keys(MAX_ACCOUNT_KEYS, vec![]);
let max: u8 = MAX_ACCOUNT_KEYS as u8;
for i in 1..max + 1 {
let key = AccountKey::new([i; 16]);
list.save(key.clone());
assert_eq!(list.keys().len(), i as usize);
assert!(list.keys.contains_key(&key));
}
// Adding a new key results in the eviction of the LRU key.
assert_eq!(list.keys().len(), max as usize);
let new_key = AccountKey::new([max + 1; 16]);
list.save(new_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&new_key));
// LRU Key is no longer stored.
let first_key = AccountKey::new([1; 16]);
assert!(!list.keys.contains_key(&first_key));
// Marking a key as used should "refresh" the key's position. It is no longer the LRU key
// that will be evicted.
let account_key2 = AccountKey::new([2; 16]);
assert_matches!(list.mark_used(&account_key2), Ok(_));
// Inserting a new key at capacity will evict the LRU key (not `account_key2` anymore).
let next_key = AccountKey::new([max + 2; 16]);
list.save(next_key.clone());
assert_eq!(list.keys().len(), max as usize);
assert!(list.keys.contains_key(&next_key));
assert!(list.keys.contains_key(&account_key2));
}
#[test]
fn mark_used_nonexistent_key_is_error() {
let mut list = AccountKeyList::with_capacity_and_keys(1, vec![]);
let key = AccountKey::new([1; 16]);
assert_matches!(list.mark_used(&key), Err(_));
}
#[fuchsia::test]
fn load_keys_from_nonexistent_file() {
const EXAMPLE_FILEPATH: &str = "/data/test_account_keys0.json";
expect_keys_at_path(EXAMPLE_FILEPATH, vec![]);
}
#[fuchsia::test]
fn commit_and_load_keys_to_and_from_a_file() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
let example_keys = vec![key1, key2, key3];
let keys = AccountKeyList::with_capacity_and_keys(5, example_keys.clone());
keys.store().expect("can store Account Keys");
expect_keys_at_path(keys.path(), example_keys);
}
#[fuchsia::test]
fn lru_eviction_from_storage() {
let key1 = AccountKey::new([1; 16]);
let key2 = AccountKey::new([2; 16]);
let key3 = AccountKey::new([3; 16]);
// New collection with maximum capacity of 2 keys.
let mut keys = AccountKeyList::with_capacity_and_keys(2, vec![]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key1.clone());
expect_keys_at_path(keys.path(), vec![key1.clone()]);
// Because this key has never been written before, it should be saved to persistent storage.
keys.save(key2.clone());
expect_keys_at_path(keys.path(), vec![key1.clone(), key2.clone()]);
// Because `key1` already exists in the collection, we expect a cache "refresh" so the key
// ordering in storage should change.
keys.save(key1.clone());
// e.g. the LRU order should change whereby `key2` is now the LRU.
expect_keys_at_path(keys.path(), vec![key2, key1.clone()]);
// The collection is at max capacity so `key2` (LRU) should be evicted. Local storage
// should be updated.
keys.save(key3.clone());
expect_keys_at_path(keys.path(), vec![key1, key3]);
}
}
| invalid_model_id_conversion_is_error | identifier_name |
utils.rs | use cairo;
use cairo::enums::{FontSlant, FontWeight};
use cairo::prelude::SurfaceExt;
use clap::{
crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg,
};
use css_color_parser::Color as CssColor;
use font_loader::system_fonts;
use itertools::Itertools;
use log::debug;
use regex::Regex;
use std::error::Error;
use std::iter;
use std::str::FromStr;
use std::thread::sleep;
use std::time::{Duration, Instant};
use xcb;
use xcb::ffi::xcb_visualid_t;
use crate::{AppConfig, DesktopWindow, RenderWindow};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
Left,
Center,
Right,
}
impl FromStr for HorizontalAlign {
type Err = ();
fn from_str(s: &str) -> Result<HorizontalAlign, ()> {
match s {
"left" => Ok(HorizontalAlign::Left),
"center" => Ok(HorizontalAlign::Center),
"right" => Ok(HorizontalAlign::Right),
_ => Err(()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
Top,
Center,
Bottom,
}
impl FromStr for VerticalAlign {
type Err = ();
fn from_str(s: &str) -> Result<VerticalAlign, ()> {
match s {
"top" => Ok(VerticalAlign::Top),
"center" => Ok(VerticalAlign::Center),
"bottom" => Ok(VerticalAlign::Bottom),
_ => Err(()),
}
}
}
/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
let v: Vec<_> = f.split(':').collect();
let (family, size) = (v.get(0), v.get(1));
if family.is_none() || size.is_none() {
return Err("From font format".to_string());
}
if let Err(e) = size.unwrap().parse::<f32>() {
return Err(e.description().to_string());
}
Ok(())
}
/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else | ;
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
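// Sketch (added for illustration; not part of the original source): a minimal
// check of the sizing logic above. With two hint characters and max_count = 3,
// hints must be two characters long, since 2^1 < 3 <= 2^2.
#[cfg(test)]
mod get_next_hint_sketch {
use super::*;
#[test]
fn two_chars_needed_for_three_hints() {
let hint = get_next_hint(vec![], "ab", 3);
assert_eq!(hint.len(), 2);
}
}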
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
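// Example of the two-tone rendering above (added for illustration): for hint
// text "sad" with current_hints = "sa", the already-typed "sa" is drawn in
// text_color_alt and the remaining "d" is drawn in text_color.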
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
);
let grab_pointer_reply = grab_pointer_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Sort list of `DesktopWindow`s by position.
///
/// This sorts by column first and row second.
pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> {
dws.sort_by_key(|w| w.pos.0);
dws.sort_by_key(|w| w.pos.1);
dws
}
/// Returns true if `r1` and `r2` overlap.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
let left_corner_inside = r1.0 < r2.0 + r2.2;
let right_corner_inside = r1.0 + r1.2 > r2.0;
let top_corner_inside = r1.1 < r2.1 + r2.3;
let bottom_corner_inside = r1.1 + r1.3 > r2.1;
left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside
}
/// Finds overlaps and returns a list of those rects in the format (x, y, w, h).
pub fn find_overlaps(
rws: Vec<&RenderWindow>,
rect: (i32, i32, i32, i32),
) -> Vec<(i32, i32, i32, i32)> {
let mut overlaps = vec![];
for rw in rws {
if intersects(rw.rect, rect) {
overlaps.push(rw.rect);
}
}
overlaps
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_intersects() {
assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
}
#[test]
fn test_no_intersect() {
assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64)));
}
}
| {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
} | conditional_block |
utils.rs | use cairo;
use cairo::enums::{FontSlant, FontWeight};
use cairo::prelude::SurfaceExt;
use clap::{
crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg,
};
use css_color_parser::Color as CssColor;
use font_loader::system_fonts;
use itertools::Itertools;
use log::debug;
use regex::Regex;
use std::error::Error;
use std::iter;
use std::str::FromStr;
use std::thread::sleep;
use std::time::{Duration, Instant};
use xcb;
use xcb::ffi::xcb_visualid_t;
use crate::{AppConfig, DesktopWindow, RenderWindow};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
Left,
Center,
Right,
}
impl FromStr for HorizontalAlign {
type Err = ();
fn from_str(s: &str) -> Result<HorizontalAlign, ()> {
match s {
"left" => Ok(HorizontalAlign::Left),
"center" => Ok(HorizontalAlign::Center),
"right" => Ok(HorizontalAlign::Right),
_ => Err(()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
Top,
Center,
Bottom,
}
impl FromStr for VerticalAlign {
type Err = ();
fn from_str(s: &str) -> Result<VerticalAlign, ()> {
match s {
"top" => Ok(VerticalAlign::Top),
"center" => Ok(VerticalAlign::Center),
"bottom" => Ok(VerticalAlign::Bottom),
_ => Err(()),
}
}
}
/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
let v: Vec<_> = f.split(':').collect();
let (family, size) = (v.get(0), v.get(1));
if family.is_none() || size.is_none() {
return Err("From font format".to_string());
}
if let Err(e) = size.unwrap().parse::<f32>() {
return Err(e.description().to_string());
}
Ok(())
}
/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
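// Worked example (added for illustration): with hint_chars = "ab" and
// max_count = 3, one character is not enough (2^1 = 2 < 3), so size_required
// becomes 2 and hints are drawn from the two-character combinations
// "aa", "ab", "ba", and "bb".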
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
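// Example of the two-tone rendering above (added for illustration): for hint
// text "sad" with current_hints = "sa", the already-typed "sa" is drawn in
// text_color_alt and the remaining "d" is drawn in text_color.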
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
);
let grab_pointer_reply = grab_pointer_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Sort list of `DesktopWindow`s by position.
///
/// This sorts by column first and row second.
pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> {
dws.sort_by_key(|w| w.pos.0);
dws.sort_by_key(|w| w.pos.1);
dws
}
/// Returns true if `r1` and `r2` overlap.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
let left_corner_inside = r1.0 < r2.0 + r2.2;
let right_corner_inside = r1.0 + r1.2 > r2.0;
let top_corner_inside = r1.1 < r2.1 + r2.3;
let bottom_corner_inside = r1.1 + r1.3 > r2.1;
left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside
}
/// Finds overlaps and returns a list of those rects in the format (x, y, w, h).
pub fn find_overlaps(
rws: Vec<&RenderWindow>,
rect: (i32, i32, i32, i32),
) -> Vec<(i32, i32, i32, i32)> {
let mut overlaps = vec![];
for rw in rws {
if intersects(rw.rect, rect) {
overlaps.push(rw.rect);
}
}
overlaps
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_intersects() {
assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
}
#[test]
fn | () {
assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64)));
}
}
| test_no_intersect | identifier_name |
utils.rs | use cairo;
use cairo::enums::{FontSlant, FontWeight};
use cairo::prelude::SurfaceExt;
use clap::{
crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg,
};
use css_color_parser::Color as CssColor;
use font_loader::system_fonts;
use itertools::Itertools;
use log::debug;
use regex::Regex;
use std::error::Error;
use std::iter;
use std::str::FromStr;
use std::thread::sleep;
use std::time::{Duration, Instant};
use xcb;
use xcb::ffi::xcb_visualid_t;
use crate::{AppConfig, DesktopWindow, RenderWindow};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
Left,
Center,
Right,
}
impl FromStr for HorizontalAlign {
type Err = ();
fn from_str(s: &str) -> Result<HorizontalAlign, ()> {
match s {
"left" => Ok(HorizontalAlign::Left),
"center" => Ok(HorizontalAlign::Center),
"right" => Ok(HorizontalAlign::Right),
_ => Err(()),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
Top,
Center,
Bottom,
}
impl FromStr for VerticalAlign {
type Err = ();
fn from_str(s: &str) -> Result<VerticalAlign, ()> {
match s {
"top" => Ok(VerticalAlign::Top),
"center" => Ok(VerticalAlign::Center),
"bottom" => Ok(VerticalAlign::Bottom),
_ => Err(()),
}
}
}
/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
let v: Vec<_> = f.split(':').collect();
let (family, size) = (v.get(0), v.get(1));
if family.is_none() || size.is_none() {
return Err("From font format".to_string());
}
if let Err(e) = size.unwrap().parse::<f32>() {
return Err(e.description().to_string());
}
Ok(())
}
/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
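// Worked example (added for illustration): with hint_chars = "ab" and
// max_count = 3, one character is not enough (2^1 = 2 < 3), so size_required
// becomes 2 and hints are drawn from the two-character combinations
// "aa", "ab", "ba", and "bb".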
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
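// Example of the two-tone rendering above (added for illustration): for hint
// text "sad" with current_hints = "sa", the already-typed "sa" is drawn in
// text_color_alt and the remaining "d" is drawn in text_color.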
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> | let grab_pointer_reply = grab_pointer_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Sort list of `DesktopWindow`s by position.
///
/// This sorts by column first and row second.
pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> {
dws.sort_by_key(|w| w.pos.0);
dws.sort_by_key(|w| w.pos.1);
dws
}
/// Returns true if `r1` and `r2` overlap.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
let left_corner_inside = r1.0 < r2.0 + r2.2;
let right_corner_inside = r1.0 + r1.2 > r2.0;
let top_corner_inside = r1.1 < r2.1 + r2.3;
let bottom_corner_inside = r1.1 + r1.3 > r2.1;
left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside
}
/// Finds overlaps and returns a list of those rects in the format (x, y, w, h).
pub fn find_overlaps(
rws: Vec<&RenderWindow>,
rect: (i32, i32, i32, i32),
) -> Vec<(i32, i32, i32, i32)> {
let mut overlaps = vec![];
for rw in rws {
if intersects(rw.rect, rect) {
overlaps.push(rw.rect);
}
}
overlaps
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_intersects() {
assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
}
#[test]
fn test_no_intersect() {
assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64)));
}
}
| {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
); | identifier_body |
utils.rs | use cairo;
use cairo::enums::{FontSlant, FontWeight};
use cairo::prelude::SurfaceExt;
use clap::{
crate_authors, crate_description, crate_name, crate_version, value_t, App, AppSettings, Arg,
};
use css_color_parser::Color as CssColor;
use font_loader::system_fonts;
use itertools::Itertools;
use log::debug;
use regex::Regex;
use std::error::Error;
use std::iter;
use std::str::FromStr;
use std::thread::sleep;
use std::time::{Duration, Instant};
use xcb;
use xcb::ffi::xcb_visualid_t;
use crate::{AppConfig, DesktopWindow, RenderWindow};
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum HorizontalAlign {
Left,
Center,
Right,
}
impl FromStr for HorizontalAlign {
type Err = ();
fn from_str(s: &str) -> Result<HorizontalAlign, ()> {
match s {
"left" => Ok(HorizontalAlign::Left),
"center" => Ok(HorizontalAlign::Center),
"right" => Ok(HorizontalAlign::Right),
_ => Err(()),
}
}
}
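// Illustrative use of the FromStr impl above (not part of the original code):
// "center".parse::<HorizontalAlign>() yields Ok(HorizontalAlign::Center),
// while any unrecognised string yields Err(()).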
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum VerticalAlign {
Top,
Center,
Bottom,
}
impl FromStr for VerticalAlign {
type Err = ();
fn from_str(s: &str) -> Result<VerticalAlign, ()> {
match s { | "bottom" => Ok(VerticalAlign::Bottom),
_ => Err(()),
}
}
}
/// Checks whether the provided fontconfig font `f` is valid.
fn is_truetype_font(f: String) -> Result<(), String> {
let v: Vec<_> = f.split(':').collect();
let (family, size) = (v.get(0), v.get(1));
if family.is_none() || size.is_none() {
return Err("From font format".to_string());
}
if let Err(e) = size.unwrap().parse::<f32>() {
return Err(e.description().to_string());
}
Ok(())
}
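// Quick sanity check of is_truetype_font's accepted `family:size` format
// (illustrative only):
//
//     assert!(is_truetype_font("Mono:72".to_string()).is_ok());
//     assert!(is_truetype_font("Mono".to_string()).is_err()); // size missing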
/// Validate a color.
fn is_valid_color(c: String) -> Result<(), String> {
c.parse::<CssColor>().map_err(|_| "Invalid color format")?;
Ok(())
}
/// Load a system font.
fn load_font(font_family: &str) -> Vec<u8> {
let font_family_property = system_fonts::FontPropertyBuilder::new()
.family(font_family)
.build();
let (loaded_font, _) =
if let Some((loaded_font, index)) = system_fonts::get(&font_family_property) {
(loaded_font, index)
} else {
eprintln!("Family not found, falling back to first Monospace font");
let mut font_monospace_property =
system_fonts::FontPropertyBuilder::new().monospace().build();
let sysfonts = system_fonts::query_specific(&mut font_monospace_property);
eprintln!("Falling back to font '{font}'", font = sysfonts[0]);
let (loaded_font, index) =
system_fonts::get(&font_monospace_property).expect("Couldn't find suitable font");
(loaded_font, index)
};
loaded_font
}
/// Parse a color into a tuple of floats.
fn parse_color(color_str: CssColor) -> (f64, f64, f64, f64) {
(
f64::from(color_str.r) / 255.0,
f64::from(color_str.g) / 255.0,
f64::from(color_str.b) / 255.0,
f64::from(color_str.a),
)
}
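// Worked example for parse_color: CssColor { r: 30, g: 30, b: 30, a: 0.9 }
// (the default background below) maps to roughly (0.1176, 0.1176, 0.1176, 0.9);
// only the RGB channels are rescaled from 0-255, `a` is already a 0.0-1.0 float.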
/// Parse app arguments.
pub fn parse_args() -> AppConfig {
let matches = App::new(crate_name!())
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.global_setting(AppSettings::ColoredHelp)
.arg(
Arg::with_name("font")
.short("f")
.long("font")
.takes_value(true)
.validator(is_truetype_font)
.default_value("Mono:72")
.help("Use a specific TrueType font with this format: family:size"))
.arg(
Arg::with_name("hint_chars")
.short("c")
.long("chars")
.takes_value(true)
.default_value("sadfjklewcmpgh")
.help("Define a set of possbile values to use as hint characters"))
.arg(
Arg::with_name("margin")
.short("m")
.long("margin")
.takes_value(true)
.default_value("0.2")
.help("Add an additional margin around the text box (value is a factor of the box size)"))
.arg(
Arg::with_name("text_color")
.long("textcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("#dddddd")
.display_order(49)
.help("Text color (CSS notation)"))
.arg(
Arg::with_name("text_color_alt")
.long("textcoloralt")
.takes_value(true)
.validator(is_valid_color)
.default_value("#666666")
.display_order(50)
.help("Text color alternate (CSS notation)"))
.arg(
Arg::with_name("bg_color")
.long("bgcolor")
.takes_value(true)
.validator(is_valid_color)
.default_value("rgba(30, 30, 30, 0.9)")
.display_order(51)
.help("Background color (CSS notation)"))
.arg(
Arg::with_name("horizontal_align")
.long("halign")
.takes_value(true)
.possible_values(&["left", "center", "right"])
.default_value("left")
.display_order(100)
.help("Horizontal alignment of the box inside the window"))
.arg(
Arg::with_name("vertical_align")
.long("valign")
.takes_value(true)
.possible_values(&["top", "center", "bottom"])
.default_value("top")
.display_order(101)
.help("Vertical alignment of the box inside the window"))
.arg(
Arg::with_name("fill")
.long("fill")
.conflicts_with_all(&["horizontal_align", "vertical_align", "margin"])
.display_order(102)
.help("Completely fill out windows"))
.arg(
Arg::with_name("print_only")
.short("p")
.long("printonly")
.help("Print the window id only but don't change focus"))
.get_matches();
let font = value_t!(matches, "font", String).unwrap();
let v: Vec<_> = font.split(':').collect();
let (font_family, font_size) = (v[0].to_string(), v[1].parse::<f64>().unwrap());
let hint_chars = value_t!(matches, "hint_chars", String).unwrap();
let margin = value_t!(matches, "margin", f32).unwrap();
let text_color_unparsed = value_t!(matches, "text_color", CssColor).unwrap();
let text_color = parse_color(text_color_unparsed);
let text_color_alt_unparsed = value_t!(matches, "text_color_alt", CssColor).unwrap();
let text_color_alt = parse_color(text_color_alt_unparsed);
let bg_color_unparsed = value_t!(matches, "bg_color", CssColor).unwrap();
let bg_color = parse_color(bg_color_unparsed);
let fill = matches.is_present("fill");
let print_only = matches.is_present("print_only");
let (horizontal_align, vertical_align) = if fill {
(HorizontalAlign::Center, VerticalAlign::Center)
} else {
(
value_t!(matches, "horizontal_align", HorizontalAlign).unwrap(),
value_t!(matches, "vertical_align", VerticalAlign).unwrap(),
)
};
let loaded_font = load_font(&font_family);
AppConfig {
font_family,
font_size,
loaded_font,
hint_chars,
margin,
text_color,
text_color_alt,
bg_color,
fill,
print_only,
horizontal_align,
vertical_align,
}
}
/// Given a list of `current_hints` and a bunch of `hint_chars`, this finds a unique combination
/// of characters that doesn't yet exist in `current_hints`. `max_count` is the maximum possible
/// number of hints we need.
pub fn get_next_hint(current_hints: Vec<&String>, hint_chars: &str, max_count: usize) -> String {
// Figure out which size we need.
let mut size_required = 1;
while hint_chars.len().pow(size_required) < max_count {
size_required += 1;
}
let mut ret = hint_chars
.chars()
.next()
.expect("No hint_chars found")
.to_string();
let it = iter::repeat(hint_chars.chars().rev())
.take(size_required as usize)
.multi_cartesian_product();
for c in it {
let folded = c.into_iter().collect();
if !current_hints.contains(&&folded) {
ret = folded;
}
}
debug!("Returning next hint: {}", ret);
ret
}
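// Worked example for get_next_hint (no current hints): with hint_chars = "ab"
// and max_count = 3, size_required becomes 2, the reversed-character product
// yields "bb", "ba", "ab", "aa" in that order, and the last unused candidate
// wins, so this returns "aa".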
pub fn find_visual(conn: &xcb::Connection, visual: xcb_visualid_t) -> Option<xcb::Visualtype> {
for screen in conn.get_setup().roots() {
for depth in screen.allowed_depths() {
for vis in depth.visuals() {
if visual == vis.visual_id() {
return Some(vis);
}
}
}
}
None
}
pub fn extents_for_text(text: &str, family: &str, size: f64) -> cairo::TextExtents {
// Create a buffer image that should be large enough.
// TODO: Figure out the maximum size from the largest window on the desktop.
// For now we'll use made-up maximum values.
let surface = cairo::ImageSurface::create(cairo::Format::ARgb32, 1024, 1024)
.expect("Couldn't create ImageSurface");
let cr = cairo::Context::new(&surface);
cr.select_font_face(family, cairo::FontSlant::Normal, cairo::FontWeight::Normal);
cr.set_font_size(size);
cr.text_extents(text)
}
/// Draw a `text` onto `rw`. In case any `current_hints` are already typed, it will draw those in a
/// different color to show that they were in fact typed.
pub fn draw_hint_text(rw: &RenderWindow, app_config: &AppConfig, text: &str, current_hints: &str) {
// Paint background.
rw.cairo_context.set_operator(cairo::Operator::Source);
rw.cairo_context.set_source_rgb(
app_config.bg_color.0,
app_config.bg_color.1,
app_config.bg_color.2,
);
rw.cairo_context.paint();
rw.cairo_context.set_operator(cairo::Operator::Over);
rw.cairo_context.select_font_face(
&app_config.font_family,
FontSlant::Normal,
FontWeight::Normal,
);
rw.cairo_context.set_font_size(app_config.font_size);
rw.cairo_context.move_to(rw.draw_pos.0, rw.draw_pos.1);
if text.starts_with(current_hints) {
// Paint already selected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color_alt.0,
app_config.text_color_alt.1,
app_config.text_color_alt.2,
app_config.text_color_alt.3,
);
for c in current_hints.chars() {
rw.cairo_context.show_text(&c.to_string());
}
}
// Paint unselected chars.
rw.cairo_context.set_source_rgba(
app_config.text_color.0,
app_config.text_color.1,
app_config.text_color.2,
app_config.text_color.3,
);
let re = Regex::new(&format!("^{}", current_hints)).unwrap();
for c in re.replace(text, "").chars() {
rw.cairo_context.show_text(&c.to_string());
}
rw.cairo_context.get_target().flush();
}
/// Try to grab the keyboard until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global keyboard input without it failing
/// sometimes due to other clients grabbing it occasionally. Hence, we'll have to keep retrying
/// until we eventually succeed.
pub fn snatch_keyboard(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_keyboard_cookie = xcb::xproto::grab_keyboard(
&conn,
true,
screen.root(),
xcb::CURRENT_TIME,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
);
let grab_keyboard_reply = grab_keyboard_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_keyboard_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Try to grab the mouse until `timeout` is reached.
///
/// Generally with X, I found that you can't grab global mouse input without it failing sometimes
/// due to other clients grabbing it occasionally. Hence, we'll have to keep retrying until we
/// eventually succeed.
pub fn snatch_mouse(
conn: &xcb::Connection,
screen: &xcb::Screen,
timeout: Duration,
) -> Result<(), String> {
let now = Instant::now();
loop {
if now.elapsed() > timeout {
return Err(format!(
"Couldn't grab keyboard input within {:?}",
now.elapsed()
));
}
let grab_pointer_cookie = xcb::xproto::grab_pointer(
&conn,
true,
screen.root(),
xcb::EVENT_MASK_BUTTON_PRESS as u16,
xcb::GRAB_MODE_ASYNC as u8,
xcb::GRAB_MODE_ASYNC as u8,
xcb::NONE,
xcb::NONE,
xcb::CURRENT_TIME,
);
let grab_pointer_reply = grab_pointer_cookie
.get_reply()
.map_err(|_| "Couldn't communicate with X")?;
if grab_pointer_reply.status() == xcb::GRAB_STATUS_SUCCESS as u8 {
return Ok(());
}
sleep(Duration::from_millis(1));
}
}
/// Sort list of `DesktopWindow`s by position.
///
/// This sorts by column first and row second.
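/// Note that this depends on `sort_by_key` being a stable sort: equal `pos.1`
/// values keep their `pos.0` ordering from the first pass.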
pub fn sort_by_pos(mut dws: Vec<DesktopWindow>) -> Vec<DesktopWindow> {
dws.sort_by_key(|w| w.pos.0);
dws.sort_by_key(|w| w.pos.1);
dws
}
/// Returns true if `r1` and `r2` overlap.
fn intersects(r1: (i32, i32, i32, i32), r2: (i32, i32, i32, i32)) -> bool {
let left_corner_inside = r1.0 < r2.0 + r2.2;
let right_corner_inside = r1.0 + r1.2 > r2.0;
let top_corner_inside = r1.1 < r2.1 + r2.3;
let bottom_corner_inside = r1.1 + r1.3 > r2.1;
left_corner_inside && right_corner_inside && top_corner_inside && bottom_corner_inside
}
/// Finds overlaps and returns a list of those rects in the format (x, y, w, h).
pub fn find_overlaps(
rws: Vec<&RenderWindow>,
rect: (i32, i32, i32, i32),
) -> Vec<(i32, i32, i32, i32)> {
let mut overlaps = vec![];
for rw in rws {
if intersects(rw.rect, rect) {
overlaps.push(rw.rect);
}
}
overlaps
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_intersects() {
assert!(intersects((1905, 705, 31, 82), (1905, 723, 38, 64)));
}
#[test]
fn test_no_intersect() {
assert!(!intersects((1905, 705, 31, 82), (2000, 723, 38, 64)));
}
} | "top" => Ok(VerticalAlign::Top),
"center" => Ok(VerticalAlign::Center), | random_line_split |
mysql_interactive_worker.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::marker::PhantomData;
use std::time::Instant;
use common_base::tokio;
use common_datablocks::DataBlock;
use common_exception::ErrorCode;
use common_exception::Result;
use common_io::prelude::*;
use common_planners::PlanNode;
use metrics::histogram;
use msql_srv::ErrorKind;
use msql_srv::InitWriter;
use msql_srv::MysqlShim;
use msql_srv::ParamParser;
use msql_srv::QueryResultWriter;
use msql_srv::StatementMetaWriter;
use rand::RngCore;
use tokio_stream::StreamExt;
use crate::interpreters::InterpreterFactory;
use crate::servers::mysql::writers::DFInitResultWriter;
use crate::servers::mysql::writers::DFQueryResultWriter;
use crate::sessions::DatabendQueryContextRef;
use crate::sessions::SessionRef;
use crate::sql::PlanParser;
use crate::users::CertifiedInfo;
struct InteractiveWorkerBase<W: std::io::Write> {
session: SessionRef,
generic_hold: PhantomData<W>,
}
pub struct InteractiveWorker<W: std::io::Write> {
session: SessionRef,
base: InteractiveWorkerBase<W>,
version: String,
salt: [u8; 20],
client_addr: String,
}
impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> {
type Error = ErrorCode;
fn version(&self) -> &str {
self.version.as_str()
}
fn connect_id(&self) -> u32 {
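// [0x08, 0x00, 0x00, 0x00] read little-endian is just the constant 8;
// every connection currently reports this same fixed id.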
u32::from_le_bytes([0x08, 0x00, 0x00, 0x00])
}
fn default_auth_plugin(&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool> {
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
}
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
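// The server stores SHA1(SHA1(password)), so XOR-ing the client's response
// with SHA1(salt ++ stored) unmasks SHA1(password) again; that unmasked
// digest is what gets handed to auth_user() by the caller.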
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
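// do_query below: a query carrying an expected-error hint passes only when
// execution fails with exactly that code; succeeding, or failing with a
// different code, is itself surfaced as an error -- presumably what lets
// test queries assert on expected failures.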
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query_result.map(|data| (data, Self::extra_info(context, instant)))
}
fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String {
let progress = context.get_progress_value();
let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64;
format!(
"Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.",
progress.read_rows,
convert_byte_size(progress.read_bytes as f64),
seconds,
convert_number_size((progress.read_rows as f64) / (seconds as f64)),
convert_byte_size((progress.read_bytes as f64) / (seconds as f64)),
)
}
fn do_init(&mut self, database_name: &str) -> Result<()> {
let init_query = format!("USE {};", database_name);
let do_query = self.do_query(&init_query);
match Self::build_runtime() {
Err(error_code) => Err(error_code),
Ok(runtime) => match runtime.block_on(do_query) {
Ok(_) => Ok(()),
Err(error_code) => Err(error_code),
},
} | fn build_runtime() -> Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error)))
}
}
impl<W: std::io::Write> InteractiveWorker<W> {
pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> {
let mut bs = vec![0u8; 20];
let mut rng = rand::thread_rng();
rng.fill_bytes(bs.as_mut());
let mut scramble: [u8; 20] = [0; 20];
for i in 0..20 {
scramble[i] = bs[i];
if scramble[i] == b'\0' || scramble[i] == b'$' {
scramble[i] += 1;
}
}
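// The salt is sent NUL-terminated during the MySQL handshake, so 0x00 must
// never appear in it; b'$' is skipped here as well (an extra precaution kept
// from the original implementation).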
InteractiveWorker::<W> {
session: session.clone(),
base: InteractiveWorkerBase::<W> {
session,
generic_hold: PhantomData::default(),
},
salt: scramble,
// TODO: version
version: crate::configs::DATABEND_COMMIT_VERSION.to_string(),
client_addr,
}
}
} | }
| random_line_split |
mysql_interactive_worker.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::marker::PhantomData;
use std::time::Instant;
use common_base::tokio;
use common_datablocks::DataBlock;
use common_exception::ErrorCode;
use common_exception::Result;
use common_io::prelude::*;
use common_planners::PlanNode;
use metrics::histogram;
use msql_srv::ErrorKind;
use msql_srv::InitWriter;
use msql_srv::MysqlShim;
use msql_srv::ParamParser;
use msql_srv::QueryResultWriter;
use msql_srv::StatementMetaWriter;
use rand::RngCore;
use tokio_stream::StreamExt;
use crate::interpreters::InterpreterFactory;
use crate::servers::mysql::writers::DFInitResultWriter;
use crate::servers::mysql::writers::DFQueryResultWriter;
use crate::sessions::DatabendQueryContextRef;
use crate::sessions::SessionRef;
use crate::sql::PlanParser;
use crate::users::CertifiedInfo;
struct InteractiveWorkerBase<W: std::io::Write> {
session: SessionRef,
generic_hold: PhantomData<W>,
}
pub struct InteractiveWorker<W: std::io::Write> {
session: SessionRef,
base: InteractiveWorkerBase<W>,
version: String,
salt: [u8; 20],
client_addr: String,
}
impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> {
type Error = ErrorCode;
fn version(&self) -> &str {
self.version.as_str()
}
fn connect_id(&self) -> u32 {
u32::from_le_bytes([0x08, 0x00, 0x00, 0x00])
}
fn | (&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool> {
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
}
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query_result.map(|data| (data, Self::extra_info(context, instant)))
}
fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String {
let progress = context.get_progress_value();
let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64;
format!(
"Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.",
progress.read_rows,
convert_byte_size(progress.read_bytes as f64),
seconds,
convert_number_size((progress.read_rows as f64) / (seconds as f64)),
convert_byte_size((progress.read_bytes as f64) / (seconds as f64)),
)
}
fn do_init(&mut self, database_name: &str) -> Result<()> {
let init_query = format!("USE {};", database_name);
let do_query = self.do_query(&init_query);
match Self::build_runtime() {
Err(error_code) => Err(error_code),
Ok(runtime) => match runtime.block_on(do_query) {
Ok(_) => Ok(()),
Err(error_code) => Err(error_code),
},
}
}
fn build_runtime() -> Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error)))
}
}
impl<W: std::io::Write> InteractiveWorker<W> {
pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> {
let mut bs = vec![0u8; 20];
let mut rng = rand::thread_rng();
rng.fill_bytes(bs.as_mut());
let mut scramble: [u8; 20] = [0; 20];
for i in 0..20 {
scramble[i] = bs[i];
if scramble[i] == b'\0' || scramble[i] == b'$' {
scramble[i] += 1;
}
}
InteractiveWorker::<W> {
session: session.clone(),
base: InteractiveWorkerBase::<W> {
session,
generic_hold: PhantomData::default(),
},
salt: scramble,
// TODO: version
version: crate::configs::DATABEND_COMMIT_VERSION.to_string(),
client_addr,
}
}
}
| default_auth_plugin | identifier_name |
mysql_interactive_worker.rs | // Copyright 2020 Datafuse Labs.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::marker::PhantomData;
use std::time::Instant;
use common_base::tokio;
use common_datablocks::DataBlock;
use common_exception::ErrorCode;
use common_exception::Result;
use common_io::prelude::*;
use common_planners::PlanNode;
use metrics::histogram;
use msql_srv::ErrorKind;
use msql_srv::InitWriter;
use msql_srv::MysqlShim;
use msql_srv::ParamParser;
use msql_srv::QueryResultWriter;
use msql_srv::StatementMetaWriter;
use rand::RngCore;
use tokio_stream::StreamExt;
use crate::interpreters::InterpreterFactory;
use crate::servers::mysql::writers::DFInitResultWriter;
use crate::servers::mysql::writers::DFQueryResultWriter;
use crate::sessions::DatabendQueryContextRef;
use crate::sessions::SessionRef;
use crate::sql::PlanParser;
use crate::users::CertifiedInfo;
struct InteractiveWorkerBase<W: std::io::Write> {
session: SessionRef,
generic_hold: PhantomData<W>,
}
pub struct InteractiveWorker<W: std::io::Write> {
session: SessionRef,
base: InteractiveWorkerBase<W>,
version: String,
salt: [u8; 20],
client_addr: String,
}
impl<W: std::io::Write> MysqlShim<W> for InteractiveWorker<W> {
type Error = ErrorCode;
fn version(&self) -> &str {
self.version.as_str()
}
fn connect_id(&self) -> u32 {
u32::from_le_bytes([0x08, 0x00, 0x00, 0x00])
}
fn default_auth_plugin(&self) -> &str {
"mysql_native_password"
}
fn auth_plugin_for_username(&self, _user: &[u8]) -> &str {
"mysql_native_password"
}
fn salt(&self) -> [u8; 20] {
self.salt
}
fn authenticate(
&self,
auth_plugin: &str,
username: &[u8],
salt: &[u8],
auth_data: &[u8],
) -> bool {
let username = String::from_utf8_lossy(username);
let info = CertifiedInfo::create(&username, auth_data, &self.client_addr);
let authenticate = self.base.authenticate(auth_plugin, salt, info);
futures::executor::block_on(async move {
match authenticate.await {
Ok(res) => res,
Err(failure) => {
log::error!(
"MySQL handler authenticate failed, \
user_name: {}, \
client_address: {}, \
failure_cause: {}",
username,
self.client_addr,
failure
);
false
}
}
})
}
fn on_prepare(&mut self, query: &str, writer: StatementMetaWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_prepare(query, writer)
}
fn on_execute(
&mut self,
id: u32,
param: ParamParser,
writer: QueryResultWriter<W>,
) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
self.base.do_execute(id, param, writer)
}
fn on_close(&mut self, id: u32) {
self.base.do_close(id);
}
fn on_query(&mut self, query: &str, writer: QueryResultWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
let mut writer = DFQueryResultWriter::create(writer);
match InteractiveWorkerBase::<W>::build_runtime() {
Ok(runtime) => {
let instant = Instant::now();
let blocks = runtime.block_on(self.base.do_query(query));
let mut write_result = writer.write(blocks);
if let Err(cause) = write_result {
let suffix = format!("(while in query {})", query);
write_result = Err(cause.add_message_back(suffix));
}
histogram!(
super::mysql_metrics::METRIC_MYSQL_PROCESSOR_REQUEST_DURATION,
instant.elapsed()
);
write_result
}
Err(error) => writer.write(Err(error)),
}
}
fn on_init(&mut self, database_name: &str, writer: InitWriter<W>) -> Result<()> {
if self.session.is_aborting() {
writer.error(
ErrorKind::ER_ABORTING_CONNECTION,
"Aborting this connection. because we are try aborting server.".as_bytes(),
)?;
return Err(ErrorCode::AbortedSession(
"Aborting this connection. because we are try aborting server.",
));
}
DFInitResultWriter::create(writer).write(self.base.do_init(database_name))
}
}
impl<W: std::io::Write> InteractiveWorkerBase<W> {
async fn authenticate(
&self,
auth_plugin: &str,
salt: &[u8],
info: CertifiedInfo,
) -> Result<bool> |
fn encoding_password(
auth_plugin: &str,
salt: &[u8],
input: &[u8],
user_password: &[u8],
) -> Result<Vec<u8>> {
match auth_plugin {
"mysql_native_password" if input.is_empty() => Ok(vec![]),
"mysql_native_password" => {
// SHA1( password ) XOR SHA1( "20-bytes random data from server" <concat> SHA1( SHA1( password ) ) )
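// Per the mysql_native_password scheme, the XOR below strips the client-side
// mask and recovers SHA1(password), given the stored SHA1(SHA1(password)).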
let mut m = sha1::Sha1::new();
m.update(salt);
m.update(user_password);
let result = m.digest().bytes();
if input.len() != result.len() {
return Err(ErrorCode::SHA1CheckFailed("SHA1 check failed"));
}
let mut s = Vec::with_capacity(result.len());
for i in 0..result.len() {
s.push(input[i] ^ result[i]);
}
Ok(s)
}
_ => Ok(input.to_vec()),
}
}
fn do_prepare(&mut self, _: &str, writer: StatementMetaWriter<'_, W>) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Prepare is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_execute(
&mut self,
_: u32,
_: ParamParser<'_>,
writer: QueryResultWriter<'_, W>,
) -> Result<()> {
writer.error(
ErrorKind::ER_UNKNOWN_ERROR,
"Execute is not support in Databend.".as_bytes(),
)?;
Ok(())
}
fn do_close(&mut self, _: u32) {}
async fn do_query(&mut self, query: &str) -> Result<(Vec<DataBlock>, String)> {
log::debug!("{}", query);
let context = self.session.create_context().await?;
context.attach_query_str(query);
let query_parser = PlanParser::create(context.clone());
let (plan, hints) = query_parser.build_with_hint_from_sql(query);
match hints
.iter()
.find(|v| v.error_code.is_some())
.and_then(|x| x.error_code)
{
None => Self::exec_query(plan, &context).await,
Some(hint_error_code) => match Self::exec_query(plan, &context).await {
Ok(_) => Err(ErrorCode::UnexpectedError(format!(
"Expected server error code: {} but got: Ok.",
hint_error_code
))),
Err(error_code) => {
if hint_error_code == error_code.code() {
Ok((vec![DataBlock::empty()], String::from("")))
} else {
let actual_code = error_code.code();
Err(error_code.add_message(format!(
"Expected server error code: {} but got: {}.",
hint_error_code, actual_code
)))
}
}
},
}
}
async fn exec_query(
plan: Result<PlanNode>,
context: &DatabendQueryContextRef,
) -> Result<(Vec<DataBlock>, String)> {
let instant = Instant::now();
let interpreter = InterpreterFactory::get(context.clone(), plan?)?;
let data_stream = interpreter.execute().await?;
histogram!(
super::mysql_metrics::METRIC_INTERPRETER_USEDTIME,
instant.elapsed()
);
let collector = data_stream.collect::<Result<Vec<DataBlock>>>();
let query_result = collector.await;
query_result.map(|data| (data, Self::extra_info(context, instant)))
}
fn extra_info(context: &DatabendQueryContextRef, instant: Instant) -> String {
let progress = context.get_progress_value();
let seconds = instant.elapsed().as_nanos() as f64 / 1e9f64;
format!(
"Read {} rows, {} in {:.3} sec., {} rows/sec., {}/sec.",
progress.read_rows,
convert_byte_size(progress.read_bytes as f64),
seconds,
convert_number_size((progress.read_rows as f64) / (seconds as f64)),
convert_byte_size((progress.read_bytes as f64) / (seconds as f64)),
)
}
fn do_init(&mut self, database_name: &str) -> Result<()> {
let init_query = format!("USE {};", database_name);
let do_query = self.do_query(&init_query);
match Self::build_runtime() {
Err(error_code) => Err(error_code),
Ok(runtime) => match runtime.block_on(do_query) {
Ok(_) => Ok(()),
Err(error_code) => Err(error_code),
},
}
}
fn build_runtime() -> Result<tokio::runtime::Runtime> {
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.map_err(|tokio_error| ErrorCode::TokioError(format!("{}", tokio_error)))
}
}
impl<W: std::io::Write> InteractiveWorker<W> {
pub fn create(session: SessionRef, client_addr: String) -> InteractiveWorker<W> {
let mut bs = vec![0u8; 20];
let mut rng = rand::thread_rng();
rng.fill_bytes(bs.as_mut());
let mut scramble: [u8; 20] = [0; 20];
for i in 0..20 {
scramble[i] = bs[i];
if scramble[i] == b'\0' || scramble[i] == b'$' {
scramble[i] += 1;
}
}
InteractiveWorker::<W> {
session: session.clone(),
base: InteractiveWorkerBase::<W> {
session,
generic_hold: PhantomData::default(),
},
salt: scramble,
// TODO: version
version: crate::configs::DATABEND_COMMIT_VERSION.to_string(),
client_addr,
}
}
}
| {
let user_name = &info.user_name;
let address = &info.user_client_address;
let user_manager = self.session.get_user_manager();
let user_info = user_manager.get_user(user_name).await?;
let input = &info.user_password;
let saved = &user_info.password;
let encode_password = Self::encoding_password(auth_plugin, salt, input, saved)?;
user_manager
.auth_user(CertifiedInfo::create(user_name, encode_password, address))
.await
} | identifier_body |
lib.rs | use itertools::multiunzip;
use rust_htslib::{bam, bam::ext::BamRecordExtensions};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
/// Merge two lists into a sorted list
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
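/// Example (mirroring the crate's other doc-tests):
/// ```
/// use bamlift::*;
/// let x = vec![1, 3, 5];
/// let y = vec![2, 4];
/// assert_eq!(merge_two_lists(&x, &y), vec![1, 2, 3, 4, 5]);
/// ```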
pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T>
where
T: Ord,
T: Clone,
{
let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect();
x.sort();
x
}
/// Merge two lists based on a key
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
/// ```
/// use bamlift::*;
/// let x = vec![1,3];
/// let x_q = vec!["a","b"];
/// let y = vec![2,4];
/// let y_q = vec!["c", "d"];
/// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q);
/// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]);
/// ```
pub fn merge_two_lists_with_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
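/// For a reverse-strand read of length 10, input positions `[1, 3]` come back
/// as `[6, 8]`: each `p` maps to `seq_len - p - 1` and the order is flipped so
/// the result is ascending again.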
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
#[inline(always)]
pub fn | <T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest that should only be called from there.
/// The exception is test cases, where it is easier to test this function
/// directly.
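/// Worked example: with a single aligned pair `([10, 20], [110, 120])`, query
/// position 12 lifts exactly to 112, query 5 snaps to the block start 110,
/// and query 25 snaps to the block end 120.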
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
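// Note on lift_range: a range only survives the lift if both ends lift and
// the result is non-empty; e.g. a query range sitting entirely inside an
// unaligned gap can snap to start == end and is reported as (None, None, None).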
/// Find the closest range liftover for (start, end) pairs; ranges that
/// collapse to zero length come back as (None, None, None).
pub fn lift_query_range(
record: &bam::Record,
starts: &[i64],
ends: &[i64],
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
// get the aligned block pairs
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
lift_range(&aligned_block_pairs, starts, ends, false)
}
//
// EXACT LIFTOVER FUNCTIONS
//
/// liftover positions using the cigar string
fn liftover_exact(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
positions: &[i64],
lift_reference_to_query: bool,
) -> Vec<Option<i64>> {
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the shared positions in the reference
let mut return_positions = vec![];
let mut cur_idx = 0;
// ends are not inclusive, I checked.
for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs {
let (st, en) = if !lift_reference_to_query {
(q_st, q_en)
} else {
(r_st, r_en)
};
// check bounds
if cur_idx == positions.len() {
break;
}
let mut cur_pos = positions[cur_idx];
// need to go to the next block
while cur_pos < *en {
if cur_pos >= *st {
let dist_from_start = cur_pos - st;
let rtn_pos = if!lift_reference_to_query {
r_st + dist_from_start
} else {
q_st + dist_from_start
};
return_positions.push(Some(rtn_pos));
} else {
return_positions.push(None);
}
// reset current position
cur_idx += 1;
if cur_idx == positions.len() {
break;
}
cur_pos = positions[cur_idx];
}
}
// add values for things that won't lift at the end
while positions.len() > return_positions.len() {
return_positions.push(None);
}
assert_eq!(positions.len(), return_positions.len());
return_positions
}
pub fn lift_reference_positions_exact(
record: &bam::Record,
query_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
query_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, query_positions, false)
}
}
pub fn lift_query_positions_exact(
record: &bam::Record,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
reference_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, reference_positions, true)
}
}
| is_sorted | identifier_name |
lib.rs | use itertools::multiunzip;
use rust_htslib::{bam, bam::ext::BamRecordExtensions};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
/// Merge two lists into a sorted list
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T>
where
T: Ord,
T: Clone,
{
let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect();
x.sort();
x
}
/// Merge two lists based on a key
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
/// ```
/// use bamlift::*;
/// let x = vec![1,3];
/// let x_q = vec!["a","b"];
/// let y = vec![2,4];
/// let y_q = vec!["c", "d"];
/// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q);
/// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]);
/// ```
pub fn merge_two_lists_with_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
#[inline(always)]
pub fn is_sorted<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest that should only be called from there.
/// The exception is test cases, where it is easier to test this function
/// directly.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!" | let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
let mut current_block = 0;
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= &q_st && cur_pos < &q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < &q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= &q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = current_block;
}
current_block += 1;
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest range liftover for (start, end) pairs; ranges that
/// collapse to zero length come back as (None, None, None).
pub fn lift_query_range(
record: &bam::Record,
starts: &[i64],
ends: &[i64],
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
// get the aligned block pairs
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
lift_range(&aligned_block_pairs, starts, ends, false)
}
//
// EXACT LIFTOVER FUNCTIONS
//
/// liftover positions using the cigar string
fn liftover_exact(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
positions: &[i64],
lift_reference_to_query: bool,
) -> Vec<Option<i64>> {
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the shared positions in the reference
let mut return_positions = vec![];
let mut cur_idx = 0;
// ends are not inclusive, I checked.
for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs {
let (st, en) = if !lift_reference_to_query {
(q_st, q_en)
} else {
(r_st, r_en)
};
// check bounds
if cur_idx == positions.len() {
break;
}
let mut cur_pos = positions[cur_idx];
// need to go to the next block
while cur_pos < *en {
if cur_pos >= *st {
let dist_from_start = cur_pos - st;
let rtn_pos = if !lift_reference_to_query {
r_st + dist_from_start
} else {
q_st + dist_from_start
};
return_positions.push(Some(rtn_pos));
} else {
return_positions.push(None);
}
// reset current position
cur_idx += 1;
if cur_idx == positions.len() {
break;
}
cur_pos = positions[cur_idx];
}
}
// add values for things that won't lift at the end
while positions.len() > return_positions.len() {
return_positions.push(None);
}
assert_eq!(positions.len(), return_positions.len());
return_positions
}
pub fn lift_reference_positions_exact(
record: &bam::Record,
query_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
query_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, query_positions, false)
}
}
pub fn lift_query_positions_exact(
record: &bam::Record,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
reference_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, reference_positions, true)
}
} | );
// find the closest position for every position
let mut starting_block = 0; | random_line_split |
lib.rs | use itertools::multiunzip;
use rust_htslib::{bam, bam::ext::BamRecordExtensions};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
/// Merge two lists into a sorted list
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
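/// A minimal doctest sketch, mirroring the crate's existing examples:
/// ```
/// use bamlift::*;
/// let left = vec![1, 4, 6];
/// let right = vec![2, 3, 5];
/// assert_eq!(merge_two_lists(&left, &right), vec![1, 2, 3, 4, 5, 6]);
/// ```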
pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T>
where
T: Ord,
T: Clone,
{
let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect();
x.sort();
x
}
/// Merge two lists based on a key
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
/// ```
/// use bamlift::*;
/// let x = vec![1,3];
/// let x_q = vec!["a","b"];
/// let y = vec![2,4];
/// let y_q = vec!["c", "d"];
/// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q);
/// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]);
/// ```
pub fn merge_two_lists_with_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
}
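// Worked example (illustrative): for a reverse-strand record with seq_len 10,
// input positions [0, 2] map to [10 - 2 - 1, 10 - 0 - 1] = [7, 9]; the .rev()
// keeps the output sorted in ascending order.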
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
#[inline(always)]
pub fn is_sorted<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest that should only be called from there.
/// The exception is test cases, where it is easier to test this function
/// directly.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= q_st && cur_pos < q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = block_index;
}
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
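/// A doctest sketch under the ([query_start, query_end], [ref_start, ref_end])
/// block convention used above; positions outside every block snap to the
/// nearest block edge:
/// ```
/// use bamlift::*;
/// let pairs = vec![([0, 10], [100, 110])];
/// assert_eq!(lift_reference_positions(&pairs, &[2, 15]), vec![Some(102), Some(110)]);
/// ```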
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest lifted range for each query [start, end) pair
pub fn lift_query_range(
record: &bam::Record,
starts: &[i64],
ends: &[i64],
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
// get the aligned block pairs
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
lift_range(&aligned_block_pairs, starts, ends, false)
}
//
// EXACT LIFTOVER FUNCTIONS
//
/// liftover positions using the cigar string
fn liftover_exact(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
positions: &[i64],
lift_reference_to_query: bool,
) -> Vec<Option<i64>> {
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the shared positions in the reference
let mut return_positions = vec![];
let mut cur_idx = 0;
// ends are not inclusive, I checked.
for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs {
let (st, en) = if !lift_reference_to_query {
(q_st, q_en)
} else {
(r_st, r_en)
};
// check bounds
if cur_idx == positions.len() {
break;
}
let mut cur_pos = positions[cur_idx];
// need to go to the next block
while cur_pos < *en {
if cur_pos >= *st {
let dist_from_start = cur_pos - st;
let rtn_pos = if !lift_reference_to_query {
r_st + dist_from_start
} else {
q_st + dist_from_start
};
return_positions.push(Some(rtn_pos));
} else {
return_positions.push(None);
}
// reset current position
cur_idx += 1;
if cur_idx == positions.len() {
break;
}
cur_pos = positions[cur_idx];
}
}
// add values for things that won't lift at the end
while positions.len() > return_positions.len() {
return_positions.push(None);
}
assert_eq!(positions.len(), return_positions.len());
return_positions
}
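// Worked example (illustrative): with pairs [([0, 5], [100, 105])], position 3
// lifts exactly to 103, while position 7 falls outside every block and is
// padded with None by the loop above.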
pub fn lift_reference_positions_exact(
record: &bam::Record,
query_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
query_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, query_positions, false)
}
}
pub fn lift_query_positions_exact(
record: &bam::Record,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() | else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, reference_positions, true)
}
}
| {
reference_positions.iter().map(|_x| None).collect()
} | conditional_block |
lib.rs | use itertools::multiunzip;
use rust_htslib::{bam, bam::ext::BamRecordExtensions};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
/// Merge two lists into a sorted list
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
pub fn merge_two_lists<T>(left: &[T], right: &[T]) -> Vec<T>
where
T: Ord,
T: Clone,
{
let mut x: Vec<T> = left.iter().chain(right.iter()).cloned().collect();
x.sort();
x
}
/// Merge two lists based on a key
/// Normal sort is supposed to be very fast on two sorted lists
/// <https://doc.rust-lang.org/std/vec/struct.Vec.html#current-implementation-6>
/// ```
/// use bamlift::*;
/// let x = vec![1,3];
/// let x_q = vec!["a","b"];
/// let y = vec![2,4];
/// let y_q = vec!["c", "d"];
/// let z = merge_two_lists_with_qual(&x, &x_q, &y, &y_q);
/// assert_eq!(z, vec![(1,"a"), (2,"c"), (3,"b"), (4, "d")]);
/// ```
pub fn merge_two_lists_with_qual<T, U>(
left: &[T],
left_q: &[U],
right: &[T],
right_q: &[U],
) -> Vec<(T, U)>
where
T: Ord,
T: Clone,
U: Clone,
{
let l = left
.iter()
.zip(left_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let r = right
.iter()
.zip(right_q.iter())
.map(|(a, b)| (a.clone(), b.clone()));
let mut x: Vec<(T, U)> = l.chain(r).collect();
x.sort_by_key(|(a, _b)| a.clone());
x
}
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence(
record: &bam::Record,
input_positions: &[i64],
) -> Vec<i64> |
/// get positions on the complemented sequence in the cigar record
pub fn positions_on_complimented_sequence_in_place(
record: &bam::Record,
input_positions: &mut Vec<i64>,
part_of_range: bool,
) {
if !record.is_reverse() {
return;
}
let seq_len = i64::try_from(record.seq_len()).unwrap();
// need to correct for going from [) to (] if we are part of a range
let offset = if part_of_range { 0 } else { 1 };
for p in input_positions.iter_mut() {
*p = seq_len - *p - offset;
}
input_positions.reverse();
}
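// Worked example (illustrative): seq_len 10 with part_of_range = true maps the
// half-open range [2, 5) to [10 - 5, 10 - 2) = [5, 8); the offset of 1 is only
// needed for standalone positions, where [) would otherwise become (].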
#[inline(always)]
pub fn is_sorted<T>(v: &[T]) -> bool
where
T: Ord,
{
v.windows(2).all(|w| w[0] <= w[1])
}
/// search a sorted array for insertion positions of another sorted array
/// returned index i satisfies
/// left
/// a\[i-1\] < v <= a\[i\]
/// right
/// a\[i-1\] <= v < a\[i\]
/// <https://numpy.org/doc/stable/reference/generated/numpy.searchsorted.html>
/// ```
/// use bamlift::*;
/// let a = vec![1, 2, 3, 5, 6, 7, 8, 9, 10];
/// let v = vec![0, 1, 3, 4, 11, 11];
/// let indexes = search_sorted(&a, &v);
/// assert_eq!(indexes, vec![0, 0, 2, 3, 9, 9]);
/// ```
pub fn search_sorted<T>(a: &[T], v: &[T]) -> Vec<usize>
where
T: Ord,
T: Display,
[T]: Debug,
{
if !is_sorted(v) {
panic!("v is not sorted: {:?}", v);
}
let mut indexes = Vec::with_capacity(v.len());
let mut a_idx = 0;
for cur_v in v {
while a_idx < a.len() {
// check starting condition
if a_idx == 0 && *cur_v <= a[a_idx] {
indexes.push(0);
break;
} else if a_idx == 0 {
a_idx += 1;
}
// end condition
if a_idx == a.len() - 1 && *cur_v > a[a_idx] {
indexes.push(a_idx + 1);
break;
}
// middle of the array
else if (a[a_idx - 1] < *cur_v) && (*cur_v <= a[a_idx]) {
indexes.push(a_idx);
break;
}
a_idx += 1;
}
}
log::trace!("search_sorted: {:?}\n{:?}", v, indexes);
indexes
}
//
// CLOSEST LIFTOVER FUNCTIONS
//
/// This is a helper function for liftover_closest that should only be called from there.
/// The exception is test cases, where it is easier to test this function
/// directly.
fn liftover_closest(
positions: &[i64],
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
) -> Vec<Option<i64>> {
// skip empty
if positions.is_empty() {
return vec![];
}
if aligned_block_pairs.is_empty() {
return positions.iter().map(|_x| None).collect();
}
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the closest position for every position
let mut starting_block = 0;
let ending_block = aligned_block_pairs.len();
let mut pos_mapping = HashMap::new();
for cur_pos in positions {
pos_mapping.insert(cur_pos, (-1, i64::MAX));
for block_index in starting_block..ending_block {
// get the current alignment block
let ([q_st, q_en], [r_st, r_en]) = &aligned_block_pairs[block_index];
// get the previous closest position
let (best_r_pos, best_diff) = pos_mapping.get_mut(cur_pos).unwrap();
// exact match found
if cur_pos >= q_st && cur_pos < q_en {
let dist_from_start = cur_pos - q_st;
*best_diff = 0;
*best_r_pos = r_st + dist_from_start;
break;
}
// we are before the start of the block
else if cur_pos < q_st {
let diff = (q_st - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_st;
}
}
// we are past the end of the block
else if cur_pos >= q_en {
let diff = (q_en - cur_pos).abs();
if diff < *best_diff {
*best_diff = diff;
*best_r_pos = *r_en;
}
// we don't need to return to previous blocks since the input is sorted
starting_block = block_index;
}
}
}
let mut rtn = vec![];
for q_pos in positions {
let (r_pos, diff) = pos_mapping.get(q_pos).unwrap();
if *r_pos == -1 && *diff == i64::MAX {
rtn.push(None);
} else {
rtn.push(Some(*r_pos));
}
}
assert_eq!(rtn.len(), positions.len());
rtn
}
/// find the closest reference positions for a list of query positions
pub fn lift_reference_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
query_positions: &[i64],
) -> Vec<Option<i64>> {
liftover_closest(query_positions, aligned_block_pairs)
}
/// find the closest query positions for a list of reference positions
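/// A doctest sketch; the block pairs are flipped internally, so a reference
/// position inside [100, 110) lifts back into the query block [0, 10):
/// ```
/// use bamlift::*;
/// let pairs = vec![([0, 10], [100, 110])];
/// assert_eq!(lift_query_positions(&pairs, &[103]), vec![Some(3)]);
/// ```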
pub fn lift_query_positions(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
// if lifting to the query, we need to reverse the pairs
let aligned_block_pairs = aligned_block_pairs.iter().map(|(q, r)| (*r, *q)).collect();
liftover_closest(reference_positions, &aligned_block_pairs)
}
fn lift_range(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
starts: &[i64],
ends: &[i64],
lift_reference_to_query: bool,
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
assert_eq!(starts.len(), ends.len());
let (ref_starts, ref_ends) = if !lift_reference_to_query {
(
lift_reference_positions(aligned_block_pairs, starts),
lift_reference_positions(aligned_block_pairs, ends),
)
} else {
(
lift_query_positions(aligned_block_pairs, starts),
lift_query_positions(aligned_block_pairs, ends),
)
};
assert_eq!(ref_starts.len(), ref_ends.len());
let rtn = ref_starts
.into_iter()
.zip(ref_ends.into_iter())
.map(|(start, end)| match (start, end) {
(Some(start), Some(end)) => {
if start == end {
(None, None, None)
} else {
(Some(start), Some(end), Some(end - start))
}
}
_ => (None, None, None),
})
.collect::<Vec<_>>();
multiunzip(rtn)
}
/// Find the closest lifted range for each query [start, end) pair
pub fn lift_query_range(
record: &bam::Record,
starts: &[i64],
ends: &[i64],
) -> (Vec<Option<i64>>, Vec<Option<i64>>, Vec<Option<i64>>) {
// get the aligned block pairs
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
lift_range(&aligned_block_pairs, starts, ends, false)
}
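// Design note: a start/end pair that lifts to the same coordinate collapses to
// (None, None, None) in lift_range, so zero-length lifted ranges are dropped
// rather than reported as empty intervals.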
//
// EXACT LIFTOVER FUNCTIONS
//
/// liftover positions using the cigar string
fn liftover_exact(
aligned_block_pairs: &Vec<([i64; 2], [i64; 2])>,
positions: &[i64],
lift_reference_to_query: bool,
) -> Vec<Option<i64>> {
assert!(
is_sorted(positions),
"Positions must be sorted before calling liftover!"
);
// find the shared positions in the reference
let mut return_positions = vec![];
let mut cur_idx = 0;
// ends are not inclusive, I checked.
for ([q_st, q_en], [r_st, r_en]) in aligned_block_pairs {
let (st, en) = if !lift_reference_to_query {
(q_st, q_en)
} else {
(r_st, r_en)
};
// check bounds
if cur_idx == positions.len() {
break;
}
let mut cur_pos = positions[cur_idx];
// need to go to the next block
while cur_pos < *en {
if cur_pos >= *st {
let dist_from_start = cur_pos - st;
let rtn_pos = if !lift_reference_to_query {
r_st + dist_from_start
} else {
q_st + dist_from_start
};
return_positions.push(Some(rtn_pos));
} else {
return_positions.push(None);
}
// reset current position
cur_idx += 1;
if cur_idx == positions.len() {
break;
}
cur_pos = positions[cur_idx];
}
}
// add values for things that won't lift at the end
while positions.len() > return_positions.len() {
return_positions.push(None);
}
assert_eq!(positions.len(), return_positions.len());
return_positions
}
pub fn lift_reference_positions_exact(
record: &bam::Record,
query_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
query_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, query_positions, false)
}
}
pub fn lift_query_positions_exact(
record: &bam::Record,
reference_positions: &[i64],
) -> Vec<Option<i64>> {
if record.is_unmapped() {
reference_positions.iter().map(|_x| None).collect()
} else {
let aligned_block_pairs: Vec<([i64; 2], [i64; 2])> = record.aligned_block_pairs().collect();
liftover_exact(&aligned_block_pairs, reference_positions, true)
}
}
| {
// reverse positions if needed
let positions: Vec<i64> = if record.is_reverse() {
let seq_len = i64::try_from(record.seq_len()).unwrap();
input_positions
.iter()
.rev()
.map(|p| seq_len - p - 1)
.collect()
} else {
input_positions.to_vec()
};
positions
} | identifier_body |
chown.rs | // This file is part of the uutils coreutils package.
//
// (c) Jian Zeng <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid
#[macro_use]
extern crate uucore;
pub use uucore::entries::{self, Group, Locate, Passwd};
use uucore::fs::resolve_relative_path;
use uucore::libc::{gid_t, uid_t};
use uucore::perms::{wrap_chown, Verbosity};
use uucore::error::{FromIo, UResult, USimpleError};
use clap::{crate_version, App, Arg};
use walkdir::WalkDir;
use std::fs::{self, Metadata};
use std::os::unix::fs::MetadataExt;
use std::convert::AsRef;
use std::path::Path;
use uucore::InvalidEncodingHandling;
static ABOUT: &str = "change file owner and group";
pub mod options {
pub mod verbosity {
pub static CHANGES: &str = "changes";
pub static QUIET: &str = "quiet";
pub static SILENT: &str = "silent";
pub static VERBOSE: &str = "verbose";
}
pub mod preserve_root {
pub static PRESERVE: &str = "preserve-root";
pub static NO_PRESERVE: &str = "no-preserve-root";
}
pub mod dereference {
pub static DEREFERENCE: &str = "dereference";
pub static NO_DEREFERENCE: &str = "no-dereference";
}
pub static FROM: &str = "from";
pub static RECURSIVE: &str = "recursive";
pub mod traverse {
pub static TRAVERSE: &str = "H";
pub static NO_TRAVERSE: &str = "P";
pub static EVERY: &str = "L";
}
pub static REFERENCE: &str = "reference";
}
static ARG_OWNER: &str = "owner";
static ARG_FILES: &str = "files";
const FTS_COMFOLLOW: u8 = 1;
const FTS_PHYSICAL: u8 = 1 << 1;
const FTS_LOGICAL: u8 = 1 << 2;
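// Illustrative mapping to the clap flags declared below: -H sets
// FTS_COMFOLLOW | FTS_PHYSICAL, -L sets FTS_LOGICAL, and -P (the default)
// sets FTS_PHYSICAL.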
fn get_usage() -> String {
format!(
"{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...",
executable!()
)
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args
.collect_str(InvalidEncodingHandling::Ignore)
.accept_any();
let usage = get_usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* First arg is the owner/group */
let owner = matches.value_of(ARG_OWNER).unwrap();
/* Then the list of files */
let files: Vec<String> = matches
.values_of(ARG_FILES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let preserve_root = matches.is_present(options::preserve_root::PRESERVE);
let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) {
1
} else {
0
};
let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) {
FTS_COMFOLLOW | FTS_PHYSICAL
} else if matches.is_present(options::traverse::EVERY) {
FTS_LOGICAL
} else {
FTS_PHYSICAL
};
let recursive = matches.is_present(options::RECURSIVE);
if recursive {
if bit_flag == FTS_PHYSICAL {
if derefer == 1 {
return Err(USimpleError::new(1, "-R --dereference requires -H or -L"));
}
derefer = 0;
}
} else {
bit_flag = FTS_PHYSICAL;
}
let verbosity = if matches.is_present(options::verbosity::CHANGES) {
Verbosity::Changes
} else if matches.is_present(options::verbosity::SILENT)
|| matches.is_present(options::verbosity::QUIET)
{
Verbosity::Silent
} else if matches.is_present(options::verbosity::VERBOSE) {
Verbosity::Verbose
} else {
Verbosity::Normal
};
let filter = if let Some(spec) = matches.value_of(options::FROM) {
match parse_spec(spec)? {
(Some(uid), None) => IfFrom::User(uid),
(None, Some(gid)) => IfFrom::Group(gid),
(Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid),
(None, None) => IfFrom::All,
}
} else {
IfFrom::All
};
let dest_uid: Option<u32>;
let dest_gid: Option<u32>;
if let Some(file) = matches.value_of(options::REFERENCE) {
let meta = fs::metadata(&file)
.map_err_context(|| format!("failed to get attributes of '{}'", file))?;
dest_gid = Some(meta.gid());
dest_uid = Some(meta.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn | (spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
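// Examples (illustrative): "alice" -> (Some(uid), None), ":staff" ->
// (None, Some(gid)), "alice:staff" -> (Some(uid), Some(gid)), and both ":"
// and "" -> (None, None), since neither field is present.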
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($e) => $err,
}
};
}
impl Chowner {
fn exec(&self) -> UResult<()> {
let mut ret = 0;
for f in &self.files {
ret |= self.traverse(f);
}
if ret != 0 {
return Err(ret.into());
}
Ok(())
}
fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 {
let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL;
let path = root.as_ref();
let meta = match self.obtain_meta(path, follow_arg) {
Some(m) => m,
_ => return 1,
};
// Prohibit only if:
// (--preserve-root and -R present) &&
// (
// (argument is not symlink && resolved to be '/') ||
// (argument is symlink && should follow argument && resolved to be '/')
// )
if self.recursive && self.preserve_root {
let may_exist = if follow_arg {
path.canonicalize().ok()
} else {
let real = resolve_relative_path(path);
if real.is_dir() {
Some(real.canonicalize().expect("failed to get real path"))
} else {
Some(real.into_owned())
}
};
if let Some(p) = may_exist {
if p.parent().is_none() {
show_error!("it is dangerous to operate recursively on '/'");
show_error!("use --no-preserve-root to override this failsafe");
return 1;
}
}
}
let ret = if self.matched(meta.uid(), meta.gid()) {
match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow_arg,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
} else {
0
};
if !self.recursive {
ret
} else {
ret | self.dive_into(&root)
}
}
fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 {
let mut ret = 0;
let root = root.as_ref();
let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0;
for entry in WalkDir::new(root).follow_links(follow).min_depth(1) {
let entry = unwrap!(entry, e, {
ret = 1;
show_error!("{}", e);
continue;
});
let path = entry.path();
let meta = match self.obtain_meta(path, follow) {
Some(m) => m,
_ => {
ret = 1;
continue;
}
};
if !self.matched(meta.uid(), meta.gid()) {
continue;
}
ret = match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
}
ret
}
fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> {
use self::Verbosity::*;
let path = path.as_ref();
let meta = if follow {
unwrap!(path.metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot access '{}': {}", path.display(), e),
}
return None;
})
} else {
unwrap!(path.symlink_metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot dereference '{}': {}", path.display(), e),
}
return None;
})
};
Some(meta)
}
#[inline]
fn matched(&self, uid: uid_t, gid: gid_t) -> bool {
match self.filter {
IfFrom::All => true,
IfFrom::User(u) => u == uid,
IfFrom::Group(g) => g == gid,
IfFrom::UserGroup(u, g) => u == uid && g == gid,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_spec() {
assert!(matches!(parse_spec(":"), Ok((None, None))));
assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: "));
}
}
| parse_spec | identifier_name |
chown.rs | // This file is part of the uutils coreutils package.
//
// (c) Jian Zeng <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid
#[macro_use]
extern crate uucore;
pub use uucore::entries::{self, Group, Locate, Passwd};
use uucore::fs::resolve_relative_path;
use uucore::libc::{gid_t, uid_t};
use uucore::perms::{wrap_chown, Verbosity};
use uucore::error::{FromIo, UResult, USimpleError};
use clap::{crate_version, App, Arg};
use walkdir::WalkDir;
use std::fs::{self, Metadata};
use std::os::unix::fs::MetadataExt;
use std::convert::AsRef;
use std::path::Path;
use uucore::InvalidEncodingHandling;
static ABOUT: &str = "change file owner and group";
pub mod options {
pub mod verbosity {
pub static CHANGES: &str = "changes";
pub static QUIET: &str = "quiet";
pub static SILENT: &str = "silent";
pub static VERBOSE: &str = "verbose";
}
pub mod preserve_root {
pub static PRESERVE: &str = "preserve-root";
pub static NO_PRESERVE: &str = "no-preserve-root";
}
pub mod dereference {
pub static DEREFERENCE: &str = "dereference";
pub static NO_DEREFERENCE: &str = "no-dereference";
}
pub static FROM: &str = "from";
pub static RECURSIVE: &str = "recursive";
pub mod traverse {
pub static TRAVERSE: &str = "H";
pub static NO_TRAVERSE: &str = "P";
pub static EVERY: &str = "L";
}
pub static REFERENCE: &str = "reference";
}
static ARG_OWNER: &str = "owner";
static ARG_FILES: &str = "files";
const FTS_COMFOLLOW: u8 = 1;
const FTS_PHYSICAL: u8 = 1 << 1;
const FTS_LOGICAL: u8 = 1 << 2;
fn get_usage() -> String {
format!(
"{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...",
executable!()
)
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args
.collect_str(InvalidEncodingHandling::Ignore)
.accept_any();
let usage = get_usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* First arg is the owner/group */
let owner = matches.value_of(ARG_OWNER).unwrap();
/* Then the list of files */
let files: Vec<String> = matches
.values_of(ARG_FILES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let preserve_root = matches.is_present(options::preserve_root::PRESERVE);
let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) {
1
} else {
0
};
let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) {
FTS_COMFOLLOW | FTS_PHYSICAL
} else if matches.is_present(options::traverse::EVERY) {
FTS_LOGICAL
} else {
FTS_PHYSICAL
};
let recursive = matches.is_present(options::RECURSIVE);
if recursive {
if bit_flag == FTS_PHYSICAL {
if derefer == 1 {
return Err(USimpleError::new(1, "-R --dereference requires -H or -L"));
}
derefer = 0;
}
} else {
bit_flag = FTS_PHYSICAL;
}
let verbosity = if matches.is_present(options::verbosity::CHANGES) {
Verbosity::Changes
} else if matches.is_present(options::verbosity::SILENT)
|| matches.is_present(options::verbosity::QUIET)
{
Verbosity::Silent
} else if matches.is_present(options::verbosity::VERBOSE) {
Verbosity::Verbose
} else {
Verbosity::Normal
};
let filter = if let Some(spec) = matches.value_of(options::FROM) {
match parse_spec(spec)? {
(Some(uid), None) => IfFrom::User(uid),
(None, Some(gid)) => IfFrom::Group(gid),
(Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid),
(None, None) => IfFrom::All,
}
} else {
IfFrom::All
};
let dest_uid: Option<u32>;
let dest_gid: Option<u32>;
if let Some(file) = matches.value_of(options::REFERENCE) {
let meta = fs::metadata(&file)
.map_err_context(|| format!("failed to get attributes of '{}'", file))?;
dest_gid = Some(meta.gid());
dest_uid = Some(meta.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
) | )
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($e) => $err,
}
};
}
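// Usage sketch (illustrative): `let meta = unwrap!(path.metadata(), e, {
// show_error!("{}", e); return None; });` either yields the Ok value or runs
// the error block, exactly like the match it expands to.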
impl Chowner {
fn exec(&self) -> UResult<()> {
let mut ret = 0;
for f in &self.files {
ret |= self.traverse(f);
}
if ret != 0 {
return Err(ret.into());
}
Ok(())
}
fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 {
let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL;
let path = root.as_ref();
let meta = match self.obtain_meta(path, follow_arg) {
Some(m) => m,
_ => return 1,
};
// Prohibit only if:
// (--preserve-root and -R present) &&
// (
// (argument is not symlink && resolved to be '/') ||
// (argument is symlink && should follow argument && resolved to be '/')
// )
if self.recursive && self.preserve_root {
let may_exist = if follow_arg {
path.canonicalize().ok()
} else {
let real = resolve_relative_path(path);
if real.is_dir() {
Some(real.canonicalize().expect("failed to get real path"))
} else {
Some(real.into_owned())
}
};
if let Some(p) = may_exist {
if p.parent().is_none() {
show_error!("it is dangerous to operate recursively on '/'");
show_error!("use --no-preserve-root to override this failsafe");
return 1;
}
}
}
let ret = if self.matched(meta.uid(), meta.gid()) {
match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow_arg,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
} else {
0
};
if !self.recursive {
ret
} else {
ret | self.dive_into(&root)
}
}
fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 {
let mut ret = 0;
let root = root.as_ref();
let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0;
for entry in WalkDir::new(root).follow_links(follow).min_depth(1) {
let entry = unwrap!(entry, e, {
ret = 1;
show_error!("{}", e);
continue;
});
let path = entry.path();
let meta = match self.obtain_meta(path, follow) {
Some(m) => m,
_ => {
ret = 1;
continue;
}
};
if !self.matched(meta.uid(), meta.gid()) {
continue;
}
ret = match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
}
ret
}
fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> {
use self::Verbosity::*;
let path = path.as_ref();
let meta = if follow {
unwrap!(path.metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot access '{}': {}", path.display(), e),
}
return None;
})
} else {
unwrap!(path.symlink_metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot dereference '{}': {}", path.display(), e),
}
return None;
})
};
Some(meta)
}
#[inline]
fn matched(&self, uid: uid_t, gid: gid_t) -> bool {
match self.filter {
IfFrom::All => true,
IfFrom::User(u) => u == uid,
IfFrom::Group(g) => g == gid,
IfFrom::UserGroup(u, g) => u == uid && g == gid,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_spec() {
assert!(matches!(parse_spec(":"), Ok((None, None))));
assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: "));
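// Illustrative extra case: an empty spec also means "no owner, no group".
assert!(matches!(parse_spec(""), Ok((None, None))));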
}
} | .arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]), | random_line_split |
chown.rs | // This file is part of the uutils coreutils package.
//
// (c) Jian Zeng <[email protected]>
//
// For the full copyright and license information, please view the LICENSE
// file that was distributed with this source code.
// spell-checker:ignore (ToDO) COMFOLLOW Chowner Passwd RFILE RFILE's derefer dgid duid
#[macro_use]
extern crate uucore;
pub use uucore::entries::{self, Group, Locate, Passwd};
use uucore::fs::resolve_relative_path;
use uucore::libc::{gid_t, uid_t};
use uucore::perms::{wrap_chown, Verbosity};
use uucore::error::{FromIo, UResult, USimpleError};
use clap::{crate_version, App, Arg};
use walkdir::WalkDir;
use std::fs::{self, Metadata};
use std::os::unix::fs::MetadataExt;
use std::convert::AsRef;
use std::path::Path;
use uucore::InvalidEncodingHandling;
static ABOUT: &str = "change file owner and group";
pub mod options {
pub mod verbosity {
pub static CHANGES: &str = "changes";
pub static QUIET: &str = "quiet";
pub static SILENT: &str = "silent";
pub static VERBOSE: &str = "verbose";
}
pub mod preserve_root {
pub static PRESERVE: &str = "preserve-root";
pub static NO_PRESERVE: &str = "no-preserve-root";
}
pub mod dereference {
pub static DEREFERENCE: &str = "dereference";
pub static NO_DEREFERENCE: &str = "no-dereference";
}
pub static FROM: &str = "from";
pub static RECURSIVE: &str = "recursive";
pub mod traverse {
pub static TRAVERSE: &str = "H";
pub static NO_TRAVERSE: &str = "P";
pub static EVERY: &str = "L";
}
pub static REFERENCE: &str = "reference";
}
static ARG_OWNER: &str = "owner";
static ARG_FILES: &str = "files";
const FTS_COMFOLLOW: u8 = 1;
const FTS_PHYSICAL: u8 = 1 << 1;
const FTS_LOGICAL: u8 = 1 << 2;
fn get_usage() -> String {
format!(
"{0} [OPTION]... [OWNER][:[GROUP]] FILE...\n{0} [OPTION]... --reference=RFILE FILE...",
executable!()
)
}
#[uucore_procs::gen_uumain]
pub fn uumain(args: impl uucore::Args) -> UResult<()> {
let args = args
.collect_str(InvalidEncodingHandling::Ignore)
.accept_any();
let usage = get_usage();
let matches = uu_app().usage(&usage[..]).get_matches_from(args);
/* First arg is the owner/group */
let owner = matches.value_of(ARG_OWNER).unwrap();
/* Then the list of files */
let files: Vec<String> = matches
.values_of(ARG_FILES)
.map(|v| v.map(ToString::to_string).collect())
.unwrap_or_default();
let preserve_root = matches.is_present(options::preserve_root::PRESERVE);
let mut derefer = if matches.is_present(options::dereference::NO_DEREFERENCE) {
1
} else {
0
};
let mut bit_flag = if matches.is_present(options::traverse::TRAVERSE) {
FTS_COMFOLLOW | FTS_PHYSICAL
} else if matches.is_present(options::traverse::EVERY) {
FTS_LOGICAL
} else {
FTS_PHYSICAL
};
let recursive = matches.is_present(options::RECURSIVE);
if recursive {
if bit_flag == FTS_PHYSICAL {
if derefer == 1 {
return Err(USimpleError::new(1, "-R --dereference requires -H or -L"));
}
derefer = 0;
}
} else {
bit_flag = FTS_PHYSICAL;
}
let verbosity = if matches.is_present(options::verbosity::CHANGES) {
Verbosity::Changes
} else if matches.is_present(options::verbosity::SILENT)
|| matches.is_present(options::verbosity::QUIET)
{
Verbosity::Silent
} else if matches.is_present(options::verbosity::VERBOSE) {
Verbosity::Verbose
} else {
Verbosity::Normal
};
let filter = if let Some(spec) = matches.value_of(options::FROM) {
match parse_spec(spec)? {
(Some(uid), None) => IfFrom::User(uid),
(None, Some(gid)) => IfFrom::Group(gid),
(Some(uid), Some(gid)) => IfFrom::UserGroup(uid, gid),
(None, None) => IfFrom::All,
}
} else {
IfFrom::All
};
let dest_uid: Option<u32>;
let dest_gid: Option<u32>;
if let Some(file) = matches.value_of(options::REFERENCE) {
let meta = fs::metadata(&file)
.map_err_context(|| format!("failed to get attributes of '{}'", file))?;
dest_gid = Some(meta.gid());
dest_uid = Some(meta.uid());
} else {
let (u, g) = parse_spec(owner)?;
dest_uid = u;
dest_gid = g;
}
let executor = Chowner {
bit_flag,
dest_uid,
dest_gid,
verbosity,
recursive,
dereference: derefer != 0,
filter,
preserve_root,
files,
};
executor.exec()
}
pub fn uu_app() -> App<'static, 'static> {
App::new(executable!())
.version(crate_version!())
.about(ABOUT)
.arg(
Arg::with_name(options::verbosity::CHANGES)
.short("c")
.long(options::verbosity::CHANGES)
.help("like verbose but report only when a change is made"),
)
.arg(Arg::with_name(options::dereference::DEREFERENCE).long(options::dereference::DEREFERENCE).help(
"affect the referent of each symbolic link (this is the default), rather than the symbolic link itself",
))
.arg(
Arg::with_name(options::dereference::NO_DEREFERENCE)
.short("h")
.long(options::dereference::NO_DEREFERENCE)
.help(
"affect symbolic links instead of any referenced file (useful only on systems that can change the ownership of a symlink)",
),
)
.arg(
Arg::with_name(options::FROM)
.long(options::FROM)
.help(
"change the owner and/or group of each file only if its current owner and/or group match those specified here. Either may be omitted, in which case a match is not required for the omitted attribute",
)
.value_name("CURRENT_OWNER:CURRENT_GROUP"),
)
.arg(
Arg::with_name(options::preserve_root::PRESERVE)
.long(options::preserve_root::PRESERVE)
.help("fail to operate recursively on '/'"),
)
.arg(
Arg::with_name(options::preserve_root::NO_PRESERVE)
.long(options::preserve_root::NO_PRESERVE)
.help("do not treat '/' specially (the default)"),
)
.arg(
Arg::with_name(options::verbosity::QUIET)
.long(options::verbosity::QUIET)
.help("suppress most error messages"),
)
.arg(
Arg::with_name(options::RECURSIVE)
.short("R")
.long(options::RECURSIVE)
.help("operate on files and directories recursively"),
)
.arg(
Arg::with_name(options::REFERENCE)
.long(options::REFERENCE)
.help("use RFILE's owner and group rather than specifying OWNER:GROUP values")
.value_name("RFILE")
.min_values(1),
)
.arg(Arg::with_name(options::verbosity::SILENT).short("f").long(options::verbosity::SILENT))
.arg(
Arg::with_name(options::traverse::TRAVERSE)
.short(options::traverse::TRAVERSE)
.help("if a command line argument is a symbolic link to a directory, traverse it")
.overrides_with_all(&[options::traverse::EVERY, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::EVERY)
.short(options::traverse::EVERY)
.help("traverse every symbolic link to a directory encountered")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::NO_TRAVERSE]),
)
.arg(
Arg::with_name(options::traverse::NO_TRAVERSE)
.short(options::traverse::NO_TRAVERSE)
.help("do not traverse any symbolic links (default)")
.overrides_with_all(&[options::traverse::TRAVERSE, options::traverse::EVERY]),
)
.arg(
Arg::with_name(options::verbosity::VERBOSE)
.long(options::verbosity::VERBOSE)
.help("output a diagnostic for every file processed"),
)
.arg(
Arg::with_name(ARG_OWNER)
.multiple(false)
.takes_value(true)
.required(true),
)
.arg(
Arg::with_name(ARG_FILES)
.multiple(true)
.takes_value(true)
.required(true)
.min_values(1),
)
}
fn parse_spec(spec: &str) -> UResult<(Option<u32>, Option<u32>)> {
let args = spec.split_terminator(':').collect::<Vec<_>>();
let usr_only = args.len() == 1 && !args[0].is_empty();
let grp_only = args.len() == 2 && args[0].is_empty();
let usr_grp = args.len() == 2 && !args[0].is_empty() && !args[1].is_empty();
let uid = if usr_only || usr_grp {
Some(
Passwd::locate(args[0])
.map_err(|_| USimpleError::new(1, format!("invalid user: '{}'", spec)))?
.uid(),
)
} else {
None
};
let gid = if grp_only || usr_grp {
Some(
Group::locate(args[1])
.map_err(|_| USimpleError::new(1, format!("invalid group: '{}'", spec)))?
.gid(),
)
} else {
None
};
Ok((uid, gid))
}
enum IfFrom {
All,
User(u32),
Group(u32),
UserGroup(u32, u32),
}
struct Chowner {
dest_uid: Option<u32>,
dest_gid: Option<u32>,
bit_flag: u8,
verbosity: Verbosity,
filter: IfFrom,
files: Vec<String>,
recursive: bool,
preserve_root: bool,
dereference: bool,
}
macro_rules! unwrap {
($m:expr, $e:ident, $err:block) => {
match $m {
Ok(meta) => meta,
Err($e) => $err,
}
};
}
impl Chowner {
fn exec(&self) -> UResult<()> {
let mut ret = 0;
for f in &self.files {
ret |= self.traverse(f);
}
if ret != 0 {
return Err(ret.into());
}
Ok(())
}
fn traverse<P: AsRef<Path>>(&self, root: P) -> i32 {
let follow_arg = self.dereference || self.bit_flag != FTS_PHYSICAL;
let path = root.as_ref();
let meta = match self.obtain_meta(path, follow_arg) {
Some(m) => m,
_ => return 1,
};
// Prohibit only if:
// (--preserve-root and -R present) &&
// (
// (argument is not symlink && resolved to be '/') ||
// (argument is symlink && should follow argument && resolved to be '/')
// )
if self.recursive && self.preserve_root {
let may_exist = if follow_arg {
path.canonicalize().ok()
} else {
let real = resolve_relative_path(path);
if real.is_dir() {
Some(real.canonicalize().expect("failed to get real path"))
} else {
Some(real.into_owned())
}
};
if let Some(p) = may_exist {
if p.parent().is_none() {
show_error!("it is dangerous to operate recursively on '/'");
show_error!("use --no-preserve-root to override this failsafe");
return 1;
}
}
}
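// Example (illustrative): `chown -R --preserve-root root /` resolves the
// argument to '/', whose parent() is None, so the guard above refuses to
// recurse and returns 1.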
let ret = if self.matched(meta.uid(), meta.gid()) {
match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow_arg,
self.verbosity.clone(),
) {
Ok(n) => {
if !n.is_empty() {
show_error!("{}", n);
}
0
}
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
} else {
0
};
if !self.recursive {
ret
} else {
ret | self.dive_into(&root)
}
}
fn dive_into<P: AsRef<Path>>(&self, root: P) -> i32 {
let mut ret = 0;
let root = root.as_ref();
let follow = self.dereference || self.bit_flag & FTS_LOGICAL != 0;
for entry in WalkDir::new(root).follow_links(follow).min_depth(1) {
let entry = unwrap!(entry, e, {
ret = 1;
show_error!("{}", e);
continue;
});
let path = entry.path();
let meta = match self.obtain_meta(path, follow) {
Some(m) => m,
_ => {
ret = 1;
continue;
}
};
if !self.matched(meta.uid(), meta.gid()) {
continue;
}
ret = match wrap_chown(
path,
&meta,
self.dest_uid,
self.dest_gid,
follow,
self.verbosity.clone(),
) {
Ok(n) => |
Err(e) => {
if self.verbosity != Verbosity::Silent {
show_error!("{}", e);
}
1
}
}
}
ret
}
fn obtain_meta<P: AsRef<Path>>(&self, path: P, follow: bool) -> Option<Metadata> {
use self::Verbosity::*;
let path = path.as_ref();
let meta = if follow {
unwrap!(path.metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot access '{}': {}", path.display(), e),
}
return None;
})
} else {
unwrap!(path.symlink_metadata(), e, {
match self.verbosity {
Silent => (),
_ => show_error!("cannot dereference '{}': {}", path.display(), e),
}
return None;
})
};
Some(meta)
}
#[inline]
fn matched(&self, uid: uid_t, gid: gid_t) -> bool {
match self.filter {
IfFrom::All => true,
IfFrom::User(u) => u == uid,
IfFrom::Group(g) => g == gid,
IfFrom::UserGroup(u, g) => u == uid && g == gid,
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_parse_spec() {
assert!(matches!(parse_spec(":"), Ok((None, None))));
assert!(format!("{}", parse_spec("::").err().unwrap()).starts_with("invalid group: "));
}
}
| {
if !n.is_empty() {
show_error!("{}", n);
}
0
} | conditional_block |
bls12_377_scalar.rs | //! This module implements field arithmetic for BLS12-377's scalar field.
use std::cmp::Ordering::Less;
use std::convert::TryInto;
use std::ops::{Add, Div, Mul, Neg, Sub};
use rand::Rng;
use unroll::unroll_for_loops;
use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng};
use crate::nonzero_multiplicative_inverse;
use std::cmp::Ordering;
use std::fmt;
use std::fmt::{Display, Formatter};
/// An element of the BLS12-377 curve's scalar field.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)]
pub struct Bls12377Scalar {
/// Montgomery representation, encoded with little-endian u64 limbs.
pub limbs: [u64; 4],
}
impl Bls12377Scalar {
/// The order of the field:
/// 8444461749428370424248824938781546531375899335154063827935233455917409239041
pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398];
/// R in the context of the Montgomery reduction, i.e. 2^256 % |F|.
pub(crate) const R: [u64; 4] =
[9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437];
/// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|.
pub(crate) const R2: [u64; 4] =
[2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129];
/// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|.
pub(crate) const R3: [u64; 4] =
[7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890];
/// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
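// Round-trip sketch (illustrative): from_canonical(c) stores c * R mod |F| and
// to_canonical multiplies by R^-1 again, so
// Bls12377Scalar::from_canonical([5, 0, 0, 0]).to_canonical() == [5, 0, 0, 0].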
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
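// Editor's sketch (added annotation, not part of the original crate): a direct
// check of the Montgomery constant above. Since MU = -ORDER^{-1} mod 2^64,
// multiplying MU by the low limb of ORDER must wrap to -1 = u64::MAX; that is
// exactly what makes `c + q * ORDER` divisible by 2^64 in each round of
// `montgomery_multiply` (the property asserted by `debug_assert_eq!(c[i], 0)`).
#[cfg(test)]
mod montgomery_constant_sketch {
    use super::Bls12377Scalar;

    #[test]
    fn mu_is_negated_inverse_of_order_mod_2_to_64() {
        // Only the low limb of ORDER matters modulo 2^64.
        assert_eq!(
            Bls12377Scalar::MU.wrapping_mul(Bls12377Scalar::ORDER[0]),
            u64::MAX
        );
    }
}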
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
}
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
}
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else { | }
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>) -> Self {
Self::from_canonical(v[..].try_into().unwrap())
}
fn from_canonical_u64(n: u64) -> Self {
Self::from_canonical([n, 0, 0, 0])
}
fn is_valid_canonical_u64(v: &[u64]) -> bool {
v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less
}
fn multiplicative_inverse_assuming_nonzero(&self) -> Self {
// Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R.
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER);
Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) }
}
fn rand() -> Self {
Self {
limbs: rand_range(Self::ORDER),
}
}
fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
Self {
limbs: rand_range_from_rng(Self::ORDER, rng),
}
}
}
impl Ord for Bls12377Scalar {
fn cmp(&self, other: &Self) -> Ordering {
self.cmp_helper(other)
}
}
impl PartialOrd for Bls12377Scalar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Bls12377Scalar {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
field_to_biguint(*self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use crate::{Bls12377Scalar, Field};
use crate::conversions::u64_slice_to_biguint;
use crate::test_arithmetic;
#[test]
fn bls12scalar_to_and_from_canonical() {
let a = [1, 2, 3, 4];
let a_biguint = u64_slice_to_biguint(&a);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R);
let a_bls12scalar = Bls12377Scalar::from_canonical(a);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs),
&a_biguint * &r_biguint % &order_biguint);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint);
}
#[test]
fn mul_bls12_scalar() {
let a = [1, 2, 3, 4];
let b = [3, 4, 5, 6];
let a_biguint = u64_slice_to_biguint(&a);
let b_biguint = u64_slice_to_biguint(&b);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let a_blsbase = Bls12377Scalar::from_canonical(a);
let b_blsbase = Bls12377Scalar::from_canonical(b);
assert_eq!(
u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()),
a_biguint * b_biguint % order_biguint);
}
#[test]
fn test_bls12_rand() {
let random_element = Bls12377Scalar::rand();
for i in 0..4 {
assert_ne!(random_element.limbs[i], 0x0);
}
}
#[test]
fn exp() {
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9));
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27));
}
#[test]
fn negation() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO);
}
}
#[test]
fn multiplicative_inverse() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
let i_inv_blsscalar = i_blsscalar.multiplicative_inverse();
if i == 0 {
assert!(i_inv_blsscalar.is_none());
} else {
assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE);
}
}
}
#[test]
fn batch_multiplicative_inverse() {
let mut x = Vec::new();
for i in 1..25 {
x.push(Bls12377Scalar::from_canonical_u64(i));
}
let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x);
assert_eq!(x.len(), x_inv.len());
for (x_i, x_i_inv) in x.into_iter().zip(x_inv) {
assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE);
}
}
#[test]
fn num_bits() {
assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5);
assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64);
assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5)
}
#[test]
fn roots_of_unity() {
for n_power in 0..10 {
let n = 1 << n_power as u64;
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE);
if n > 1 {
assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE)
}
}
}
#[test]
fn primitive_root_order() {
for n_power in 0..10 {
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
let order = Bls12377Scalar::generator_order(root);
assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power);
}
}
test_arithmetic!(crate::Bls12377Scalar);
}
|
Self { limbs: sub(Self::ORDER, self.limbs) }
}
| conditional_block |
bls12_377_scalar.rs | //! This module implements field arithmetic for BLS12-377's scalar field.
use std::cmp::Ordering::Less;
use std::convert::TryInto;
use std::ops::{Add, Div, Mul, Neg, Sub};
use rand::Rng;
use unroll::unroll_for_loops;
use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng};
use crate::nonzero_multiplicative_inverse;
use std::cmp::Ordering;
use std::fmt;
use std::fmt::{Display, Formatter};
/// An element of the BLS12 group's scalar field.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)]
pub struct Bls12377Scalar {
/// Montgomery representation, encoded with little-endian u64 limbs.
pub limbs: [u64; 4],
}
impl Bls12377Scalar {
/// The order of the field:
/// 8444461749428370424248824938781546531375899335154063827935233455917409239041
pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398];
/// R in the context of the Montgomery reduction, i.e. 2^256 % |F|.
pub(crate) const R: [u64; 4] =
[9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437];
/// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|.
pub(crate) const R2: [u64; 4] =
[2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129];
/// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|.
pub(crate) const R3: [u64; 4] =
[7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890];
/// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
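// Editor's sketch (added annotation, not part of the original crate):
// `from_canonical` computes M(c, R^2) = c * R, so the canonical integer 1 is
// stored as R itself; this is why `Field::ONE` later in this file is defined
// as `Self { limbs: Self::R }`.
#[cfg(test)]
mod montgomery_form_of_one_sketch {
    use super::Bls12377Scalar;

    #[test]
    fn one_is_stored_as_r() {
        let one = Bls12377Scalar::from_canonical([1, 0, 0, 0]);
        assert_eq!(one.limbs, Bls12377Scalar::R);
        assert_eq!(one.to_canonical(), [1, 0, 0, 0]);
    }
}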
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
}
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
}
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
Self { limbs: sub(Self::ORDER, self.limbs) }
}
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>) -> Self {
Self::from_canonical(v[..].try_into().unwrap())
}
fn from_canonical_u64(n: u64) -> Self {
Self::from_canonical([n, 0, 0, 0])
}
fn is_valid_canonical_u64(v: &[u64]) -> bool {
v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less
}
fn multiplicative_inverse_assuming_nonzero(&self) -> Self {
// Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R.
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER);
Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) }
}
fn rand() -> Self {
Self {
limbs: rand_range(Self::ORDER),
}
}
fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
Self {
limbs: rand_range_from_rng(Self::ORDER, rng),
}
}
}
impl Ord for Bls12377Scalar {
fn cmp(&self, other: &Self) -> Ordering {
self.cmp_helper(other)
}
}
impl PartialOrd for Bls12377Scalar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Bls12377Scalar {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
field_to_biguint(*self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use crate::{Bls12377Scalar, Field};
use crate::conversions::u64_slice_to_biguint;
use crate::test_arithmetic;
#[test]
fn bls12scalar_to_and_from_canonical() {
let a = [1, 2, 3, 4];
let a_biguint = u64_slice_to_biguint(&a);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R);
let a_bls12scalar = Bls12377Scalar::from_canonical(a);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs),
&a_biguint * &r_biguint % &order_biguint);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint);
}
#[test]
fn mul_bls12_scalar() {
let a = [1, 2, 3, 4];
let b = [3, 4, 5, 6];
let a_biguint = u64_slice_to_biguint(&a);
let b_biguint = u64_slice_to_biguint(&b);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let a_blsbase = Bls12377Scalar::from_canonical(a);
let b_blsbase = Bls12377Scalar::from_canonical(b);
assert_eq!(
u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()),
a_biguint * b_biguint % order_biguint);
}
#[test]
fn test_bls12_rand() {
let random_element = Bls12377Scalar::rand();
for i in 0..4 {
assert_ne!(random_element.limbs[i], 0x0);
}
}
#[test]
fn exp() {
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9));
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27));
}
#[test]
fn negation() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO);
}
}
#[test]
fn multiplicative_inverse() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
let i_inv_blsscalar = i_blsscalar.multiplicative_inverse();
if i == 0 {
assert!(i_inv_blsscalar.is_none());
} else {
assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE);
}
}
}
#[test]
fn batch_multiplicative_inverse() {
let mut x = Vec::new();
for i in 1..25 {
x.push(Bls12377Scalar::from_canonical_u64(i));
}
let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x);
assert_eq!(x.len(), x_inv.len());
for (x_i, x_i_inv) in x.into_iter().zip(x_inv) {
assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE);
}
}
#[test]
fn n | ) {
assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5);
assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64);
assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5)
}
#[test]
fn roots_of_unity() {
for n_power in 0..10 {
let n = 1 << n_power as u64;
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE);
if n > 1 {
assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE)
}
}
}
#[test]
fn primitive_root_order() {
for n_power in 0..10 {
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
let order = Bls12377Scalar::generator_order(root);
assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power);
}
}
test_arithmetic!(crate::Bls12377Scalar);
}
| um_bits( | identifier_name |
bls12_377_scalar.rs | //! This module implements field arithmetic for BLS12-377's scalar field.
use std::cmp::Ordering::Less;
use std::convert::TryInto;
use std::ops::{Add, Div, Mul, Neg, Sub};
use rand::Rng;
use unroll::unroll_for_loops;
use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng};
use crate::nonzero_multiplicative_inverse;
use std::cmp::Ordering;
use std::fmt;
use std::fmt::{Display, Formatter};
/// An element of the BLS12 group's scalar field.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)]
pub struct Bls12377Scalar {
/// Montgomery representation, encoded with little-endian u64 limbs.
pub limbs: [u64; 4],
}
impl Bls12377Scalar {
/// The order of the field:
/// 8444461749428370424248824938781546531375899335154063827935233455917409239041
pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398];
/// R in the context of the Montgomery reduction, i.e. 2^256 % |F|.
pub(crate) const R: [u64; 4] =
[9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437];
/// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|.
pub(crate) const R2: [u64; 4] =
[2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129];
/// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|.
pub(crate) const R3: [u64; 4] =
[7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890];
/// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
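// Editor's sketch (hypothetical test, not in the original source): limbs stay
// reduced below ORDER between operations, so the widening addition in `Add`
// below cannot overflow four limbs (2 * ORDER < 2^256), and `Sub` undoes it
// exactly.
#[cfg(test)]
mod add_sub_roundtrip_sketch {
    use super::Bls12377Scalar;
    use crate::Field;

    #[test]
    fn add_then_sub_is_identity() {
        let a = Bls12377Scalar::rand();
        let b = Bls12377Scalar::rand();
        assert_eq!(a + b - b, a);
    }
}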
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self {
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
}
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) } | }
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
Self { limbs: sub(Self::ORDER, self.limbs) }
}
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>) -> Self {
Self::from_canonical(v[..].try_into().unwrap())
}
fn from_canonical_u64(n: u64) -> Self {
Self::from_canonical([n, 0, 0, 0])
}
fn is_valid_canonical_u64(v: &[u64]) -> bool {
v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less
}
fn multiplicative_inverse_assuming_nonzero(&self) -> Self {
// Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R.
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER);
Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) }
}
fn rand() -> Self {
Self {
limbs: rand_range(Self::ORDER),
}
}
fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
Self {
limbs: rand_range_from_rng(Self::ORDER, rng),
}
}
}
impl Ord for Bls12377Scalar {
fn cmp(&self, other: &Self) -> Ordering {
self.cmp_helper(other)
}
}
impl PartialOrd for Bls12377Scalar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Bls12377Scalar {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
field_to_biguint(*self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use crate::{Bls12377Scalar, Field};
use crate::conversions::u64_slice_to_biguint;
use crate::test_arithmetic;
#[test]
fn bls12scalar_to_and_from_canonical() {
let a = [1, 2, 3, 4];
let a_biguint = u64_slice_to_biguint(&a);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R);
let a_bls12scalar = Bls12377Scalar::from_canonical(a);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs),
&a_biguint * &r_biguint % &order_biguint);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint);
}
#[test]
fn mul_bls12_scalar() {
let a = [1, 2, 3, 4];
let b = [3, 4, 5, 6];
let a_biguint = u64_slice_to_biguint(&a);
let b_biguint = u64_slice_to_biguint(&b);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let a_blsbase = Bls12377Scalar::from_canonical(a);
let b_blsbase = Bls12377Scalar::from_canonical(b);
assert_eq!(
u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()),
a_biguint * b_biguint % order_biguint);
}
#[test]
fn test_bls12_rand() {
let random_element = Bls12377Scalar::rand();
for i in 0..4 {
assert_ne!(random_element.limbs[i], 0x0);
}
}
#[test]
fn exp() {
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9));
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27));
}
#[test]
fn negation() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO);
}
}
#[test]
fn multiplicative_inverse() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
let i_inv_blsscalar = i_blsscalar.multiplicative_inverse();
if i == 0 {
assert!(i_inv_blsscalar.is_none());
} else {
assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE);
}
}
}
#[test]
fn batch_multiplicative_inverse() {
let mut x = Vec::new();
for i in 1..25 {
x.push(Bls12377Scalar::from_canonical_u64(i));
}
let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x);
assert_eq!(x.len(), x_inv.len());
for (x_i, x_i_inv) in x.into_iter().zip(x_inv) {
assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE);
}
}
#[test]
fn num_bits() {
assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5);
assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64);
assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5)
}
#[test]
fn roots_of_unity() {
for n_power in 0..10 {
let n = 1 << n_power as u64;
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE);
if n > 1 {
assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE)
}
}
}
#[test]
fn primitive_root_order() {
for n_power in 0..10 {
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
let order = Bls12377Scalar::generator_order(root);
assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power);
}
}
test_arithmetic!(crate::Bls12377Scalar);
} | } | random_line_split |
bls12_377_scalar.rs | //! This module implements field arithmetic for BLS12-377's scalar field.
use std::cmp::Ordering::Less;
use std::convert::TryInto;
use std::ops::{Add, Div, Mul, Neg, Sub};
use rand::Rng;
use unroll::unroll_for_loops;
use crate::{add_no_overflow, cmp, Field, sub, field_to_biguint, rand_range, rand_range_from_rng};
use crate::nonzero_multiplicative_inverse;
use std::cmp::Ordering;
use std::fmt;
use std::fmt::{Display, Formatter};
/// An element of the BLS12 group's scalar field.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Default)]
pub struct Bls12377Scalar {
/// Montgomery representation, encoded with little-endian u64 limbs.
pub limbs: [u64; 4],
}
impl Bls12377Scalar {
/// The order of the field:
/// 8444461749428370424248824938781546531375899335154063827935233455917409239041
pub const ORDER: [u64; 4] = [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688173398];
/// R in the context of the Montgomery reduction, i.e. 2^256 % |F|.
pub(crate) const R: [u64; 4] =
[9015221291577245683, 8239323489949974514, 1646089257421115374, 958099254763297437];
/// R^2 in the context of the Montgomery reduction, i.e. 2^256^2 % |F|.
pub(crate) const R2: [u64; 4] =
[2726216793283724667, 14712177743343147295, 12091039717619697043, 81024008013859129];
/// R^3 in the context of the Montgomery reduction, i.e. 2^256^3 % |F|.
pub(crate) const R3: [u64; 4] =
[7656847007262524748, 7083357369969088153, 12818756329091487507, 432872940405820890];
/// In the context of Montgomery multiplication, µ = -|F|^-1 mod 2^64.
const MU: u64 = 725501752471715839;
pub fn from_canonical(c: [u64; 4]) -> Self {
// We compute M(c, R^2) = c * R^2 * R^-1 = c * R.
Self { limbs: Self::montgomery_multiply(c, Self::R2) }
}
pub fn to_canonical(&self) -> [u64; 4] {
// Let x * R = self. We compute M(x * R, 1) = x * R * R^-1 = x.
Self::montgomery_multiply(self.limbs, [1, 0, 0, 0])
}
#[unroll_for_loops]
fn montgomery_multiply(a: [u64; 4], b: [u64; 4]) -> [u64; 4] {
// Interleaved Montgomery multiplication, as described in Algorithm 2 of
// https://eprint.iacr.org/2017/1057.pdf
// Note that in the loop below, to avoid explicitly shifting c, we will treat i as the least
// significant digit and wrap around.
let mut c = [0u64; 5];
for i in 0..4 {
// Add a[i] b to c.
let mut carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + a[i] as u128 * b[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
// q = u c mod r = u c[0] mod r.
let q = Self::MU.wrapping_mul(c[i]);
// C += N q
carry = 0;
for j in 0..4 {
let result = c[(i + j) % 5] as u128 + q as u128 * Self::ORDER[j] as u128 + carry as u128;
c[(i + j) % 5] = result as u64;
carry = (result >> 64) as u64;
}
c[(i + 4) % 5] += carry;
debug_assert_eq!(c[i], 0);
}
let mut result = [c[4], c[0], c[1], c[2]];
// Final conditional subtraction.
if cmp(result, Self::ORDER) != Less {
result = sub(result, Self::ORDER);
}
result
}
}
impl Add<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn add(self, rhs: Self) -> Self {
// First we do a widening addition, then we reduce if necessary.
let sum = add_no_overflow(self.limbs, rhs.limbs);
let limbs = if cmp(sum, Self::ORDER) == Less {
sum
} else {
sub(sum, Self::ORDER)
};
Self { limbs }
}
}
impl Sub<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn sub(self, rhs: Self) -> Self { | }
impl Mul<Self> for Bls12377Scalar {
type Output = Self;
fn mul(self, rhs: Self) -> Self {
Self { limbs: Self::montgomery_multiply(self.limbs, rhs.limbs) }
}
}
impl Div<Bls12377Scalar> for Bls12377Scalar {
type Output = Self;
fn div(self, rhs: Self) -> Self {
self * rhs.multiplicative_inverse().expect("No inverse")
}
}
impl Neg for Bls12377Scalar {
type Output = Self;
fn neg(self) -> Self {
if self == Self::ZERO {
Self::ZERO
} else {
Self { limbs: sub(Self::ORDER, self.limbs) }
}
}
}
impl Field for Bls12377Scalar {
const BITS: usize = 253;
const BYTES: usize = 32;
const ZERO: Self = Self { limbs: [0; 4] };
const ONE: Self = Self { limbs: Self::R };
const TWO: Self = Self { limbs: [17304940830682775525, 10017539527700119523, 14770643272311271387, 570918138838421475] };
const THREE: Self = Self { limbs: [7147916296078753751, 11795755565450264533, 9448453213491875784, 183737022913545514] };
const FOUR: Self = Self { limbs: [16163137587655999434, 1588334981690687431, 11094542470912991159, 1141836277676842951] };
const FIVE: Self = Self { limbs: [6006113053051977660, 3366551019440832441, 5772352412093595556, 754655161751966990] };
const NEG_ONE: Self = Self { limbs: [10157024534604021774, 16668528035959406606, 5322190058819395602, 387181115924875961] };
const MULTIPLICATIVE_SUBGROUP_GENERATOR: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
/// x^11 is a permutation in this field.
const ALPHA: Self = Self { limbs: [1855201571499933546, 8511318076631809892, 6222514765367795509, 1122129207579058019] };
const TWO_ADICITY: usize = 47;
/// 60001509534603559531609739528203892656505753216962260608619555
const T: Self = Self { limbs: [725501752471715841, 6461107452199829505, 6968279316240510977, 1345280370688042326] };
fn to_canonical_u64_vec(&self) -> Vec<u64> {
self.to_canonical().to_vec()
}
fn from_canonical_u64_vec(v: Vec<u64>) -> Self {
Self::from_canonical(v[..].try_into().unwrap())
}
fn from_canonical_u64(n: u64) -> Self {
Self::from_canonical([n, 0, 0, 0])
}
fn is_valid_canonical_u64(v: &[u64]) -> bool {
v.len() == 4 && cmp(v[..].try_into().unwrap(), Self::ORDER) == Less
}
fn multiplicative_inverse_assuming_nonzero(&self) -> Self {
// Let x R = self. We compute M((x R)^-1, R^3) = x^-1 R^-1 R^3 R^-1 = x^-1 R.
let self_r_inv = nonzero_multiplicative_inverse(self.limbs, Self::ORDER);
Self { limbs: Self::montgomery_multiply(self_r_inv, Self::R3) }
}
fn rand() -> Self {
Self {
limbs: rand_range(Self::ORDER),
}
}
fn rand_from_rng<R: Rng>(rng: &mut R) -> Self {
Self {
limbs: rand_range_from_rng(Self::ORDER, rng),
}
}
}
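// Editor's sketch (added annotation, not in the original source): why R^3
// appears in `multiplicative_inverse_assuming_nonzero` above. Inverting the
// stored limbs x*R as a plain integer yields x^-1 * R^-1; one Montgomery
// multiply by R^3 then gives x^-1 * R^-1 * R^3 * R^-1 = x^-1 * R, the
// Montgomery form of the inverse.
#[cfg(test)]
mod inverse_r3_sketch {
    use super::Bls12377Scalar;
    use crate::Field;

    #[test]
    fn inverse_times_original_is_one() {
        let x = Bls12377Scalar::from_canonical_u64(7);
        let x_inv = x.multiplicative_inverse_assuming_nonzero();
        assert_eq!(x * x_inv, Bls12377Scalar::ONE);
    }
}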
impl Ord for Bls12377Scalar {
fn cmp(&self, other: &Self) -> Ordering {
self.cmp_helper(other)
}
}
impl PartialOrd for Bls12377Scalar {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Display for Bls12377Scalar {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
field_to_biguint(*self).fmt(f)
}
}
#[cfg(test)]
mod tests {
use crate::{Bls12377Scalar, Field};
use crate::conversions::u64_slice_to_biguint;
use crate::test_arithmetic;
#[test]
fn bls12scalar_to_and_from_canonical() {
let a = [1, 2, 3, 4];
let a_biguint = u64_slice_to_biguint(&a);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let r_biguint = u64_slice_to_biguint(&Bls12377Scalar::R);
let a_bls12scalar = Bls12377Scalar::from_canonical(a);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.limbs),
&a_biguint * &r_biguint % &order_biguint);
assert_eq!(u64_slice_to_biguint(&a_bls12scalar.to_canonical()), a_biguint);
}
#[test]
fn mul_bls12_scalar() {
let a = [1, 2, 3, 4];
let b = [3, 4, 5, 6];
let a_biguint = u64_slice_to_biguint(&a);
let b_biguint = u64_slice_to_biguint(&b);
let order_biguint = u64_slice_to_biguint(&Bls12377Scalar::ORDER);
let a_blsbase = Bls12377Scalar::from_canonical(a);
let b_blsbase = Bls12377Scalar::from_canonical(b);
assert_eq!(
u64_slice_to_biguint(&(a_blsbase * b_blsbase).to_canonical()),
a_biguint * b_biguint % order_biguint);
}
#[test]
fn test_bls12_rand() {
let random_element = Bls12377Scalar::rand();
for i in 0..4 {
assert_ne!(random_element.limbs[i], 0x0);
}
}
#[test]
fn exp() {
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ZERO), Bls12377Scalar::ONE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::ONE), Bls12377Scalar::THREE);
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(2)), Bls12377Scalar::from_canonical_u64(9));
assert_eq!(Bls12377Scalar::THREE.exp(Bls12377Scalar::from_canonical_u64(3)), Bls12377Scalar::from_canonical_u64(27));
}
#[test]
fn negation() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
assert_eq!(i_blsscalar + -i_blsscalar, Bls12377Scalar::ZERO);
}
}
#[test]
fn multiplicative_inverse() {
for i in 0..25 {
let i_blsscalar = Bls12377Scalar::from_canonical_u64(i);
let i_inv_blsscalar = i_blsscalar.multiplicative_inverse();
if i == 0 {
assert!(i_inv_blsscalar.is_none());
} else {
assert_eq!(i_blsscalar * i_inv_blsscalar.unwrap(), Bls12377Scalar::ONE);
}
}
}
#[test]
fn batch_multiplicative_inverse() {
let mut x = Vec::new();
for i in 1..25 {
x.push(Bls12377Scalar::from_canonical_u64(i));
}
let x_inv = Bls12377Scalar::batch_multiplicative_inverse(&x);
assert_eq!(x.len(), x_inv.len());
for (x_i, x_i_inv) in x.into_iter().zip(x_inv) {
assert_eq!(x_i * x_i_inv, Bls12377Scalar::ONE);
}
}
#[test]
fn num_bits() {
assert_eq!(Bls12377Scalar::from_canonical_u64(0b10101).num_bits(), 5);
assert_eq!(Bls12377Scalar::from_canonical_u64(u64::max_value()).num_bits(), 64);
assert_eq!(Bls12377Scalar::from_canonical([0, 1, 0, 0]).num_bits(), 64 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 1]).num_bits(), 64 * 3 + 1);
assert_eq!(Bls12377Scalar::from_canonical([0, 0, 0, 0b10101]).num_bits(), 64 * 3 + 5)
}
#[test]
fn roots_of_unity() {
for n_power in 0..10 {
let n = 1 << n_power as u64;
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
assert_eq!(root.exp(Bls12377Scalar::from_canonical_u64(n)), Bls12377Scalar::ONE);
if n > 1 {
assert_ne!(root.exp(Bls12377Scalar::from_canonical_u64(n - 1)), Bls12377Scalar::ONE)
}
}
}
#[test]
fn primitive_root_order() {
for n_power in 0..10 {
let root = Bls12377Scalar::primitive_root_of_unity(n_power);
let order = Bls12377Scalar::generator_order(root);
assert_eq!(order, 1 << n_power, "2^{}'th primitive root", n_power);
}
}
test_arithmetic!(crate::Bls12377Scalar);
}
|
let limbs = if cmp(self.limbs, rhs.limbs) == Less {
// Underflow occurs, so we compute the difference as `self + (-rhs)`.
add_no_overflow(self.limbs, (-rhs).limbs)
} else {
// No underflow, so it's faster to subtract directly.
sub(self.limbs, rhs.limbs)
};
Self { limbs }
}
| identifier_body |
lib.rs | //! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/0.2.2")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
use std::hash::Hasher;
use std::ops::{AddAssign, MulAssign, RemAssign};
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash, they are not necessarily good at being
/// one).
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct Adler32 {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self |
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
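// Editor's sketch (added check, not part of the original crate): the
// CHUNK_SIZE bound derived in the comment inside `write_slice`, verified
// explicitly. With b_inc(n) = n*65520 + n*(n+1)/2 * 255 as the worst-case
// growth of b over n bytes of 0xff, n = 5552 is the largest count for which
// the deferred sums still fit in a u32.
#[cfg(test)]
mod chunk_size_sketch {
    // Worst-case growth of `b` over `n` bytes, starting from a fully reduced
    // state (a, b < 65521) and feeding 0xff bytes.
    fn b_inc(n: u64) -> u64 {
        n * 65520 + n * (n + 1) / 2 * 255
    }

    #[test]
    fn n_5552_is_the_largest_safe_chunk() {
        let limit = (1u64 << 32) - 65521;
        assert!(b_inc(5552) <= limit);
        assert!(b_inc(5553) > limit);
    }
}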
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul_assign(&mut self, rhs: u32) {
for s in self.0.iter_mut() {
*s *= rhs;
}
}
}
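// Editor's sketch (hypothetical test): the k = 4 lane sums built with the
// U32X4 helpers above must agree with the textbook one-byte-at-a-time
// definition of Adler-32; here the two are compared directly on a small input.
#[cfg(test)]
mod lane_combination_sketch {
    use super::adler32_slice;

    #[test]
    fn vectorized_sum_matches_scalar_definition() {
        let data = [1u8, 2, 3, 4, 5, 6, 7, 8];
        // Textbook definition: a = 1 + sum(D_i), b = sum of the running a's,
        // both mod 65521.
        let (mut a, mut b) = (1u32, 0u32);
        for &d in &data {
            a = (a + u32::from(d)) % 65521;
            b = (b + a) % 65521;
        }
        assert_eq!(adler32_slice(&data), (b << 16) | a);
    }
}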
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF.
///
/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
return Ok(h.checksum());
}
h.write_slice(buf);
buf.len()
};
reader.consume(len);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::BufReader;
#[test]
fn zeroes() {
assert_eq!(adler32_slice(&[]), 1);
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
}
#[test]
fn resume() {
let mut adler = Adler32::new();
adler.write_slice(&[0xff; 1024]);
let partial = adler.checksum();
assert_eq!(partial, 0x79a6fc2e); // from above
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
let mut adler = Adler32::from_checksum(partial);
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
}
#[test]
fn bufread() {
fn test(data: &[u8], checksum: u32) {
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
let mut buf = BufReader::new(data);
let real_sum = adler32_reader(&mut buf).unwrap();
assert_eq!(checksum, real_sum);
}
test(&[], 1);
test(&[0; 1024], 0x04000001);
test(&[0; 1024 * 1024], 0x00f00001);
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
}
}
| {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
} | identifier_body |
lib.rs | //! Adler-32 checksum implementation.
//!
//! This implementation features:
//!
//! - Permissively licensed (0BSD) clean-room implementation.
//! - Zero dependencies.
//! - Decent performance (3-4 GB/s).
//! - `#![no_std]` support (with `default-features = false`).
#![doc(html_root_url = "https://docs.rs/adler/0.2.2")]
// Deny a few warnings in doctests, since rustdoc `allow`s many warnings by default
#![doc(test(attr(deny(unused_imports, unused_must_use))))]
#![cfg_attr(docsrs, feature(doc_cfg))]
#![warn(missing_debug_implementations)]
#![forbid(unsafe_code)]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate core as std;
use std::hash::Hasher;
use std::ops::{AddAssign, MulAssign, RemAssign};
#[cfg(feature = "std")]
use std::io::{self, BufRead};
/// Adler-32 checksum calculator.
///
/// An instance of this type is equivalent to an Adler-32 checksum: It can be created in the default
/// state via [`new`] (or the provided `Default` impl), or from a precalculated checksum via
/// [`from_checksum`], and the currently stored checksum can be fetched via [`checksum`].
///
/// This type also implements `Hasher`, which makes it easy to calculate Adler-32 checksums of any
/// type that implements or derives `Hash`. This also allows using Adler-32 in a `HashMap`, although
/// that is not recommended (while every checksum is a hash, they are not necessarily good at being
/// one).
///
/// [`new`]: #method.new
/// [`from_checksum`]: #method.from_checksum
/// [`checksum`]: #method.checksum
#[derive(Debug, Copy, Clone)]
pub struct | {
a: u16,
b: u16,
}
impl Adler32 {
/// Creates a new Adler-32 instance with default state.
#[inline]
pub fn new() -> Self {
Self::default()
}
/// Creates an `Adler32` instance from a precomputed Adler-32 checksum.
///
/// This allows resuming checksum calculation without having to keep the `Adler32` instance
/// around.
///
/// # Example
///
/// ```
/// # use adler::Adler32;
/// let parts = [
/// "rust",
/// "acean",
/// ];
/// let whole = adler::adler32_slice(b"rustacean");
///
/// let mut sum = Adler32::new();
/// sum.write_slice(parts[0].as_bytes());
/// let partial = sum.checksum();
///
/// // ...later
///
/// let mut sum = Adler32::from_checksum(partial);
/// sum.write_slice(parts[1].as_bytes());
/// assert_eq!(sum.checksum(), whole);
/// ```
#[inline]
pub fn from_checksum(sum: u32) -> Self {
Adler32 {
a: sum as u16,
b: (sum >> 16) as u16,
}
}
/// Returns the calculated checksum at this point in time.
#[inline]
pub fn checksum(&self) -> u32 {
(u32::from(self.b) << 16) | u32::from(self.a)
}
/// Adds `bytes` to the checksum calculation.
///
/// If efficiency matters, this should be called with Byte slices that contain at least a few
/// thousand Bytes.
pub fn write_slice(&mut self, bytes: &[u8]) {
// The basic algorithm is, for every byte:
// a = (a + byte) % MOD
// b = (b + a) % MOD
// where MOD = 65521.
//
// For efficiency, we can defer the `% MOD` operations as long as neither a nor b overflows:
// - Between calls to `write`, we ensure that a and b are always in range 0..MOD.
// - We use 32-bit arithmetic in this function.
// - Therefore, a and b must not increase by more than 2^32-MOD without performing a `% MOD`
// operation.
//
// According to Wikipedia, b is calculated as follows for non-incremental checksumming:
// b = n×D1 + (n−1)×D2 + (n−2)×D3 + ... + Dn + n*1 (mod 65521)
// Where n is the number of bytes and Di is the i-th Byte. We need to change this to account
// for the previous values of a and b, as well as treat every input Byte as being 255:
// b_inc = n×255 + (n-1)×255 + ... + 255 + n*65520
// Or in other words:
// b_inc = n*65520 + n(n+1)/2*255
// The max chunk size is thus the largest value of n so that b_inc <= 2^32-65521.
// 2^32-65521 = n*65520 + n(n+1)/2*255
// Plugging this into an equation solver since I can't math gives n = 5552.18..., so 5552.
//
// On top of the optimization outlined above, the algorithm can also be parallelized with a
// bit more work:
//
// Note that b is a linear combination of a vector of input bytes (D1, ..., Dn).
//
// If we fix some value k<N and rewrite indices 1, ..., N as
//
// 1_1, 1_2, ..., 1_k, 2_1, ..., 2_k, ..., (N/k)_k,
//
// then we can express a and b in terms of sums of smaller sequences kb and ka:
//
// ka(j) := D1_j + D2_j + ... + D(N/k)_j where j <= k
// kb(j) := (N/k)*D1_j + (N/k-1)*D2_j + ... + D(N/k)_j where j <= k
//
// a = ka(1) + ka(2) + ... + ka(k) + 1
// b = k*(kb(1) + kb(2) + ... + kb(k)) - 1*ka(2) - ... - (k-1)*ka(k) + N
//
// We use this insight to unroll the main loop and process k=4 bytes at a time.
// The resulting code is highly amenable to SIMD acceleration, although the immediate speedups
// stem from increased pipeline parallelism rather than auto-vectorization.
//
// This technique is described in depth here:
// https://software.intel.com/content/www/us/en/develop/articles/fast-computation-of-fletcher-checksums.html
const MOD: u32 = 65521;
const CHUNK_SIZE: usize = 5552 * 4;
let mut a = u32::from(self.a);
let mut b = u32::from(self.b);
let mut a_vec = U32X4([0; 4]);
let mut b_vec = a_vec;
let (bytes, remainder) = bytes.split_at(bytes.len() - bytes.len() % 4);
// iterate over 4 bytes at a time
let chunk_iter = bytes.chunks_exact(CHUNK_SIZE);
let remainder_chunk = chunk_iter.remainder();
for chunk in chunk_iter {
for byte_vec in chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += CHUNK_SIZE as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
}
// special-case the final chunk because it may be shorter than the rest
for byte_vec in remainder_chunk.chunks_exact(4) {
let val = U32X4::from(byte_vec);
a_vec += val;
b_vec += a_vec;
}
b += remainder_chunk.len() as u32 * a;
a_vec %= MOD;
b_vec %= MOD;
b %= MOD;
// combine the sub-sum results into the main sum
b_vec *= 4;
b_vec.0[1] += MOD - a_vec.0[1];
b_vec.0[2] += (MOD - a_vec.0[2]) * 2;
b_vec.0[3] += (MOD - a_vec.0[3]) * 3;
for &av in a_vec.0.iter() {
a += av;
}
for &bv in b_vec.0.iter() {
b += bv;
}
// iterate over the remaining few bytes in serial
for &byte in remainder.iter() {
a += u32::from(byte);
b += a;
}
self.a = (a % MOD) as u16;
self.b = (b % MOD) as u16;
}
}
impl Default for Adler32 {
#[inline]
fn default() -> Self {
Adler32 { a: 1, b: 0 }
}
}
impl Hasher for Adler32 {
#[inline]
fn finish(&self) -> u64 {
u64::from(self.checksum())
}
fn write(&mut self, bytes: &[u8]) {
self.write_slice(bytes);
}
}
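// Editor's sketch (usage illustration, assuming only the APIs in this file):
// because this type implements `Hasher`, any `Hash` type can be checksummed
// directly. Note the result covers the type's hashing encoding (which may
// include length prefixes), not necessarily its raw in-memory bytes.
#[cfg(test)]
mod hasher_usage_sketch {
    use super::Adler32;
    use std::hash::{Hash, Hasher};

    #[test]
    fn checksum_a_hashable_value() {
        let mut hasher = Adler32::new();
        (42u32, "tag").hash(&mut hasher);
        // `finish` widens the same 32-bit value that `checksum` returns.
        assert_eq!(hasher.finish(), u64::from(hasher.checksum()));
    }
}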
/// Calculates the Adler-32 checksum of a byte slice.
pub fn adler32_slice(data: &[u8]) -> u32 {
let mut h = Adler32::new();
h.write_slice(data);
h.checksum()
}
#[derive(Copy, Clone)]
struct U32X4([u32; 4]);
impl U32X4 {
fn from(bytes: &[u8]) -> Self {
U32X4([
u32::from(bytes[0]),
u32::from(bytes[1]),
u32::from(bytes[2]),
u32::from(bytes[3]),
])
}
}
impl AddAssign<Self> for U32X4 {
fn add_assign(&mut self, other: Self) {
for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
*s += o;
}
}
}
impl RemAssign<u32> for U32X4 {
fn rem_assign(&mut self, quotient: u32) {
for s in self.0.iter_mut() {
*s %= quotient;
}
}
}
impl MulAssign<u32> for U32X4 {
fn mul_assign(&mut self, rhs: u32) {
for s in self.0.iter_mut() {
*s *= rhs;
}
}
}
/// Calculates the Adler-32 checksum of a `BufRead`'s contents.
///
/// The passed `BufRead` implementor will be read until it reaches EOF.
///
/// If you only have a `Read` implementor, wrap it in `std::io::BufReader`.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub fn adler32_reader<R: BufRead>(reader: &mut R) -> io::Result<u32> {
let mut h = Adler32::new();
loop {
let len = {
let buf = reader.fill_buf()?;
if buf.is_empty() {
return Ok(h.checksum());
}
h.write_slice(buf);
buf.len()
};
reader.consume(len);
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::BufReader;
#[test]
fn zeroes() {
assert_eq!(adler32_slice(&[]), 1);
assert_eq!(adler32_slice(&[0]), 1 | 1 << 16);
assert_eq!(adler32_slice(&[0, 0]), 1 | 2 << 16);
assert_eq!(adler32_slice(&[0; 100]), 0x00640001);
assert_eq!(adler32_slice(&[0; 1024]), 0x04000001);
assert_eq!(adler32_slice(&[0; 1024 * 1024]), 0x00f00001);
}
#[test]
fn ones() {
assert_eq!(adler32_slice(&[0xff; 1024]), 0x79a6fc2e);
assert_eq!(adler32_slice(&[0xff; 1024 * 1024]), 0x8e88ef11);
}
#[test]
fn mixed() {
assert_eq!(adler32_slice(&[1]), 2 | 2 << 16);
assert_eq!(adler32_slice(&[40]), 41 | 41 << 16);
assert_eq!(adler32_slice(&[0xA5; 1024 * 1024]), 0xd5009ab1);
}
/// Example calculation from https://en.wikipedia.org/wiki/Adler-32.
#[test]
fn wiki() {
assert_eq!(adler32_slice(b"Wikipedia"), 0x11E60398);
}
#[test]
fn resume() {
let mut adler = Adler32::new();
adler.write_slice(&[0xff; 1024]);
let partial = adler.checksum();
assert_eq!(partial, 0x79a6fc2e); // from above
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
// Make sure that we can resume computing from the partial checksum via `from_checksum`.
let mut adler = Adler32::from_checksum(partial);
adler.write_slice(&[0xff; 1024 * 1024 - 1024]);
assert_eq!(adler.checksum(), 0x8e88ef11); // from above
}
#[test]
fn bufread() {
fn test(data: &[u8], checksum: u32) {
// `BufReader` uses an 8 KB buffer, so this will test buffer refilling.
let mut buf = BufReader::new(data);
let real_sum = adler32_reader(&mut buf).unwrap();
assert_eq!(checksum, real_sum);
}
test(&[], 1);
test(&[0; 1024], 0x04000001);
test(&[0; 1024 * 1024], 0x00f00001);
test(&[0xA5; 1024 * 1024], 0xd5009ab1);
}
}
| Adler32 | identifier_name |