file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
main.rs | #![feature(rustc_private)]
extern crate im;
extern crate pretty;
extern crate rustc_ast;
extern crate rustc_driver;
extern crate rustc_errors;
extern crate rustc_hir;
extern crate rustc_interface;
extern crate rustc_metadata;
extern crate rustc_middle;
extern crate rustc_session;
extern crate rustc_span;
mod ast_to_rustspec;
mod hir_to_rustspec;
mod name_resolution;
mod rustspec;
mod rustspec_to_coq;
mod rustspec_to_easycrypt;
mod rustspec_to_fstar;
mod typechecker;
mod util;
use itertools::Itertools;
use rustc_driver::{Callbacks, Compilation, RunCompiler};
use rustc_errors::emitter::{ColorConfig, HumanReadableErrorType};
use rustc_errors::DiagnosticId;
use rustc_interface::{
interface::{Compiler, Config},
Queries,
};
use rustc_session::Session;
use rustc_session::{config::ErrorOutputType, search_paths::SearchPath};
use rustc_span::MultiSpan;
use serde::Deserialize;
use serde_json;
use std::env;
use std::ffi::OsStr;
use std::fs::File;
use std::path::Path;
use std::process::Command;
use util::APP_USAGE;
struct HacspecCallbacks {
output_file: Option<String>,
target_directory: String,
}
const ERROR_OUTPUT_CONFIG: ErrorOutputType =
ErrorOutputType::HumanReadable(HumanReadableErrorType::Default(ColorConfig::Auto));
trait HacspecErrorEmitter {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str);
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str);
}
impl HacspecErrorEmitter for Session {
fn span_rustspec_err<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_err_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
fn span_rustspec_warn<S: Into<MultiSpan>>(&self, s: S, msg: &str) {
self.span_warn_with_code(s, msg, DiagnosticId::Error(String::from("Hacspec")));
}
}
impl Callbacks for HacspecCallbacks {
fn config(&mut self, config: &mut Config) {
log::debug!(" --- hacspec config callback");
log::trace!(" target directory {}", self.target_directory);
config.opts.search_paths.push(SearchPath::from_cli_opt(
&self.target_directory,
ERROR_OUTPUT_CONFIG,
));
config.crate_cfg.insert((
String::from("feature"),
Some(String::from("\"hacspec_attributes\"")),
));
}
fn after_analysis<'tcx>(
&mut self,
compiler: &Compiler,
queries: &'tcx Queries<'tcx>,
) -> Compilation {
log::debug!(" --- hacspec after_analysis callback");
let krate = queries.parse().unwrap().take();
let external_data = |imported_crates: &Vec<rustspec::Spanned<String>>| {
queries.global_ctxt().unwrap().peek_mut().enter(|tcx| {
hir_to_rustspec::retrieve_external_data(&compiler.session(), &tcx, imported_crates)
})
};
let krate = match ast_to_rustspec::translate(&compiler.session(), &krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("unable to translate to Hacspec due to out-of-language errors");
return Compilation::Stop;
}
};
let (krate, mut top_ctx) =
match name_resolution::resolve_crate(&compiler.session(), krate, &external_data) {
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec name resolution errors");
return Compilation::Stop;
}
};
let krate = match typechecker::typecheck_program(&compiler.session(), &krate, &mut top_ctx)
{
Ok(krate) => krate,
Err(_) => {
compiler
.session()
.err("found some Hacspec typechecking errors");
return Compilation::Stop;
}
};
let imported_crates = name_resolution::get_imported_crates(&krate);
let imported_crates = imported_crates
.into_iter()
.filter(|(x, _)| x!= "hacspec_lib")
.map(|(x, _)| x)
.collect::<Vec<_>>();
println!(
" > Successfully typechecked{}",
if imported_crates.len() == 0 {
".".to_string()
} else {
format!(
", assuming that the code in crates {} has also been Hacspec-typechecked",
imported_crates.iter().format(", ")
)
}
);
match &self.output_file {
None => return Compilation::Stop,
Some(file) => match Path::new(file).extension().and_then(OsStr::to_str).unwrap() {
"fst" => rustspec_to_fstar::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"ec" => rustspec_to_easycrypt::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
"json" => {
let file = file.trim();
let path = Path::new(file);
let file = match File::create(&path) {
Err(why) => {
compiler.session().err(
format!("Unable to write to output file {}: \"{}\"", file, why)
.as_str(),
);
return Compilation::Stop;
}
Ok(file) => file,
};
match serde_json::to_writer_pretty(file, &krate) {
Err(why) => {
compiler
.session()
.err(format!("Unable to serialize program: \"{}\"", why).as_str());
return Compilation::Stop;
}
Ok(_) => (),
};
}
"v" => rustspec_to_coq::translate_and_write_to_file(
&compiler.session(),
&krate,
&file,
&top_ctx,
),
_ => {
compiler
.session()
.err("unknown backend extension for output file");
return Compilation::Stop;
}
},
}
Compilation::Stop
}
}
// === Cargo Metadata Helpers ===
#[derive(Debug, Default, Deserialize)]
struct Dependency {
name: String,
#[allow(dead_code)]
kind: Option<String>,
}
#[derive(Debug, Default, Deserialize)]
struct Target {
#[allow(dead_code)]
name: String,
#[allow(dead_code)]
kind: Vec<String>,
crate_types: Vec<String>,
src_path: String,
}
#[derive(Debug, Default, Deserialize)]
struct Package {
name: String,
targets: Vec<Target>,
dependencies: Vec<Dependency>,
}
#[derive(Debug, Default, Deserialize)]
struct Manifest {
packages: Vec<Package>,
target_directory: String,
}
// ===
/// Read the crate metadata and use the information for the build.
fn read_crate(
manifest: Option<String>,
package_name: Option<String>,
args: &mut Vec<String>,
callbacks: &mut HacspecCallbacks,
) | }
let json_string = String::from_utf8(stdout).expect(" ⚠️ Failed reading cargo output");
serde_json::from_str(&json_string).expect(" ⚠️ Error reading to manifest")
};
// Pick the package of the given name or the only package available.
let package = if let Some(package_name) = package_name {
manifest
.packages
.iter()
.find(|p| p.name == package_name)
.expect(&format!(
" ⚠️ Can't find the package {} in the Cargo.toml\n\n{}",
package_name, APP_USAGE,
))
} else {
&manifest.packages[0]
};
log::trace!("Typechecking '{:?}'...", package);
// Take the first lib target we find. There should be only one really.
// log::trace!("crate types: {:?}", package.targets);
// log::trace!("package targets {:?}", package.targets);
let target = package
.targets
.iter()
.find(|p| {
p.crate_types.contains(&"lib".to_string())
|| p.crate_types.contains(&"rlib".to_string())
})
.expect(&format!(" ⚠️ No target in the Cargo.toml\n\n{}", APP_USAGE));
// Add the target source file to the arguments
args.push(target.src_path.clone());
// Add build artifact path.
// This only works with debug builds.
let deps = manifest.target_directory + "/debug/deps";
callbacks.target_directory = deps;
// Add the dependencies as --extern for the hacpsec typechecker.
for dependency in package.dependencies.iter() {
args.push(format!("--extern={}", dependency.name.replace("-", "_")));
}
}
fn main() -> Result<()
, usize> {
pretty_env_logger::init();
log::debug!(" --- hacspec");
let mut args = env::args().collect::<Vec<String>>();
log::trace!(" args: {:?}", args);
// Args to pass to the compiler
let mut compiler_args = Vec::new();
// Drop and pass along binary name.
compiler_args.push(args.remove(0));
// Optionally get output file.
let output_file_index = args.iter().position(|a| a == "-o");
let output_file = match output_file_index {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Optionally an input file can be passed in. This should be mostly used for
// testing.
let input_file = match args.iter().position(|a| a == "-f") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --manifest-path argument if present.
let manifest = match args.iter().position(|a| a == "--manifest-path") {
Some(i) => {
args.remove(i);
Some(args.remove(i))
}
None => None,
};
// Read the --sysroot. It must be present
log::trace!("args: {:?}", args);
match args.iter().position(|a| a.starts_with("--sysroot")) {
Some(i) => {
compiler_args.push(args.remove(i));
}
None => panic!(" ⚠️ --sysroot is missing. Please report this issue."),
}
let mut callbacks = HacspecCallbacks {
output_file,
// This defaults to the default target directory.
target_directory: env::current_dir().unwrap().to_str().unwrap().to_owned()
+ "/../target/debug/deps",
};
match input_file {
Some(input_file) => {
compiler_args.push(input_file);
// If only a file is provided we add the default dependencies only.
compiler_args.extend_from_slice(&[
"--extern=abstract_integers".to_string(),
"--extern=hacspec_derive".to_string(),
"--extern=hacspec_lib".to_string(),
"--extern=secret_integers".to_string(),
]);
}
None => {
let package_name = args.pop();
log::trace!("package name to analyze: {:?}", package_name);
read_crate(manifest, package_name, &mut compiler_args, &mut callbacks);
}
}
compiler_args.push("--crate-type=lib".to_string());
compiler_args.push("--edition=2021".to_string());
log::trace!("compiler_args: {:?}", compiler_args);
let compiler = RunCompiler::new(&compiler_args, &mut callbacks);
match compiler.run() {
Ok(_) => Ok(()),
Err(_) => Err(1),
}
}
| {
let manifest: Manifest = {
let mut output = Command::new("cargo");
let mut output_args = if let Some(manifest_path) = manifest {
vec!["--manifest-path".to_string(), manifest_path]
} else {
Vec::<String>::new()
};
output_args.extend_from_slice(&[
"--no-deps".to_string(),
"--format-version".to_string(),
"1".to_string(),
]);
let output = output.arg("metadata").args(&output_args);
let output = output.output().expect(" ⚠️ Error reading cargo manifest.");
let stdout = output.stdout;
if !output.status.success() {
let error =
String::from_utf8(output.stderr).expect(" ⚠️ Failed reading cargo stderr output");
panic!("Error running cargo metadata: {:?}", error); | identifier_body |
eth.rs | }
}
impl<const TD: usize, const RD: usize> Default for DesRing<TD, RD> {
fn default() -> Self {
Self::new()
}
}
///
/// Ethernet DMA
///
pub struct EthernetDMA<const TD: usize, const RD: usize> {
ring: &'static mut DesRing<TD, RD>,
eth_dma: stm32::ETHERNET_DMA,
}
///
/// Ethernet MAC
///
pub struct EthernetMAC {
eth_mac: stm32::ETHERNET_MAC,
eth_phy_addr: u8,
clock_range: u8,
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals. Automatically sets slew rate to VeryHigh.
/// If you wish to use another configuration, please see
/// [new_unchecked](new_unchecked).
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
#[allow(clippy::too_many_arguments)]
pub fn new<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
mut pins: impl PinsRMII,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
pins.set_speed(Speed::VeryHigh);
unsafe {
new_unchecked(eth_mac, eth_mtl, eth_dma, ring, mac_addr, prec, clocks)
}
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals.
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// All the documented interrupts in the `MMC_TX_INTERRUPT_MASK` and
/// `MMC_RX_INTERRUPT_MASK` registers are masked, since these cause unexpected
/// interrupts after a number of days of heavy ethernet traffic. If these
/// interrupts are desired, you can be unmask them in your own code after this
/// method.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
pub unsafe fn new_unchecked<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
// RCC
{
let rcc = &*stm32::RCC::ptr();
let syscfg = &*stm32::SYSCFG::ptr();
// Ensure syscfg is enabled (for PMCR)
rcc.apb4enr.modify(|_, w| w.syscfgen().set_bit());
// Reset ETH_DMA - write 1 and wait for 0.
// On the H723, we have to do this before prec.enable()
// or the DMA will never come out of reset
eth_dma.dmamr.modify(|_, w| w.swr().set_bit());
while eth_dma.dmamr.read().swr().bit_is_set() {}
// AHB1 ETH1MACEN
prec.enable();
// Also need to enable the transmission and reception clocks, which
// don't have prec objects. They don't have prec objects because they
// can't be reset.
rcc.ahb1enr
.modify(|_, w| w.eth1txen().set_bit().eth1rxen().set_bit());
syscfg.pmcr.modify(|_, w| w.epis().bits(0b100)); // RMII
}
// reset ETH_MAC - write 1 then 0
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().set_bit());
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().clear_bit());
cortex_m::interrupt::free(|_cs| {
// 200 MHz
eth_mac
.mac1ustcr
.modify(|_, w| w.tic_1us_cntr().bits(200 - 1));
// Configuration Register
eth_mac.maccr.modify(|_, w| {
w.arpen()
.clear_bit()
.ipc()
.set_bit()
.ipg()
.bits(0b000) // 96 bit
.ecrsfd()
.clear_bit()
.dcrs()
.clear_bit()
.bl()
.bits(0b00) // 19
.prelen()
.bits(0b00) // 7
// CRC stripping for Type frames
.cst()
.set_bit()
// Fast Ethernet speed
.fes()
.set_bit()
// Duplex mode
.dm()
.set_bit()
// Automatic pad/CRC stripping
.acs()
.set_bit()
// Retry disable in half-duplex mode
.dr()
.set_bit()
});
eth_mac.macecr.modify(|_, w| {
w.eipgen()
.clear_bit()
.usp()
.clear_bit()
.spen()
.clear_bit()
.dcrcc()
.clear_bit()
});
// Set the MAC address.
// Writes to LR trigger both registers to be loaded into the MAC,
// so write to LR last.
eth_mac.maca0hr.write(|w| {
w.addrhi().bits(
u16::from(mac_addr.0[4]) | (u16::from(mac_addr.0[5]) << 8),
)
});
eth_mac.maca0lr.write(|w| {
w.addrlo().bits(
u32::from(mac_addr.0[0])
| (u32::from(mac_addr.0[1]) << 8)
| (u32::from(mac_addr.0[2]) << 16)
| (u32::from(mac_addr.0[3]) << 24),
)
});
// frame filter register
eth_mac.macpfr.modify(|_, w| {
w.dntu()
.clear_bit()
.ipfe()
.clear_bit()
.vtfe()
.clear_bit()
.hpf()
.clear_bit()
.saf()
.clear_bit()
.saif()
.clear_bit()
.pcf()
.bits(0b00)
.dbf()
.clear_bit()
.pm()
.clear_bit()
.daif()
.clear_bit()
.hmc()
.clear_bit()
.huc()
.clear_bit()
// Receive All
.ra()
.clear_bit()
// Promiscuous mode
.pr()
.clear_bit()
});
eth_mac.macwtr.write(|w| w.pwe().clear_bit());
// Flow Control Register
eth_mac.macqtx_fcr.modify(|_, w| {
// Pause time
w.pt().bits(0x100)
});
eth_mac.macrx_fcr.modify(|_, w| w);
// Mask away Ethernet MAC MMC RX/TX interrupts. These are statistics
// counter interrupts and are enabled by default. We need to manually
// disable various ethernet interrupts so they don't unintentionally
// hang the device. The user is free to re-enable them later to provide
// ethernet MAC-related statistics
eth_mac.mmc_rx_interrupt_mask.modify(|_, w| {
w.rxlpiuscim()
.set_bit()
.rxucgpim()
.set_bit()
.rxalgnerpim()
.set_bit()
.rxcrcerpim()
.set_bit()
});
eth_mac.mmc_tx_interrupt_mask.modify(|_, w| {
w.txlpiuscim()
.set_bit()
.txgpktim()
.set_bit()
.txmcolgpim()
.set_bit()
.txscolgpim()
.set_bit()
});
// TODO: The MMC_TX/RX_INTERRUPT_MASK registers incorrectly mark
// LPITRCIM as read-only, so svd2rust doens't generate bindings to
// modify them. Instead, as a workaround, we manually manipulate the
// bits
eth_mac
.mmc_tx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mac
.mmc_rx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mtl.mtlrx_qomr.modify(|_, w| {
w
// Receive store and forward
.rsf()
.set_bit()
// Dropping of TCP/IP checksum error frames disable
.dis_tcp_ef()
.clear_bit()
// Forward error frames
.fep()
.clear_bit()
// Forward undersized good packets
.fup()
.clear_bit()
});
eth_mtl.mtltx_qomr.modify(|_, w| {
w
// Transmit store and forward
.tsf()
.set_bit()
});
// operation mode register
eth_dma.dmamr.modify(|_, w| {
w.intm()
.bits(0b00)
// Rx Tx priority ratio 1:1
.pr()
.bits(0b000)
.txpr()
.clear_bit()
.da()
.clear_bit()
});
// bus mode register
eth_dma.dmasbmr.modify(|_, w| {
// Address-aligned beats
w.aal()
.set_bit()
// Fixed burst
.fb()
.set_bit()
});
eth_dma
.dmaccr
.modify(|_, w| w.dsl().bits(0).pblx8().clear_bit().mss().bits(536));
eth_dma.dmactx_cr.modify(|_, w| {
w
// Tx DMA PBL
.txpbl()
.bits(32)
.tse()
.clear_bit()
// Operate on second frame
.osf()
.clear_bit()
});
eth_dma.dmacrx_cr.modify(|_, w| {
w
// receive buffer size
.rbsz()
.bits(ETH_BUF_SIZE as u16)
// Rx DMA PBL
.rxpbl()
.bits(32)
// Disable flushing of received frames
.rpf()
.clear_bit()
});
// Initialise DMA descriptors
ring.tx.init();
ring.rx.init();
// Ensure the DMA descriptors are committed
cortex_m::asm::dsb();
// Manage MAC transmission and reception
eth_mac.maccr.modify(|_, w| {
w.re()
.bit(true) // Receiver Enable
.te()
.bit(true) // Transmiter Enable
});
eth_mtl.mtltx_qomr.modify(|_, w| w.ftq().set_bit());
// Manage DMA transmission and reception
eth_dma.dmactx_cr.modify(|_, w| w.st().set_bit());
eth_dma.dmacrx_cr.modify(|_, w| w.sr().set_bit());
eth_dma
.dmacsr
.modify(|_, w| w.tps().set_bit().rps().set_bit());
});
// MAC layer
// Set the MDC clock frequency in the range 1MHz - 2.5MHz
let hclk_mhz = clocks.hclk().raw() / 1_000_000;
let csr_clock_range = match hclk_mhz {
0..=34 => 2, // Divide by 16
35..=59 => 3, // Divide by 26
60..=99 => 0, // Divide by 42
100..=149 => 1, // Divide by 62
150..=249 => 4, // Divide by 102
250..=310 => 5, // Divide by 124
_ => panic!(
"HCLK results in MDC clock > 2.5MHz even for the \
highest CSR clock divider"
),
};
let mac = EthernetMAC {
eth_mac,
eth_phy_addr: 0,
clock_range: csr_clock_range,
};
let dma = EthernetDMA { ring, eth_dma };
(dma, mac)
}
impl EthernetMAC {
/// Sets the SMI address to use for the PHY
pub fn set_phy_addr(self, eth_phy_addr: u8) -> Self {
Self {
eth_mac: self.eth_mac,
eth_phy_addr,
clock_range: self.clock_range,
}
}
}
/// PHY Operations
impl StationManagement for EthernetMAC {
/// Read a register over SMI.
fn smi_read(&mut self, reg: u8) -> u16 {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b11) // read
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdiodr.read().md().bits()
}
/// Write a register over SMI.
fn smi_write(&mut self, reg: u8, val: u16) {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac
.macmdiodr
.write(|w| unsafe { w.md().bits(val) });
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b01) // write
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
}
}
/// Define TxToken type and implement consume method
pub struct TxToken<'a, const TD: usize>(&'a mut TDesRing<TD>);
impl<'a, const TD: usize> phy::TxToken for TxToken<'a, TD> {
fn consume<R, F>(self, len: usize, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
assert!(len <= ETH_BUF_SIZE);
let result = f(unsafe { self.0.buf_as_slice_mut(len) });
self.0.release();
result
}
}
/// Define RxToken type and implement consume method
pub struct RxToken<'a, const RD: usize>(&'a mut RDesRing<RD>);
impl<'a, const RD: usize> phy::RxToken for RxToken<'a, RD> {
fn consume<R, F>(self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
let result = f(unsafe { self.0.buf_as_slice_mut() });
self.0.release();
result
}
}
/// Implement the smoltcp Device interface
impl<const TD: usize, const RD: usize> phy::Device for EthernetDMA<TD, RD> {
type RxToken<'a> = RxToken<'a, RD>;
type TxToken<'a> = TxToken<'a, TD>;
// Clippy false positive because DeviceCapabilities is non-exhaustive
#[allow(clippy::field_reassign_with_default)]
fn capabilities(&self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
// ethernet frame type II (6 smac, 6 dmac, 2 ethertype),
// sans CRC (4), 1500 IP MTU
caps.max_transmission_unit = 1514;
caps.max_burst_size = Some(core::cmp::min(TD, RD));
caps
}
fn receive(
&mut self,
_timestamp: Instant,
) -> Option<(RxToken<RD>, TxToken<TD>)> {
// Skip all queued packets with errors.
while self.ring.rx.available() &&!self.ring.rx.valid() {
self.ring.rx.release()
}
if self.ring.rx.available() && self.ring.tx.available() {
Some((RxToken(&mut self.ring.rx), TxToken(&mut self.ring.tx)))
} else {
None
}
}
fn transmit(&mut self, _timestamp: Instant) -> Option<TxToken<TD>> {
if self.ring.tx.available() {
Some(TxToken(&mut self.ring.tx))
} else {
None
}
}
}
impl<const TD: usize, const RD: usize> EthernetDMA<TD, RD> {
/// Return the number of packets dropped since this method was
/// last called
pub fn | number_packets_dropped | identifier_name |
|
eth.rs | move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals. Automatically sets slew rate to VeryHigh.
/// If you wish to use another configuration, please see
/// [new_unchecked](new_unchecked).
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
#[allow(clippy::too_many_arguments)]
pub fn new<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
mut pins: impl PinsRMII,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
pins.set_speed(Speed::VeryHigh);
unsafe {
new_unchecked(eth_mac, eth_mtl, eth_dma, ring, mac_addr, prec, clocks)
}
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals.
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// All the documented interrupts in the `MMC_TX_INTERRUPT_MASK` and
/// `MMC_RX_INTERRUPT_MASK` registers are masked, since these cause unexpected
/// interrupts after a number of days of heavy ethernet traffic. If these
/// interrupts are desired, you can be unmask them in your own code after this
/// method.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
pub unsafe fn new_unchecked<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
// RCC
{
let rcc = &*stm32::RCC::ptr();
let syscfg = &*stm32::SYSCFG::ptr();
// Ensure syscfg is enabled (for PMCR)
rcc.apb4enr.modify(|_, w| w.syscfgen().set_bit());
// Reset ETH_DMA - write 1 and wait for 0.
// On the H723, we have to do this before prec.enable()
// or the DMA will never come out of reset
eth_dma.dmamr.modify(|_, w| w.swr().set_bit());
while eth_dma.dmamr.read().swr().bit_is_set() {}
// AHB1 ETH1MACEN
prec.enable();
// Also need to enable the transmission and reception clocks, which
// don't have prec objects. They don't have prec objects because they
// can't be reset.
rcc.ahb1enr
.modify(|_, w| w.eth1txen().set_bit().eth1rxen().set_bit());
syscfg.pmcr.modify(|_, w| w.epis().bits(0b100)); // RMII
}
// reset ETH_MAC - write 1 then 0
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().set_bit());
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().clear_bit());
cortex_m::interrupt::free(|_cs| {
// 200 MHz
eth_mac
.mac1ustcr
.modify(|_, w| w.tic_1us_cntr().bits(200 - 1));
// Configuration Register
eth_mac.maccr.modify(|_, w| {
w.arpen()
.clear_bit()
.ipc()
.set_bit()
.ipg()
.bits(0b000) // 96 bit
.ecrsfd()
.clear_bit()
.dcrs()
.clear_bit()
.bl()
.bits(0b00) // 19
.prelen()
.bits(0b00) // 7
// CRC stripping for Type frames
.cst()
.set_bit()
// Fast Ethernet speed
.fes()
.set_bit()
// Duplex mode
.dm()
.set_bit()
// Automatic pad/CRC stripping
.acs()
.set_bit()
// Retry disable in half-duplex mode
.dr()
.set_bit()
});
eth_mac.macecr.modify(|_, w| {
w.eipgen()
.clear_bit()
.usp()
.clear_bit()
.spen()
.clear_bit()
.dcrcc()
.clear_bit()
});
// Set the MAC address.
// Writes to LR trigger both registers to be loaded into the MAC,
// so write to LR last.
eth_mac.maca0hr.write(|w| {
w.addrhi().bits(
u16::from(mac_addr.0[4]) | (u16::from(mac_addr.0[5]) << 8),
)
});
eth_mac.maca0lr.write(|w| {
w.addrlo().bits(
u32::from(mac_addr.0[0])
| (u32::from(mac_addr.0[1]) << 8)
| (u32::from(mac_addr.0[2]) << 16)
| (u32::from(mac_addr.0[3]) << 24),
)
});
// frame filter register
eth_mac.macpfr.modify(|_, w| {
w.dntu()
.clear_bit()
.ipfe()
.clear_bit()
.vtfe()
.clear_bit()
.hpf()
.clear_bit()
.saf()
.clear_bit()
.saif()
.clear_bit()
.pcf()
.bits(0b00)
.dbf()
.clear_bit()
.pm()
.clear_bit()
.daif()
.clear_bit()
.hmc()
.clear_bit()
.huc()
.clear_bit()
// Receive All
.ra()
.clear_bit()
// Promiscuous mode
.pr()
.clear_bit()
});
eth_mac.macwtr.write(|w| w.pwe().clear_bit());
// Flow Control Register
eth_mac.macqtx_fcr.modify(|_, w| {
// Pause time
w.pt().bits(0x100)
});
eth_mac.macrx_fcr.modify(|_, w| w);
// Mask away Ethernet MAC MMC RX/TX interrupts. These are statistics
// counter interrupts and are enabled by default. We need to manually
// disable various ethernet interrupts so they don't unintentionally
// hang the device. The user is free to re-enable them later to provide
// ethernet MAC-related statistics
eth_mac.mmc_rx_interrupt_mask.modify(|_, w| {
w.rxlpiuscim()
.set_bit()
.rxucgpim()
.set_bit()
.rxalgnerpim()
.set_bit()
.rxcrcerpim()
.set_bit()
});
eth_mac.mmc_tx_interrupt_mask.modify(|_, w| {
w.txlpiuscim()
.set_bit()
.txgpktim()
.set_bit()
.txmcolgpim()
.set_bit()
.txscolgpim()
.set_bit()
});
// TODO: The MMC_TX/RX_INTERRUPT_MASK registers incorrectly mark
// LPITRCIM as read-only, so svd2rust doens't generate bindings to
// modify them. Instead, as a workaround, we manually manipulate the
// bits
eth_mac
.mmc_tx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mac
.mmc_rx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mtl.mtlrx_qomr.modify(|_, w| {
w
// Receive store and forward
.rsf()
.set_bit()
// Dropping of TCP/IP checksum error frames disable
.dis_tcp_ef()
.clear_bit()
// Forward error frames
.fep()
.clear_bit()
// Forward undersized good packets
.fup()
.clear_bit()
});
eth_mtl.mtltx_qomr.modify(|_, w| {
w
// Transmit store and forward
.tsf()
.set_bit()
});
// operation mode register
eth_dma.dmamr.modify(|_, w| {
w.intm()
.bits(0b00)
// Rx Tx priority ratio 1:1
.pr()
.bits(0b000)
.txpr()
.clear_bit()
.da()
.clear_bit()
});
// bus mode register
eth_dma.dmasbmr.modify(|_, w| {
// Address-aligned beats
w.aal()
.set_bit()
// Fixed burst
.fb()
.set_bit()
});
eth_dma
.dmaccr
.modify(|_, w| w.dsl().bits(0).pblx8().clear_bit().mss().bits(536));
eth_dma.dmactx_cr.modify(|_, w| {
w
// Tx DMA PBL
.txpbl()
.bits(32)
.tse()
.clear_bit()
// Operate on second frame
.osf()
.clear_bit()
});
eth_dma.dmacrx_cr.modify(|_, w| {
w
// receive buffer size
.rbsz()
.bits(ETH_BUF_SIZE as u16)
// Rx DMA PBL
.rxpbl()
.bits(32)
// Disable flushing of received frames
.rpf()
.clear_bit()
});
// Initialise DMA descriptors
ring.tx.init();
ring.rx.init();
// Ensure the DMA descriptors are committed
cortex_m::asm::dsb();
// Manage MAC transmission and reception
eth_mac.maccr.modify(|_, w| {
w.re()
.bit(true) // Receiver Enable
.te()
.bit(true) // Transmiter Enable
});
eth_mtl.mtltx_qomr.modify(|_, w| w.ftq().set_bit());
// Manage DMA transmission and reception
eth_dma.dmactx_cr.modify(|_, w| w.st().set_bit());
eth_dma.dmacrx_cr.modify(|_, w| w.sr().set_bit());
eth_dma
.dmacsr
.modify(|_, w| w.tps().set_bit().rps().set_bit());
});
// MAC layer
// Set the MDC clock frequency in the range 1MHz - 2.5MHz
let hclk_mhz = clocks.hclk().raw() / 1_000_000;
let csr_clock_range = match hclk_mhz {
0..=34 => 2, // Divide by 16
35..=59 => 3, // Divide by 26
60..=99 => 0, // Divide by 42
100..=149 => 1, // Divide by 62
150..=249 => 4, // Divide by 102
250..=310 => 5, // Divide by 124
_ => panic!(
"HCLK results in MDC clock > 2.5MHz even for the \
highest CSR clock divider"
),
};
let mac = EthernetMAC {
eth_mac,
eth_phy_addr: 0,
clock_range: csr_clock_range,
};
let dma = EthernetDMA { ring, eth_dma };
(dma, mac)
}
impl EthernetMAC {
/// Sets the SMI address to use for the PHY
pub fn set_phy_addr(self, eth_phy_addr: u8) -> Self {
Self {
eth_mac: self.eth_mac,
eth_phy_addr,
clock_range: self.clock_range,
}
}
}
/// PHY Operations
impl StationManagement for EthernetMAC {
/// Read a register over SMI.
fn smi_read(&mut self, reg: u8) -> u16 {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b11) // read
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdiodr.read().md().bits()
}
/// Write a register over SMI.
fn smi_write(&mut self, reg: u8, val: u16) {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac
.macmdiodr
.write(|w| unsafe { w.md().bits(val) });
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b01) // write
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
}
}
/// Define TxToken type and implement consume method
pub struct TxToken<'a, const TD: usize>(&'a mut TDesRing<TD>);
impl<'a, const TD: usize> phy::TxToken for TxToken<'a, TD> {
fn consume<R, F>(self, len: usize, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
assert!(len <= ETH_BUF_SIZE);
let result = f(unsafe { self.0.buf_as_slice_mut(len) });
self.0.release();
result
}
}
/// Define RxToken type and implement consume method
pub struct RxToken<'a, const RD: usize>(&'a mut RDesRing<RD>);
impl<'a, const RD: usize> phy::RxToken for RxToken<'a, RD> {
fn consume<R, F>(self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
let result = f(unsafe { self.0.buf_as_slice_mut() });
self.0.release();
result
}
}
/// Implement the smoltcp Device interface
impl<const TD: usize, const RD: usize> phy::Device for EthernetDMA<TD, RD> {
type RxToken<'a> = RxToken<'a, RD>;
type TxToken<'a> = TxToken<'a, TD>;
// Clippy false positive because DeviceCapabilities is non-exhaustive
#[allow(clippy::field_reassign_with_default)]
fn capabilities(&self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
// ethernet frame type II (6 smac, 6 dmac, 2 ethertype),
// sans CRC (4), 1500 IP MTU
caps.max_transmission_unit = 1514;
caps.max_burst_size = Some(core::cmp::min(TD, RD));
caps
}
fn receive(
&mut self,
_timestamp: Instant,
) -> Option<(RxToken<RD>, TxToken<TD>)> {
// Skip all queued packets with errors.
while self.ring.rx.available() &&!self.ring.rx.valid() {
self.ring.rx.release()
}
if self.ring.rx.available() && self.ring.tx.available() {
Some((RxToken(&mut self.ring.rx), TxToken(&mut self.ring.tx)))
} else {
None
}
}
fn transmit(&mut self, _timestamp: Instant) -> Option<TxToken<TD>> {
if self.ring.tx.available() {
Some(TxToken(&mut self.ring.tx))
} else {
None
}
}
}
impl<const TD: usize, const RD: usize> EthernetDMA<TD, RD> {
/// Return the number of packets dropped since this method was
/// last called
pub fn number_packets_dropped(&self) -> u32 {
self.eth_dma.dmacmfcr.read().mfc().bits() as u32
}
}
/// Clears the Ethernet interrupt flag
///
/// # Safety
///
/// This method implements a single register write to DMACSR
pub unsafe fn interrupt_handler() | {
let eth_dma = &*stm32::ETHERNET_DMA::ptr();
eth_dma
.dmacsr
.write(|w| w.nis().set_bit().ri().set_bit().ti().set_bit());
let _ = eth_dma.dmacsr.read();
let _ = eth_dma.dmacsr.read(); // Delay 2 peripheral clocks
} | identifier_body |
|
eth.rs | // before the DMA engine is enabled.)
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmactx_dlar
.write(|w| w.bits(&self.td[0] as *const _ as u32));
dma.dmactx_rlr.write(|w| w.tdrl().bits(TD as u16 - 1));
dma.dmactx_dtpr
.write(|w| w.bits(&self.td[0] as *const _ as u32));
}
}
/// Return true if a TDes is available for use
pub fn available(&self) -> bool {
self.td[self.tdidx].available()
}
/// Release the next TDes to the DMA engine for transmission
pub fn release(&mut self) {
let x = self.tdidx;
assert!(self.td[x].tdes3 & EMAC_DES3_OWN == 0); // Owned by us
let address = ptr::addr_of!(self.tbuf[x]) as u32;
// Read format
self.td[x].tdes0 = address; // Buffer 1
self.td[x].tdes1 = 0; // Not used
assert!(self.td[x].tdes2 &!EMAC_TDES2_B1L == 0); // Not used
assert!(self.td[x].tdes2 & EMAC_TDES2_B1L > 0); // Length must be valid
self.td[x].tdes3 = 0;
self.td[x].tdes3 |= EMAC_DES3_FD; // FD: Contains first buffer of packet
self.td[x].tdes3 |= EMAC_DES3_LD; // LD: Contains last buffer of packet
self.td[x].tdes3 |= EMAC_DES3_OWN; // Give the DMA engine ownership
// Ensure changes to the descriptor are committed before
// DMA engine sees tail pointer store
cortex_m::asm::dsb();
// Move the tail pointer (TPR) to the next descriptor
let x = (x + 1) % TD;
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmactx_dtpr
.write(|w| w.bits(&(self.td[x]) as *const _ as u32));
}
self.tdidx = x;
}
/// Access the buffer pointed to by the next TDes
pub unsafe fn buf_as_slice_mut(&mut self, length: usize) -> &mut [u8] {
let x = self.tdidx;
// Set address in descriptor
self.td[x].tdes0 = ptr::addr_of!(self.tbuf[x]) as u32; // Buffer 1
// Set length in descriptor
let len = core::cmp::min(length, ETH_BUF_SIZE);
self.td[x].tdes2 = (length as u32) & EMAC_TDES2_B1L;
// Create a raw pointer in place without an intermediate reference. Use
// this to return a slice from the packed buffer
let addr = ptr::addr_of_mut!(self.tbuf[x]) as *mut _;
core::slice::from_raw_parts_mut(addr, len)
}
}
/// Receive Descriptor representation
///
/// * rdes0: recieve buffer address
/// * rdes1:
/// * rdes2:
/// * rdes3: OWN and Status
///
/// Note that Copy and Clone are derived to support initialising an
/// array of RDes, but you may not move a RDes after its address has
/// been given to the ETH_DMA engine.
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct RDes {
rdes0: u32,
rdes1: u32,
rdes2: u32,
rdes3: u32,
}
impl RDes {
/// Initialises RDes
pub fn init(&mut self) {
self.rdes0 = 0;
self.rdes1 = 0;
self.rdes2 = 0;
self.rdes3 = 0; // Owned by us
}
/// Return true if this RDes is acceptable to us
pub fn valid(&self) -> bool { | // Contains first buffer of packet AND contains last buf of
// packet AND no errors AND not a contex descriptor
self.rdes3
& (EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_ES | EMAC_DES3_CTXT)
== (EMAC_DES3_FD | EMAC_DES3_LD)
}
/// Return true if this RDes is not currently owned by the DMA
pub fn available(&self) -> bool {
self.rdes3 & EMAC_DES3_OWN == 0 // Owned by us
}
}
/// Store a ring of RDes and associated buffers
#[repr(C, packed)]
struct RDesRing<const RD: usize> {
rd: [RDes; RD],
rbuf: [[u32; ETH_BUF_SIZE / 4]; RD],
rdidx: usize,
}
impl<const RD: usize> RDesRing<RD> {
const fn new() -> Self {
Self {
rd: [RDes {
rdes0: 0,
rdes1: 0,
rdes2: 0,
rdes3: 0,
}; RD],
rbuf: [[0; ETH_BUF_SIZE / 4]; RD],
rdidx: 0,
}
}
/// Initialise this RDesRing. Assume RDesRing is corrupt
///
/// The current memory address of the buffers inside this RDesRing
/// will be stored in the descriptors, so ensure the RDesRing is
/// not moved after initialisation.
pub fn init(&mut self) {
for x in 0..RD {
self.rd[x].init();
}
self.rdidx = 0;
// Initialise pointers in the DMA engine
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmacrx_dlar
.write(|w| w.bits(&self.rd[0] as *const _ as u32));
dma.dmacrx_rlr.write(|w| w.rdrl().bits(RD as u16 - 1));
}
// Release descriptors to the DMA engine
while self.available() {
self.release()
}
}
/// Return true if a RDes is available for use
pub fn available(&self) -> bool {
self.rd[self.rdidx].available()
}
/// Return true if current RDes is valid
pub fn valid(&self) -> bool {
self.rd[self.rdidx].valid()
}
/// Release the next RDes to the DMA engine
pub fn release(&mut self) {
let x = self.rdidx;
assert!(self.rd[x].rdes3 & EMAC_DES3_OWN == 0); // Owned by us
let address = ptr::addr_of!(self.rbuf[x]) as u32;
// Read format
self.rd[x].rdes0 = address; // Buffer 1
self.rd[x].rdes1 = 0; // Reserved
self.rd[x].rdes2 = 0; // Marked as invalid
self.rd[x].rdes3 = 0;
self.rd[x].rdes3 |= EMAC_DES3_OWN; // Give the DMA engine ownership
self.rd[x].rdes3 |= EMAC_RDES3_BUF1V; // BUF1V: 1st buffer address is valid
self.rd[x].rdes3 |= EMAC_RDES3_IOC; // IOC: Interrupt on complete
// Ensure changes to the descriptor are committed before
// DMA engine sees tail pointer store
cortex_m::asm::dsb();
// Move the tail pointer (TPR) to this descriptor
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmacrx_dtpr
.write(|w| w.bits(&(self.rd[x]) as *const _ as u32));
}
// Update active descriptor
self.rdidx = (x + 1) % RD;
}
/// Access the buffer pointed to by the next RDes
///
/// # Safety
///
/// Ensure that release() is called between subsequent calls to this
/// function.
#[allow(clippy::mut_from_ref)]
pub unsafe fn buf_as_slice_mut(&self) -> &mut [u8] {
let x = self.rdidx;
// Write-back format
let addr = ptr::addr_of!(self.rbuf[x]) as *mut u8;
let len = (self.rd[x].rdes3 & EMAC_RDES3_PL) as usize;
let len = core::cmp::min(len, ETH_BUF_SIZE);
core::slice::from_raw_parts_mut(addr, len)
}
}
pub struct DesRing<const TD: usize, const RD: usize> {
tx: TDesRing<TD>,
rx: RDesRing<RD>,
}
impl<const TD: usize, const RD: usize> DesRing<TD, RD> {
pub const fn new() -> Self {
DesRing {
tx: TDesRing::new(),
rx: RDesRing::new(),
}
}
}
impl<const TD: usize, const RD: usize> Default for DesRing<TD, RD> {
fn default() -> Self {
Self::new()
}
}
///
/// Ethernet DMA
///
pub struct EthernetDMA<const TD: usize, const RD: usize> {
ring: &'static mut DesRing<TD, RD>,
eth_dma: stm32::ETHERNET_DMA,
}
///
/// Ethernet MAC
///
pub struct EthernetMAC {
eth_mac: stm32::ETHERNET_MAC,
eth_phy_addr: u8,
clock_range: u8,
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals. Automatically sets slew rate to VeryHigh.
/// If you wish to use another configuration, please see
/// [new_unchecked](new_unchecked).
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
#[allow(clippy::too_many_arguments)]
pub fn new<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
mut pins: impl PinsRMII,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
pins.set_speed(Speed::VeryHigh);
unsafe {
new_unchecked(eth_mac, eth_mtl, eth_dma, ring, mac_addr, prec, clocks)
}
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals.
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// All the documented interrupts in the `MMC_TX_INTERRUPT_MASK` and
/// `MMC_RX_INTERRUPT_MASK` registers are masked, since these cause unexpected
/// interrupts after a number of days of heavy ethernet traffic. If these
/// interrupts are desired, you can be unmask them in your own code after this
/// method.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
pub unsafe fn new_unchecked<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
ring: &'static mut DesRing<TD, RD>,
mac_addr: EthernetAddress,
prec: rec::Eth1Mac,
clocks: &CoreClocks,
) -> (EthernetDMA<TD, RD>, EthernetMAC) {
// RCC
{
let rcc = &*stm32::RCC::ptr();
let syscfg = &*stm32::SYSCFG::ptr();
// Ensure syscfg is enabled (for PMCR)
rcc.apb4enr.modify(|_, w| w.syscfgen().set_bit());
// Reset ETH_DMA - write 1 and wait for 0.
// On the H723, we have to do this before prec.enable()
// or the DMA will never come out of reset
eth_dma.dmamr.modify(|_, w| w.swr().set_bit());
while eth_dma.dmamr.read().swr().bit_is_set() {}
// AHB1 ETH1MACEN
prec.enable();
// Also need to enable the transmission and reception clocks, which
// don't have prec objects. They don't have prec objects because they
// can't be reset.
rcc.ahb1enr
.modify(|_, w| w.eth1txen().set_bit().eth1rxen().set_bit());
syscfg.pmcr.modify(|_, w| w.epis().bits(0b100)); // RMII
}
// reset ETH_MAC - write 1 then 0
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().set_bit());
//rcc.ahb1rstr.modify(|_, w| w.eth1macrst().clear_bit());
cortex_m::interrupt::free(|_cs| {
// 200 MHz
eth_mac
.mac1ustcr
.modify(|_, w| w.tic_1us_cntr().bits(200 - 1));
// Configuration Register
eth_mac.maccr.modify(|_, w| {
w.arpen()
.clear_bit()
.ipc()
.set_bit()
.ipg()
.bits(0b000) // 96 bit
.ecrsfd()
.clear_bit()
.dcrs()
.clear_bit()
.bl()
.bits(0b00) // 19
.prelen()
.bits(0b00) // 7
// CRC stripping for Type frames
.cst()
.set_bit()
// Fast Ethernet speed
.fes()
.set_bit()
// Duplex mode
.dm()
.set_bit()
// Automatic pad/CRC stripping
.acs()
.set_bit()
// Retry disable in half-duplex mode
.dr()
.set_bit()
});
eth_mac.macecr.modify(|_, w| {
w.eipgen()
.clear_bit()
.usp()
.clear_bit()
.spen()
.clear_bit()
.dcrcc()
.clear_bit()
});
// Set the MAC address.
// Writes to LR trigger both registers to be loaded into the MAC,
// so write to LR last.
eth_mac.maca0hr.write(|w| {
w.addrhi().bits(
u16::from(mac_addr.0[4]) | (u16::from(mac_addr.0[5]) << 8),
)
});
eth_mac.maca0lr.write(|w| {
w.addrlo().bits(
u32::from(mac_addr.0[0])
| (u32::from(mac_addr.0[1]) << 8)
| (u32::from(mac_addr.0[2]) << 16)
| (u32::from(mac_addr.0[3]) << 24),
)
});
// frame filter register
eth_mac.macpfr.modify(|_, w| {
w.dntu()
.clear_bit()
.ipfe()
.clear_bit()
.vtfe()
.clear_bit()
.hpf()
.clear_bit()
.saf()
.clear_bit()
.saif()
.clear_bit()
.pcf()
.bits(0b00)
.dbf()
.clear_bit()
.pm()
.clear_bit()
.daif()
.clear_bit()
.hmc()
.clear_bit()
.huc()
.clear_bit()
// Receive All
.ra()
.clear_bit()
// Promiscuous mode
.pr()
.clear_bit()
});
eth_mac.macwtr.write(|w| w.pwe().clear_bit());
// Flow Control Register
eth_mac.macqtx_fcr.modify(|_, w| {
// Pause time
w.pt().bits(0x100)
});
eth_mac.macrx_fcr.modify(|_, w| w);
// Mask away Ethernet MAC MMC RX/TX interrupts. These are statistics
// counter interrupts and are enabled by default. We need to manually
// disable various ethernet interrupts so they don't unintentionally
// hang the device. The user is free to re-enable them later to provide
// ethernet MAC-related statistics
eth_mac.mmc_rx_interrupt_mask.modify(|_, w| {
w.rxlpiuscim()
.set_bit()
.rxucgpim()
.set_bit()
.rxalgnerpim()
.set_bit()
.rxcrcerpim()
.set_bit()
});
eth_mac.mmc_tx_interrupt_mask.modify(|_, w| {
w.txlpiuscim()
.set_bit()
.txgpktim()
.set_bit()
.txmcolgpim()
.set_bit()
.txscolgpim()
.set_bit()
});
// TODO: The MMC_TX/RX_INTERRUPT_MASK registers incorrectly mark
// LPITRCIM as read-only, so svd2rust doens't generate bindings to
// modify them. Instead, as a workaround, we manually manipulate the
// bits
eth_mac
.mmc_tx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mac
.mmc_rx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mtl.mtlrx_qomr.modify(|_, w| {
w
// Receive store and forward
.rsf()
.set_bit()
// Dropping of TCP/IP checksum error frames disable
.dis_tcp_ef()
.clear_bit()
// Forward error frames
.fep()
.clear_bit()
// Forward undersized good packets
.fup()
.clear_bit()
});
eth_mtl.mtltx_qomr.modify(|_, w| {
w
// Transmit store and forward
.tsf()
.set_bit()
});
// operation mode register
eth_dma.dmamr.modify(|_, w| {
w.intm()
.bits(0b00)
// Rx Tx priority ratio 1:1
.pr()
.bits(0b000)
.txpr()
.clear_bit()
.da()
.clear_bit()
});
// bus mode register
| // Write-back descriptor is valid if:
// | random_line_split |
types.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Loaded representation for runtime types.
use libra_types::{
account_address::AccountAddress,
vm_error::{StatusCode, VMStatus},
};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
value::{MoveStructLayout, MoveTypeLayout},
};
use std::{convert::TryInto, fmt::Write};
use vm::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res)
}
pub fn is_resource(&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"), | Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl TryInto<MoveTypeLayout> for &FatType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveTypeLayout, Self::Error> {
Ok(match self {
FatType::Address => MoveTypeLayout::Address,
FatType::U8 => MoveTypeLayout::U8,
FatType::U64 => MoveTypeLayout::U64,
FatType::U128 => MoveTypeLayout::U128,
FatType::Bool => MoveTypeLayout::Bool,
FatType::Vector(v) => MoveTypeLayout::Vector(Box::new(v.as_ref().try_into()?)),
FatType::Struct(s) => MoveTypeLayout::Struct(MoveStructLayout::new(
s.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
)),
FatType::Signer => MoveTypeLayout::Signer,
_ => return Err(VMStatus::new(StatusCode::ABORT_TYPE_MISMATCH_ERROR)),
})
}
} | Address => debug_write!(buf, "address"), | random_line_split |
types.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Loaded representation for runtime types.
use libra_types::{
account_address::AccountAddress,
vm_error::{StatusCode, VMStatus},
};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
value::{MoveStructLayout, MoveTypeLayout},
};
use std::{convert::TryInto, fmt::Write};
use vm::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res)
}
pub fn | (&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"),
Address => debug_write!(buf, "address"),
Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl TryInto<MoveTypeLayout> for &FatType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveTypeLayout, Self::Error> {
Ok(match self {
FatType::Address => MoveTypeLayout::Address,
FatType::U8 => MoveTypeLayout::U8,
FatType::U64 => MoveTypeLayout::U64,
FatType::U128 => MoveTypeLayout::U128,
FatType::Bool => MoveTypeLayout::Bool,
FatType::Vector(v) => MoveTypeLayout::Vector(Box::new(v.as_ref().try_into()?)),
FatType::Struct(s) => MoveTypeLayout::Struct(MoveStructLayout::new(
s.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
)),
FatType::Signer => MoveTypeLayout::Signer,
_ => return Err(VMStatus::new(StatusCode::ABORT_TYPE_MISMATCH_ERROR)),
})
}
}
| is_resource | identifier_name |
types.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! Loaded representation for runtime types.
use libra_types::{
account_address::AccountAddress,
vm_error::{StatusCode, VMStatus},
};
use move_core_types::{
identifier::Identifier,
language_storage::{StructTag, TypeTag},
value::{MoveStructLayout, MoveTypeLayout},
};
use std::{convert::TryInto, fmt::Write};
use vm::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag> | }
pub fn is_resource(&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"),
Address => debug_write!(buf, "address"),
Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl TryInto<MoveTypeLayout> for &FatType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveTypeLayout, Self::Error> {
Ok(match self {
FatType::Address => MoveTypeLayout::Address,
FatType::U8 => MoveTypeLayout::U8,
FatType::U64 => MoveTypeLayout::U64,
FatType::U128 => MoveTypeLayout::U128,
FatType::Bool => MoveTypeLayout::Bool,
FatType::Vector(v) => MoveTypeLayout::Vector(Box::new(v.as_ref().try_into()?)),
FatType::Struct(s) => MoveTypeLayout::Struct(MoveStructLayout::new(
s.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
)),
FatType::Signer => MoveTypeLayout::Signer,
_ => return Err(VMStatus::new(StatusCode::ABORT_TYPE_MISMATCH_ERROR)),
})
}
}
| {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res) | identifier_body |
hash2curve.rs | fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: FieldElement = FieldElement::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
impl Sgn0 for FieldElement {
fn sgn0(&self) -> Choice {
self.is_odd()
}
}
impl OsswuMap for FieldElement {
const PARAMS: OsswuMapParams<Self> = OsswuMapParams {
c1: &[
0x0000_0000_3fff_ffff,
0xbfff_ffff_c000_0000,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0x3fff_ffff_ffff_ffff,
],
c2: FieldElement::from_hex(
"019877cc1041b7555743c0ae2e3a3e61fb2aaa2e0e87ea557a563d8b598a0940d0a697a9e0b9e92cfaa314f583c9d066",
),
map_a: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc",
),
map_b: FieldElement::from_hex(
"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
),
z: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffff3",
),
};
}
impl MapToCurve for FieldElement {
type Output = ProjectivePoint;
fn map_to_curve(&self) -> Self::Output {
let (qx, qy) = self.osswu();
// TODO(tarcieri): assert that `qy` is correct? less circuitous conversion?
AffinePoint::decompress(&qx.to_bytes(), qy.is_odd())
.unwrap()
.into()
}
}
impl FromOkm for Scalar {
type Length = U72;
fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: Scalar = Scalar::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = Scalar::reduce(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = Scalar::reduce(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
#[cfg(test)]
mod tests {
use crate::{FieldElement, NistP384, Scalar};
use elliptic_curve::{
bigint::{ArrayEncoding, NonZero, U384, U576},
consts::U72,
generic_array::GenericArray,
group::cofactor::CofactorGroup,
hash2curve::{self, ExpandMsgXmd, FromOkm, GroupDigest, MapToCurve},
ops::Reduce,
sec1::{self, ToEncodedPoint},
Curve,
};
use hex_literal::hex;
use proptest::{num::u64::ANY, prelude::ProptestConfig, proptest};
use sha2::Sha384;
#[test]
fn hash_to_curve() {
struct TestVector {
msg: &'static [u8],
p_x: [u8; 48],
p_y: [u8; 48],
u_0: [u8; 48],
u_1: [u8; 48],
q0_x: [u8; 48],
q0_y: [u8; 48],
q1_x: [u8; 48],
q1_y: [u8; 48],
}
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf1f5d2a0ea146d057971336d8d1"),
q0_x: hex!("0ceece45b73f89844671df962ad2932122e878ad2259e650626924e4e7f132589341dec1480ebcbbbe3509d11fb570b7"),
q0_y: hex!("fafd71a3115298f6be4ae5c6dfc96c400cfb55760f185b7b03f3fa45f3f91eb65d27628b3c705cafd0466fafa54883ce"),
q1_x: hex!("dea1be8d3f9be4cbf4fab9d71d549dde76875b5d9b876832313a083ec81e528cbc2a0a1d0596b3bcb0ba77866b129776"),
q1_y: hex!("eb15fe71662214fb03b65541f40d3eb0f4cf5c3b559f647da138c9f9b7484c48a08760e02c16f1992762cb7298fa52cf"),
},
TestVector {
msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq",
p_x: hex!("03c3a9f401b78c6c36a52f07eeee0ec1289f178adf78448f43a3850e0456f5dd7f7633dd31676d990eda32882ab486c0"),
p_y: hex!("cc183d0d7bdfd0a3af05f50e16a3f2de4abbc523215bf57c848d5ea662482b8c1f43dc453a93b94a8026db58f3f5d878"),
u_0: hex!("04c00051b0de6e726d228c85bf243bf5f4789efb512b22b498cde3821db9da667199b74bd5a09a79583c6d353a3bb41c"),
u_1: hex!("97580f218255f899f9204db64cd15e6a312cb4d8182375d1e5157c8f80f41d6a1a4b77fb1ded9dce56c32058b8d5202b"),
q0_x: hex!("051a22105e0817a35d66196338c8d85bd52690d79bba373ead8a86dd9899411513bb9f75273f6483395a7847fb21edb4"),
q0_y: hex!("f168295c1bbcff5f8b01248e9dbc885335d6d6a04aea960f7384f746ba6502ce477e624151cc1d1392b00df0f5400c06"),
q1_x: hex!("6ad7bc8ed8b841efd8ad0765c8a23d0b968ec9aa360a558ff33500f164faa02bee6c704f5f91507c4c5aad2b0dc5b943"),
q1_y: hex!("47313cc0a873ade774048338fc34ca5313f96bbf6ae22ac6ef475d85f03d24792dc6afba8d0b4a70170c1b4f0f716629"),
},
TestVector {
msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
p_x: hex!("7b18d210b1f090ac701f65f606f6ca18fb8d081e3bc6cbd937c5604325f1cdea4c15c10a54ef303aabf2ea58bd9947a4"),
p_y: hex!("ea857285a33abb516732915c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf() {
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x02\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("0fcba4a204f67d6c13f780e613915f755319aaa3cb03cd20a5a4a6c403a4812a4fff5d3223e2c309aa66b05cb7611fd4"),
},
];
'outer: for test_vector in TEST_VECTORS {
let key_info_len = u16::try_from(test_vector.key_info.len())
.unwrap()
.to_be_bytes();
for counter in 0_u8..=u8::MAX {
let scalar = NistP384::hash_to_scalar::<ExpandMsgXmd<Sha384>>(
&[
test_vector.seed,
&key_info_len,
test_vector.key_info,
&counter.to_be_bytes(),
],
&[test_vector.dst],
)
.unwrap();
if!bool::from(scalar.is_zero()) |
}
panic!("deriving key failed");
}
}
#[test]
fn from_okm_fuzz() {
let mut wide_order = GenericArray::default();
wide_order[24..].copy_from_slice(&NistP384::ORDER.to_be_byte_array());
let wide_order = NonZero::new(U576::from_be_byte_array(wide_order)).unwrap();
let simple_from_okm = move |data: GenericArray<u8, U72>| -> Scalar {
let data = U576::from_be_slice(&data);
let scalar = data % wide_order;
let reduced_scalar = U384::from_be_slice(&scalar.to_be_byte_array()[24..]);
Scalar::reduce(reduced_scalar)
};
proptest!(ProptestConfig::with_cases(1000), |(b0 in ANY, b1 in ANY, b2 in ANY, b3 in ANY, b4 in ANY, b5 in ANY, b6 in ANY, b7 in ANY, b8 in ANY)| {
| {
assert_eq!(scalar.to_bytes().as_slice(), test_vector.sk_sm);
continue 'outer;
} | conditional_block |
hash2curve.rs | fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: FieldElement = FieldElement::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
impl Sgn0 for FieldElement {
fn sgn0(&self) -> Choice {
self.is_odd()
}
}
impl OsswuMap for FieldElement {
const PARAMS: OsswuMapParams<Self> = OsswuMapParams {
c1: &[
0x0000_0000_3fff_ffff,
0xbfff_ffff_c000_0000,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0x3fff_ffff_ffff_ffff,
],
c2: FieldElement::from_hex(
"019877cc1041b7555743c0ae2e3a3e61fb2aaa2e0e87ea557a563d8b598a0940d0a697a9e0b9e92cfaa314f583c9d066",
),
map_a: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc",
),
map_b: FieldElement::from_hex(
"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
),
z: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffff3",
),
};
}
impl MapToCurve for FieldElement {
type Output = ProjectivePoint;
fn map_to_curve(&self) -> Self::Output {
let (qx, qy) = self.osswu();
// TODO(tarcieri): assert that `qy` is correct? less circuitous conversion?
AffinePoint::decompress(&qx.to_bytes(), qy.is_odd())
.unwrap()
.into()
}
}
impl FromOkm for Scalar {
type Length = U72;
fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: Scalar = Scalar::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = Scalar::reduce(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = Scalar::reduce(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
#[cfg(test)]
mod tests {
use crate::{FieldElement, NistP384, Scalar};
use elliptic_curve::{
bigint::{ArrayEncoding, NonZero, U384, U576},
consts::U72,
generic_array::GenericArray,
group::cofactor::CofactorGroup,
hash2curve::{self, ExpandMsgXmd, FromOkm, GroupDigest, MapToCurve},
ops::Reduce,
sec1::{self, ToEncodedPoint},
Curve,
};
use hex_literal::hex;
use proptest::{num::u64::ANY, prelude::ProptestConfig, proptest};
use sha2::Sha384;
#[test]
fn hash_to_curve() {
struct TestVector {
msg: &'static [u8],
p_x: [u8; 48],
p_y: [u8; 48],
u_0: [u8; 48],
u_1: [u8; 48],
q0_x: [u8; 48],
q0_y: [u8; 48],
q1_x: [u8; 48],
q1_y: [u8; 48],
}
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf1f5d2a0ea146d057971336d8d1"),
q0_x: hex!("0ceece45b73f89844671df962ad2932122e878ad2259e650626924e4e7f132589341dec1480ebcbbbe3509d11fb570b7"),
q0_y: hex!("fafd71a3115298f6be4ae5c6dfc96c400cfb55760f185b7b03f3fa45f3f91eb65d27628b3c705cafd0466fafa54883ce"),
q1_x: hex!("dea1be8d3f9be4cbf4fab9d71d549dde76875b5d9b876832313a083ec81e528cbc2a0a1d0596b3bcb0ba77866b129776"),
q1_y: hex!("eb15fe71662214fb03b65541f40d3eb0f4cf5c3b559f647da138c9f9b7484c48a08760e02c16f1992762cb7298fa52cf"),
},
TestVector {
msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq",
p_x: hex!("03c3a9f401b78c6c36a52f07eeee0ec1289f178adf78448f43a3850e0456f5dd7f7633dd31676d990eda32882ab486c0"),
p_y: hex!("cc183d0d7bdfd0a3af05f50e16a3f2de4abbc523215bf57c848d5ea662482b8c1f43dc453a93b94a8026db58f3f5d878"),
u_0: hex!("04c00051b0de6e726d228c85bf243bf5f4789efb512b22b498cde3821db9da667199b74bd5a09a79583c6d353a3bb41c"),
u_1: hex!("97580f218255f899f9204db64cd15e6a312cb4d8182375d1e5157c8f80f41d6a1a4b77fb1ded9dce56c32058b8d5202b"),
q0_x: hex!("051a22105e0817a35d66196338c8d85bd52690d79bba373ead8a86dd9899411513bb9f75273f6483395a7847fb21edb4"),
q0_y: hex!("f168295c1bbcff5f8b01248e9dbc885335d6d6a04aea960f7384f746ba6502ce477e624151cc1d1392b00df0f5400c06"),
q1_x: hex!("6ad7bc8ed8b841efd8ad0765c8a23d0b968ec9aa360a558ff33500f164faa02bee6c704f5f91507c4c5aad2b0dc5b943"),
q1_y: hex!("47313cc0a873ade774048338fc34ca5313f96bbf6ae22ac6ef475d85f03d24792dc6afba8d0b4a70170c1b4f0f716629"),
},
TestVector {
msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
p_x: hex!("7b18d210b1f090ac701f65f606f6ca18fb8d081e3bc6cbd937c5604325f1cdea4c15c10a54ef303aabf2ea58bd9947a4"),
p_y: hex!("ea857285a33abb516732915c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf() | },
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x02\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("0fcba4a204f67d6c13f780e613915f755319aaa3cb03cd20a5a4a6c403a4812a4fff5d3223e2c309aa66b05cb7611fd4"),
},
];
'outer: for test_vector in TEST_VECTORS {
let key_info_len = u16::try_from(test_vector.key_info.len())
.unwrap()
.to_be_bytes();
for counter in 0_u8..=u8::MAX {
let scalar = NistP384::hash_to_scalar::<ExpandMsgXmd<Sha384>>(
&[
test_vector.seed,
&key_info_len,
test_vector.key_info,
&counter.to_be_bytes(),
],
&[test_vector.dst],
)
.unwrap();
if!bool::from(scalar.is_zero()) {
assert_eq!(scalar.to_bytes().as_slice(), test_vector.sk_sm);
continue 'outer;
}
}
panic!("deriving key failed");
}
}
#[test]
fn from_okm_fuzz() {
let mut wide_order = GenericArray::default();
wide_order[24..].copy_from_slice(&NistP384::ORDER.to_be_byte_array());
let wide_order = NonZero::new(U576::from_be_byte_array(wide_order)).unwrap();
let simple_from_okm = move |data: GenericArray<u8, U72>| -> Scalar {
let data = U576::from_be_slice(&data);
let scalar = data % wide_order;
let reduced_scalar = U384::from_be_slice(&scalar.to_be_byte_array()[24..]);
Scalar::reduce(reduced_scalar)
};
proptest!(ProptestConfig::with_cases(1000), |(b0 in ANY, b1 in ANY, b2 in ANY, b3 in ANY, b4 in ANY, b5 in ANY, b6 in ANY, b7 in ANY, b8 in ANY)| {
| {
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"), | identifier_body |
hash2curve.rs | fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: FieldElement = FieldElement::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
impl Sgn0 for FieldElement {
fn sgn0(&self) -> Choice {
self.is_odd()
}
}
impl OsswuMap for FieldElement {
const PARAMS: OsswuMapParams<Self> = OsswuMapParams {
c1: &[
0x0000_0000_3fff_ffff,
0xbfff_ffff_c000_0000,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0x3fff_ffff_ffff_ffff,
],
c2: FieldElement::from_hex(
"019877cc1041b7555743c0ae2e3a3e61fb2aaa2e0e87ea557a563d8b598a0940d0a697a9e0b9e92cfaa314f583c9d066",
),
map_a: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc",
),
map_b: FieldElement::from_hex(
"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
),
z: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffff3",
),
};
}
impl MapToCurve for FieldElement {
type Output = ProjectivePoint;
fn map_to_curve(&self) -> Self::Output {
let (qx, qy) = self.osswu();
// TODO(tarcieri): assert that `qy` is correct? less circuitous conversion?
AffinePoint::decompress(&qx.to_bytes(), qy.is_odd())
.unwrap()
.into()
}
}
impl FromOkm for Scalar {
type Length = U72;
fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: Scalar = Scalar::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = Scalar::reduce(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = Scalar::reduce(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
#[cfg(test)]
mod tests {
use crate::{FieldElement, NistP384, Scalar};
use elliptic_curve::{
bigint::{ArrayEncoding, NonZero, U384, U576},
consts::U72,
generic_array::GenericArray,
group::cofactor::CofactorGroup,
hash2curve::{self, ExpandMsgXmd, FromOkm, GroupDigest, MapToCurve},
ops::Reduce,
sec1::{self, ToEncodedPoint},
Curve,
};
use hex_literal::hex;
use proptest::{num::u64::ANY, prelude::ProptestConfig, proptest};
use sha2::Sha384;
#[test]
fn hash_to_curve() {
struct TestVector {
msg: &'static [u8],
p_x: [u8; 48],
p_y: [u8; 48],
u_0: [u8; 48],
u_1: [u8; 48],
q0_x: [u8; 48],
q0_y: [u8; 48], |
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf1f5d2a0ea146d057971336d8d1"),
q0_x: hex!("0ceece45b73f89844671df962ad2932122e878ad2259e650626924e4e7f132589341dec1480ebcbbbe3509d11fb570b7"),
q0_y: hex!("fafd71a3115298f6be4ae5c6dfc96c400cfb55760f185b7b03f3fa45f3f91eb65d27628b3c705cafd0466fafa54883ce"),
q1_x: hex!("dea1be8d3f9be4cbf4fab9d71d549dde76875b5d9b876832313a083ec81e528cbc2a0a1d0596b3bcb0ba77866b129776"),
q1_y: hex!("eb15fe71662214fb03b65541f40d3eb0f4cf5c3b559f647da138c9f9b7484c48a08760e02c16f1992762cb7298fa52cf"),
},
TestVector {
msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq",
p_x: hex!("03c3a9f401b78c6c36a52f07eeee0ec1289f178adf78448f43a3850e0456f5dd7f7633dd31676d990eda32882ab486c0"),
p_y: hex!("cc183d0d7bdfd0a3af05f50e16a3f2de4abbc523215bf57c848d5ea662482b8c1f43dc453a93b94a8026db58f3f5d878"),
u_0: hex!("04c00051b0de6e726d228c85bf243bf5f4789efb512b22b498cde3821db9da667199b74bd5a09a79583c6d353a3bb41c"),
u_1: hex!("97580f218255f899f9204db64cd15e6a312cb4d8182375d1e5157c8f80f41d6a1a4b77fb1ded9dce56c32058b8d5202b"),
q0_x: hex!("051a22105e0817a35d66196338c8d85bd52690d79bba373ead8a86dd9899411513bb9f75273f6483395a7847fb21edb4"),
q0_y: hex!("f168295c1bbcff5f8b01248e9dbc885335d6d6a04aea960f7384f746ba6502ce477e624151cc1d1392b00df0f5400c06"),
q1_x: hex!("6ad7bc8ed8b841efd8ad0765c8a23d0b968ec9aa360a558ff33500f164faa02bee6c704f5f91507c4c5aad2b0dc5b943"),
q1_y: hex!("47313cc0a873ade774048338fc34ca5313f96bbf6ae22ac6ef475d85f03d24792dc6afba8d0b4a70170c1b4f0f716629"),
},
TestVector {
msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
p_x: hex!("7b18d210b1f090ac701f65f606f6ca18fb8d081e3bc6cbd937c5604325f1cdea4c15c10a54ef303aabf2ea58bd9947a4"),
p_y: hex!("ea857285a33abb516732915c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf() {
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x02\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("0fcba4a204f67d6c13f780e613915f755319aaa3cb03cd20a5a4a6c403a4812a4fff5d3223e2c309aa66b05cb7611fd4"),
},
];
'outer: for test_vector in TEST_VECTORS {
let key_info_len = u16::try_from(test_vector.key_info.len())
.unwrap()
.to_be_bytes();
for counter in 0_u8..=u8::MAX {
let scalar = NistP384::hash_to_scalar::<ExpandMsgXmd<Sha384>>(
&[
test_vector.seed,
&key_info_len,
test_vector.key_info,
&counter.to_be_bytes(),
],
&[test_vector.dst],
)
.unwrap();
if!bool::from(scalar.is_zero()) {
assert_eq!(scalar.to_bytes().as_slice(), test_vector.sk_sm);
continue 'outer;
}
}
panic!("deriving key failed");
}
}
#[test]
fn from_okm_fuzz() {
let mut wide_order = GenericArray::default();
wide_order[24..].copy_from_slice(&NistP384::ORDER.to_be_byte_array());
let wide_order = NonZero::new(U576::from_be_byte_array(wide_order)).unwrap();
let simple_from_okm = move |data: GenericArray<u8, U72>| -> Scalar {
let data = U576::from_be_slice(&data);
let scalar = data % wide_order;
let reduced_scalar = U384::from_be_slice(&scalar.to_be_byte_array()[24..]);
Scalar::reduce(reduced_scalar)
};
proptest!(ProptestConfig::with_cases(1000), |(b0 in ANY, b1 in ANY, b2 in ANY, b3 in ANY, b4 in ANY, b5 in ANY, b6 in ANY, b7 in ANY, b8 in ANY)| {
| q1_x: [u8; 48],
q1_y: [u8; 48],
} | random_line_split |
hash2curve.rs | fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: FieldElement = FieldElement::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = FieldElement::from_uint_unchecked(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
impl Sgn0 for FieldElement {
fn sgn0(&self) -> Choice {
self.is_odd()
}
}
impl OsswuMap for FieldElement {
const PARAMS: OsswuMapParams<Self> = OsswuMapParams {
c1: &[
0x0000_0000_3fff_ffff,
0xbfff_ffff_c000_0000,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0xffff_ffff_ffff_ffff,
0x3fff_ffff_ffff_ffff,
],
c2: FieldElement::from_hex(
"019877cc1041b7555743c0ae2e3a3e61fb2aaa2e0e87ea557a563d8b598a0940d0a697a9e0b9e92cfaa314f583c9d066",
),
map_a: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffffc",
),
map_b: FieldElement::from_hex(
"b3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef",
),
z: FieldElement::from_hex(
"fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffeffffffff0000000000000000fffffff3",
),
};
}
impl MapToCurve for FieldElement {
type Output = ProjectivePoint;
fn map_to_curve(&self) -> Self::Output {
let (qx, qy) = self.osswu();
// TODO(tarcieri): assert that `qy` is correct? less circuitous conversion?
AffinePoint::decompress(&qx.to_bytes(), qy.is_odd())
.unwrap()
.into()
}
}
impl FromOkm for Scalar {
type Length = U72;
fn from_okm(data: &GenericArray<u8, Self::Length>) -> Self {
const F_2_288: Scalar = Scalar::from_hex(
"000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000",
);
let mut d0 = FieldBytes::default();
d0[12..].copy_from_slice(&data[0..36]);
let d0 = Scalar::reduce(U384::from_be_byte_array(d0));
let mut d1 = FieldBytes::default();
d1[12..].copy_from_slice(&data[36..]);
let d1 = Scalar::reduce(U384::from_be_byte_array(d1));
d0 * F_2_288 + d1
}
}
#[cfg(test)]
mod tests {
use crate::{FieldElement, NistP384, Scalar};
use elliptic_curve::{
bigint::{ArrayEncoding, NonZero, U384, U576},
consts::U72,
generic_array::GenericArray,
group::cofactor::CofactorGroup,
hash2curve::{self, ExpandMsgXmd, FromOkm, GroupDigest, MapToCurve},
ops::Reduce,
sec1::{self, ToEncodedPoint},
Curve,
};
use hex_literal::hex;
use proptest::{num::u64::ANY, prelude::ProptestConfig, proptest};
use sha2::Sha384;
#[test]
fn | () {
struct TestVector {
msg: &'static [u8],
p_x: [u8; 48],
p_y: [u8; 48],
u_0: [u8; 48],
u_1: [u8; 48],
q0_x: [u8; 48],
q0_y: [u8; 48],
q1_x: [u8; 48],
q1_y: [u8; 48],
}
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf1f5d2a0ea146d057971336d8d1"),
q0_x: hex!("0ceece45b73f89844671df962ad2932122e878ad2259e650626924e4e7f132589341dec1480ebcbbbe3509d11fb570b7"),
q0_y: hex!("fafd71a3115298f6be4ae5c6dfc96c400cfb55760f185b7b03f3fa45f3f91eb65d27628b3c705cafd0466fafa54883ce"),
q1_x: hex!("dea1be8d3f9be4cbf4fab9d71d549dde76875b5d9b876832313a083ec81e528cbc2a0a1d0596b3bcb0ba77866b129776"),
q1_y: hex!("eb15fe71662214fb03b65541f40d3eb0f4cf5c3b559f647da138c9f9b7484c48a08760e02c16f1992762cb7298fa52cf"),
},
TestVector {
msg: b"q128_qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq",
p_x: hex!("03c3a9f401b78c6c36a52f07eeee0ec1289f178adf78448f43a3850e0456f5dd7f7633dd31676d990eda32882ab486c0"),
p_y: hex!("cc183d0d7bdfd0a3af05f50e16a3f2de4abbc523215bf57c848d5ea662482b8c1f43dc453a93b94a8026db58f3f5d878"),
u_0: hex!("04c00051b0de6e726d228c85bf243bf5f4789efb512b22b498cde3821db9da667199b74bd5a09a79583c6d353a3bb41c"),
u_1: hex!("97580f218255f899f9204db64cd15e6a312cb4d8182375d1e5157c8f80f41d6a1a4b77fb1ded9dce56c32058b8d5202b"),
q0_x: hex!("051a22105e0817a35d66196338c8d85bd52690d79bba373ead8a86dd9899411513bb9f75273f6483395a7847fb21edb4"),
q0_y: hex!("f168295c1bbcff5f8b01248e9dbc885335d6d6a04aea960f7384f746ba6502ce477e624151cc1d1392b00df0f5400c06"),
q1_x: hex!("6ad7bc8ed8b841efd8ad0765c8a23d0b968ec9aa360a558ff33500f164faa02bee6c704f5f91507c4c5aad2b0dc5b943"),
q1_y: hex!("47313cc0a873ade774048338fc34ca5313f96bbf6ae22ac6ef475d85f03d24792dc6afba8d0b4a70170c1b4f0f716629"),
},
TestVector {
msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
p_x: hex!("7b18d210b1f090ac701f65f606f6ca18fb8d081e3bc6cbd937c5604325f1cdea4c15c10a54ef303aabf2ea58bd9947a4"),
p_y: hex!("ea857285a33abb516732915c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf() {
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x02\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("0fcba4a204f67d6c13f780e613915f755319aaa3cb03cd20a5a4a6c403a4812a4fff5d3223e2c309aa66b05cb7611fd4"),
},
];
'outer: for test_vector in TEST_VECTORS {
let key_info_len = u16::try_from(test_vector.key_info.len())
.unwrap()
.to_be_bytes();
for counter in 0_u8..=u8::MAX {
let scalar = NistP384::hash_to_scalar::<ExpandMsgXmd<Sha384>>(
&[
test_vector.seed,
&key_info_len,
test_vector.key_info,
&counter.to_be_bytes(),
],
&[test_vector.dst],
)
.unwrap();
if!bool::from(scalar.is_zero()) {
assert_eq!(scalar.to_bytes().as_slice(), test_vector.sk_sm);
continue 'outer;
}
}
panic!("deriving key failed");
}
}
#[test]
fn from_okm_fuzz() {
let mut wide_order = GenericArray::default();
wide_order[24..].copy_from_slice(&NistP384::ORDER.to_be_byte_array());
let wide_order = NonZero::new(U576::from_be_byte_array(wide_order)).unwrap();
let simple_from_okm = move |data: GenericArray<u8, U72>| -> Scalar {
let data = U576::from_be_slice(&data);
let scalar = data % wide_order;
let reduced_scalar = U384::from_be_slice(&scalar.to_be_byte_array()[24..]);
Scalar::reduce(reduced_scalar)
};
proptest!(ProptestConfig::with_cases(1000), |(b0 in ANY, b1 in ANY, b2 in ANY, b3 in ANY, b4 in ANY, b5 in ANY, b6 in ANY, b7 in ANY, b8 in ANY)| {
| hash_to_curve | identifier_name |
mod.rs | pub mod context;
pub mod types;
///////////////////// Validation Helpers /////////////////////
use std::collections::HashMap;
use crate::frontend::validate::types::Type;
use crate::frontend::parse::ast;
///////////////////// TYPES /////////////////////
// NOTE: Offsets are i32 for Cranelift
/// Stores struct definitions
struct StructDefinition {
/// Map of field_name -> (type, byte offset)
fields: HashMap<String, StructField>,
}
pub struct StructField {
pub ty: Type,
pub offset: i32,
pub is_public: bool,
}
pub struct TypeTableEntry {
/// Size of type in bytes
pub size: usize,
/// Alignment of type in bytes
alignment: usize,
// TODO: Store fields and their offsets here too
// field_offets: HashMap<?>,
}
impl TypeTableEntry {
fn new(size: usize, alignment: usize) -> Self {
Self { size, alignment }
}
}
/// Stores type sizes and alignments
pub struct TypeTable {
/// Map of field_name -> (size, alignment) in bytes
data: HashMap<Type, TypeTableEntry>
}
impl TypeTable {
// TODO: Accept word size here and adjust table accordingly
// TODO: Support `isize` and `usize`
fn new() -> Self {
let mut data = HashMap::new();
// FIXME: This could be looked up via `match`, but this is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t.clone())),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty,.. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t) {
Ok(())
} else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty,.. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
| // e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn get_variable_mut(&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match ¤t.memory_usage {
// keep looking for root
MemoryUsage::Alias(next) => target = next.clone(),
// TODO: I don't know if this is correct
// returning what was input -> use it instead of an allocation
MemoryUsage::FunctionParam => {
current.memory_usage = MemoryUsage::Alias(target);
break;
}
// Found the root
MemoryUsage::StackSlot
| MemoryUsage::StructReturn => {
current.memory_usage = MemoryUsage::StructReturn;
break;
}
}
}
}
}
///////////////////// FUNCTIONS /////////////////////
pub struct FunctionDefinition {
/// Function parameters (field_name, field_type, mutable) in order
pub parameters: Vec<(String, Type, bool)>,
pub return_type: Type,
pub is_extern: bool,
pub is_validated: bool,
}
pub struct FunctionTable {
// Map of (name -> data)
pub functions: HashMap<String, FunctionDefinition>
}
impl FunctionTable {
fn new() -> Self {
Self {
functions: HashMap::new(),
}
}
// FIXME: A few copies and clones, but nothing bad
fn forward_declare_function(&mut self, validated_prototype: &ast::FunctionPrototype, is_extern: bool) -> Result<(), String> {
if self.functions.contains_key(&validated_prototype.name) {
return Err(format!("Function `{}` already exists", validated_prototype.name));
}
let parameters = validated_prototype.parameters.iter().map(|param| {
(param.name.clone(), param.ty.clone(), param.mutable)
}).collect();
let definition = FunctionDefinition {
parameters,
return_type: validated_prototype.return_type.clone(),
is_extern,
is_validated: false,
};
self.functions.insert(validated_prototype.name.clone(), definition);
Ok(())
}
fn __get_mut(&mut self, name: &str) -> Result<&mut FunctionDefinition, String> {
self.functions.get_mut(name)
.ok_or(format!("Could not find function `{}`", name))
}
fn __get(&self, name: &str) -> Result<&FunctionDefinition, String> {
self.functions.get(name)
.ok_or(format!("Could not find function `{}`", name))
}
// TODO: This and `get_validated_function_definition` may not ever be used
// (this functionality exists in finalized JIT product)
fn mark_function_validated(&mut self, name: &str) -> Result<(), String> {
self.__get_mut(name)?
.is_validated = true;
Ok(())
}
// TODO: Will this ever be used?
// fn get_validated_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition<'input>, String> {
// let function = self.__get(name)?;
// if!function.is_validated {
// // FIXME: This should not be possible
// Err(format!("Function `{}` was not validated", name))
// } else {
// Ok(function)
// }
// }
/// Returns a `FunctionDefinition` that is not guarenteed to have been
/// successfully validated
fn get_unchecked_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition, String> {
self.__get(name)
}
} | // TODO: References an existing variable -> ??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable -> ?? | random_line_split |
mod.rs | pub mod context;
pub mod types;
///////////////////// Validation Helpers /////////////////////
use std::collections::HashMap;
use crate::frontend::validate::types::Type;
use crate::frontend::parse::ast;
///////////////////// TYPES /////////////////////
// NOTE: Offsets are i32 for Cranelift
/// Stores struct definitions
struct StructDefinition {
/// Map of field_name -> (type, byte offset)
fields: HashMap<String, StructField>,
}
pub struct StructField {
pub ty: Type,
pub offset: i32,
pub is_public: bool,
}
pub struct TypeTableEntry {
/// Size of type in bytes
pub size: usize,
/// Alignment of type in bytes
alignment: usize,
// TODO: Store fields and their offsets here too
// field_offets: HashMap<?>,
}
impl TypeTableEntry {
fn new(size: usize, alignment: usize) -> Self {
Self { size, alignment }
}
}
/// Stores type sizes and alignments
pub struct TypeTable {
/// Map of field_name -> (size, alignment) in bytes
data: HashMap<Type, TypeTableEntry>
}
impl TypeTable {
// TODO: Accept word size here and adjust table accordingly
// TODO: Support `isize` and `usize`
fn new() -> Self {
let mut data = HashMap::new();
// FIXME: This could be looked up via `match`, but this is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t.clone())),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty,.. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t) {
Ok(())
} else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty,.. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
// TODO: References an existing variable ->??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable ->??
// e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn | (&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match ¤t.memory_usage {
// keep looking for root
MemoryUsage::Alias(next) => target = next.clone(),
// TODO: I don't know if this is correct
// returning what was input -> use it instead of an allocation
MemoryUsage::FunctionParam => {
current.memory_usage = MemoryUsage::Alias(target);
break;
}
// Found the root
MemoryUsage::StackSlot
| MemoryUsage::StructReturn => {
current.memory_usage = MemoryUsage::StructReturn;
break;
}
}
}
}
}
///////////////////// FUNCTIONS /////////////////////
pub struct FunctionDefinition {
/// Function parameters (field_name, field_type, mutable) in order
pub parameters: Vec<(String, Type, bool)>,
pub return_type: Type,
pub is_extern: bool,
pub is_validated: bool,
}
pub struct FunctionTable {
// Map of (name -> data)
pub functions: HashMap<String, FunctionDefinition>
}
impl FunctionTable {
fn new() -> Self {
Self {
functions: HashMap::new(),
}
}
// FIXME: A few copies and clones, but nothing bad
fn forward_declare_function(&mut self, validated_prototype: &ast::FunctionPrototype, is_extern: bool) -> Result<(), String> {
if self.functions.contains_key(&validated_prototype.name) {
return Err(format!("Function `{}` already exists", validated_prototype.name));
}
let parameters = validated_prototype.parameters.iter().map(|param| {
(param.name.clone(), param.ty.clone(), param.mutable)
}).collect();
let definition = FunctionDefinition {
parameters,
return_type: validated_prototype.return_type.clone(),
is_extern,
is_validated: false,
};
self.functions.insert(validated_prototype.name.clone(), definition);
Ok(())
}
fn __get_mut(&mut self, name: &str) -> Result<&mut FunctionDefinition, String> {
self.functions.get_mut(name)
.ok_or(format!("Could not find function `{}`", name))
}
fn __get(&self, name: &str) -> Result<&FunctionDefinition, String> {
self.functions.get(name)
.ok_or(format!("Could not find function `{}`", name))
}
// TODO: This and `get_validated_function_definition` may not ever be used
// (this functionality exists in finalized JIT product)
fn mark_function_validated(&mut self, name: &str) -> Result<(), String> {
self.__get_mut(name)?
.is_validated = true;
Ok(())
}
// TODO: Will this ever be used?
// fn get_validated_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition<'input>, String> {
// let function = self.__get(name)?;
// if!function.is_validated {
// // FIXME: This should not be possible
// Err(format!("Function `{}` was not validated", name))
// } else {
// Ok(function)
// }
// }
/// Returns a `FunctionDefinition` that is not guarenteed to have been
/// successfully validated
fn get_unchecked_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition, String> {
self.__get(name)
}
} | get_variable_mut | identifier_name |
mod.rs | pub mod context;
pub mod types;
///////////////////// Validation Helpers /////////////////////
use std::collections::HashMap;
use crate::frontend::validate::types::Type;
use crate::frontend::parse::ast;
///////////////////// TYPES /////////////////////
// NOTE: Offsets are i32 for Cranelift
/// Stores struct definitions
struct StructDefinition {
/// Map of field_name -> (type, byte offset)
fields: HashMap<String, StructField>,
}
pub struct StructField {
pub ty: Type,
pub offset: i32,
pub is_public: bool,
}
pub struct TypeTableEntry {
/// Size of type in bytes
pub size: usize,
/// Alignment of type in bytes
alignment: usize,
// TODO: Store fields and their offsets here too
// field_offets: HashMap<?>,
}
impl TypeTableEntry {
fn new(size: usize, alignment: usize) -> Self {
Self { size, alignment }
}
}
/// Stores type sizes and alignments
pub struct TypeTable {
/// Map of field_name -> (size, alignment) in bytes
data: HashMap<Type, TypeTableEntry>
}
impl TypeTable {
// TODO: Accept word size here and adjust table accordingly
// TODO: Support `isize` and `usize`
fn new() -> Self {
let mut data = HashMap::new();
// FIXME: This could be looked up via `match`, but this is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t.clone())),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty,.. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t) | else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty,.. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
// TODO: References an existing variable ->??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable ->??
// e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn get_variable_mut(&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match ¤t.memory_usage {
// keep looking for root
MemoryUsage::Alias(next) => target = next.clone(),
// TODO: I don't know if this is correct
// returning what was input -> use it instead of an allocation
MemoryUsage::FunctionParam => {
current.memory_usage = MemoryUsage::Alias(target);
break;
}
// Found the root
MemoryUsage::StackSlot
| MemoryUsage::StructReturn => {
current.memory_usage = MemoryUsage::StructReturn;
break;
}
}
}
}
}
///////////////////// FUNCTIONS /////////////////////
pub struct FunctionDefinition {
/// Function parameters (field_name, field_type, mutable) in order
pub parameters: Vec<(String, Type, bool)>,
pub return_type: Type,
pub is_extern: bool,
pub is_validated: bool,
}
pub struct FunctionTable {
// Map of (name -> data)
pub functions: HashMap<String, FunctionDefinition>
}
impl FunctionTable {
fn new() -> Self {
Self {
functions: HashMap::new(),
}
}
// FIXME: A few copies and clones, but nothing bad
fn forward_declare_function(&mut self, validated_prototype: &ast::FunctionPrototype, is_extern: bool) -> Result<(), String> {
if self.functions.contains_key(&validated_prototype.name) {
return Err(format!("Function `{}` already exists", validated_prototype.name));
}
let parameters = validated_prototype.parameters.iter().map(|param| {
(param.name.clone(), param.ty.clone(), param.mutable)
}).collect();
let definition = FunctionDefinition {
parameters,
return_type: validated_prototype.return_type.clone(),
is_extern,
is_validated: false,
};
self.functions.insert(validated_prototype.name.clone(), definition);
Ok(())
}
fn __get_mut(&mut self, name: &str) -> Result<&mut FunctionDefinition, String> {
self.functions.get_mut(name)
.ok_or(format!("Could not find function `{}`", name))
}
fn __get(&self, name: &str) -> Result<&FunctionDefinition, String> {
self.functions.get(name)
.ok_or(format!("Could not find function `{}`", name))
}
// TODO: This and `get_validated_function_definition` may not ever be used
// (this functionality exists in finalized JIT product)
fn mark_function_validated(&mut self, name: &str) -> Result<(), String> {
self.__get_mut(name)?
.is_validated = true;
Ok(())
}
// TODO: Will this ever be used?
// fn get_validated_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition<'input>, String> {
// let function = self.__get(name)?;
// if!function.is_validated {
// // FIXME: This should not be possible
// Err(format!("Function `{}` was not validated", name))
// } else {
// Ok(function)
// }
// }
/// Returns a `FunctionDefinition` that is not guarenteed to have been
/// successfully validated
fn get_unchecked_function_definition(&mut self, name: &str) -> Result<&FunctionDefinition, String> {
self.__get(name)
}
} | {
Ok(())
} | conditional_block |
value.rs | // Copyright 2019 The Starlark in Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Define the set type of Starlark
use crate::values::*;
use linked_hash_set::LinkedHashSet;
use std::borrow::BorrowMut;
use std::cmp::{Eq, Ordering, PartialEq};
use std::hash::{Hash, Hasher};
use std::num::Wrapping;
pub struct Set {
mutability: IterableMutability,
content: LinkedHashSet<ValueWrapper>,
}
impl Default for Set {
fn default() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type()!= "set" || v2.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while!self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result)) | }
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_str() {
assert_eq!("{1, 2, 3}", Set::from(vec![1, 2, 3]).unwrap().to_str());
assert_eq!(
"{1, {2, 3}}",
Set::from(vec![Value::from(1), Set::from(vec![2, 3]).unwrap()])
.unwrap()
.to_str()
);
assert_eq!("{1}", Set::from(vec![1]).unwrap().to_str());
assert_eq!("{}", Set::from(Vec::<i64>::new()).unwrap().to_str());
}
#[test]
fn equality_ignores_order() {
assert_eq!(
Set::from(vec![1, 2, 3]).unwrap(),
Set::from(vec![3, 2, 1]).unwrap(),
);
}
#[test]
fn test_value_alias() {
let v1 = Set::from(vec![1, 2]).unwrap();
let v2 = v1.clone();
Set::insert_if_absent(&v2, Value::from(3)).unwrap();
assert_eq!(v2.to_str(), "{1, 2, 3}");
assert_eq!(v1.to_str(), "{1, 2, 3}");
}
#[test]
fn test_is_descendant() {
let v1 = Set::from(vec![1, 2, 3]).unwrap();
let v2 = Set::from(vec![Value::new(1), Value::new(2), v1.clone()]).unwrap();
let v3 = Set::from(vec![Value::new(1), Value::new(2), v2.clone()]).unwrap();
assert!(v3.is_descendant_value(&v2));
assert!(v3.is_descendant_value(&v1));
assert!(v3.is_descendant_value(&v3));
assert!(v2.is_descendant_value(&v1));
assert!(v2.is_descendant_value(&v2));
assert!(!v2.is_descendant_value(&v3));
assert!(v1.is_descendant_value(&v1));
assert!(!v1.is_descendant_value(&v2));
assert!(!v1.is_descendant_value(&v3));
}
} | } else {
Err(ValueError::IncorrectParameterType) | random_line_split |
value.rs | // Copyright 2019 The Starlark in Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Define the set type of Starlark
use crate::values::*;
use linked_hash_set::LinkedHashSet;
use std::borrow::BorrowMut;
use std::cmp::{Eq, Ordering, PartialEq};
use std::hash::{Hash, Hasher};
use std::num::Wrapping;
/// A Starlark set: an insertion-ordered collection of unique, hashable values.
pub struct Set {
    // Tracks whether the set is still mutable or has been frozen.
    mutability: IterableMutability,
    // Elements wrapped with a precomputed hash; insertion order is preserved.
    content: LinkedHashSet<ValueWrapper>,
}
impl Default for Set {
    /// Produces an empty set that is still mutable.
    fn default() -> Self {
        Self {
            mutability: IterableMutability::Mutable,
            content: LinkedHashSet::new(),
        }
    }
}
impl Set {
    /// Creates a new, empty set value.
    pub fn empty() -> Value {
        Value::new(Set::default())
    }

    /// Builds a set from `values`, keeping only the first occurrence of each
    /// element (insertion order preserved).
    ///
    /// # Errors
    ///
    /// Fails if any value is unhashable (see `ValueWrapper::new`).
    pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
        let mut result = Self::default();
        for v in values.into_iter() {
            result
                .content
                .insert_if_absent(ValueWrapper::new(v.into())?);
        }
        Ok(Value::new(result))
    }

    /// Inserts `v` into `set` unless an equal element is already present.
    ///
    /// The value is cloned as appropriate for storage inside a container
    /// before insertion.
    ///
    /// # Errors
    ///
    /// Fails if `set` is not a set, is frozen, or `v` is unhashable.
    pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
        let v = v.clone_for_container_value(set)?;
        Self::mutate(set, &|hashset| {
            hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
            Ok(Value::from(None))
        })
    }

    /// Applies `f` to the underlying hash set of `v`.
    ///
    /// Checks first that `v` is a set and still mutable; `f` runs inside the
    /// downcast so it has exclusive access to the content.
    pub fn mutate(
        v: &Value,
        f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
    ) -> ValueResult {
        if v.get_type() != "set" {
            Err(ValueError::IncorrectParameterType)
        } else {
            let mut v = v.clone();
            v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
                x.mutability.test()?;
                f(&mut x.content)
            })
        }
    }

    /// Runs `f` against the underlying hash sets of two set values.
    ///
    /// # Errors
    ///
    /// Fails with `IncorrectParameterType` if either value is not a set.
    pub fn compare<Return>(
        v1: &Value,
        v2: &Value,
        f: &Fn(
            &LinkedHashSet<ValueWrapper>,
            &LinkedHashSet<ValueWrapper>,
        ) -> Result<Return, ValueError>,
    ) -> Result<Return, ValueError> {
        if v1.get_type() != "set" || v2.get_type() != "set" {
            Err(ValueError::IncorrectParameterType)
        } else {
            v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
        }
    }
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while!self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_str() {
assert_eq!("{1, 2, 3}", Set::from(vec![1, 2, 3]).unwrap().to_str());
assert_eq!(
"{1, {2, 3}}",
Set::from(vec![Value::from(1), Set::from(vec![2, 3]).unwrap()])
.unwrap()
.to_str()
);
assert_eq!("{1}", Set::from(vec![1]).unwrap().to_str());
assert_eq!("{}", Set::from(Vec::<i64>::new()).unwrap().to_str());
}
#[test]
fn equality_ignores_order() {
assert_eq!(
Set::from(vec![1, 2, 3]).unwrap(),
Set::from(vec![3, 2, 1]).unwrap(),
);
}
#[test]
fn test_value_alias() {
let v1 = Set::from(vec![1, 2]).unwrap();
let v2 = v1.clone();
Set::insert_if_absent(&v2, Value::from(3)).unwrap();
assert_eq!(v2.to_str(), "{1, 2, 3}");
assert_eq!(v1.to_str(), "{1, 2, 3}");
}
#[test]
fn test_is_descendant() {
let v1 = Set::from(vec![1, 2, 3]).unwrap();
let v2 = Set::from(vec![Value::new(1), Value::new(2), v1.clone()]).unwrap();
let v3 = Set::from(vec![Value::new(1), Value::new(2), v2.clone()]).unwrap();
assert!(v3.is_descendant_value(&v2));
assert!(v3.is_descendant_value(&v1));
assert!(v3.is_descendant_value(&v3));
assert!(v2.is_descendant_value(&v1));
assert!(v2.is_descendant_value(&v2));
assert!(!v2.is_descendant_value(&v3));
assert!(v1.is_descendant_value(&v1));
assert!(!v1.is_descendant_value(&v2));
assert!(!v1.is_descendant_value(&v3));
}
}
| {
Err(ValueError::IncorrectParameterType)
} | conditional_block |
value.rs | // Copyright 2019 The Starlark in Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Define the set type of Starlark
use crate::values::*;
use linked_hash_set::LinkedHashSet;
use std::borrow::BorrowMut;
use std::cmp::{Eq, Ordering, PartialEq};
use std::hash::{Hash, Hasher};
use std::num::Wrapping;
pub struct Set {
mutability: IterableMutability,
content: LinkedHashSet<ValueWrapper>,
}
impl Default for Set {
fn default() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type()!= "set" || v2.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while!self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult |
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_str() {
assert_eq!("{1, 2, 3}", Set::from(vec![1, 2, 3]).unwrap().to_str());
assert_eq!(
"{1, {2, 3}}",
Set::from(vec![Value::from(1), Set::from(vec![2, 3]).unwrap()])
.unwrap()
.to_str()
);
assert_eq!("{1}", Set::from(vec![1]).unwrap().to_str());
assert_eq!("{}", Set::from(Vec::<i64>::new()).unwrap().to_str());
}
#[test]
fn equality_ignores_order() {
assert_eq!(
Set::from(vec![1, 2, 3]).unwrap(),
Set::from(vec![3, 2, 1]).unwrap(),
);
}
#[test]
fn test_value_alias() {
let v1 = Set::from(vec![1, 2]).unwrap();
let v2 = v1.clone();
Set::insert_if_absent(&v2, Value::from(3)).unwrap();
assert_eq!(v2.to_str(), "{1, 2, 3}");
assert_eq!(v1.to_str(), "{1, 2, 3}");
}
#[test]
fn test_is_descendant() {
let v1 = Set::from(vec![1, 2, 3]).unwrap();
let v2 = Set::from(vec![Value::new(1), Value::new(2), v1.clone()]).unwrap();
let v3 = Set::from(vec![Value::new(1), Value::new(2), v2.clone()]).unwrap();
assert!(v3.is_descendant_value(&v2));
assert!(v3.is_descendant_value(&v1));
assert!(v3.is_descendant_value(&v3));
assert!(v2.is_descendant_value(&v1));
assert!(v2.is_descendant_value(&v2));
assert!(!v2.is_descendant_value(&v3));
assert!(v1.is_descendant_value(&v1));
assert!(!v1.is_descendant_value(&v2));
assert!(!v1.is_descendant_value(&v3));
}
}
| {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
} | identifier_body |
value.rs | // Copyright 2019 The Starlark in Rust Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Define the set type of Starlark
use crate::values::*;
use linked_hash_set::LinkedHashSet;
use std::borrow::BorrowMut;
use std::cmp::{Eq, Ordering, PartialEq};
use std::hash::{Hash, Hasher};
use std::num::Wrapping;
pub struct Set {
mutability: IterableMutability,
content: LinkedHashSet<ValueWrapper>,
}
impl Default for Set {
fn | () -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type()!= "set" || v2.get_type()!= "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while!self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_str() {
assert_eq!("{1, 2, 3}", Set::from(vec![1, 2, 3]).unwrap().to_str());
assert_eq!(
"{1, {2, 3}}",
Set::from(vec![Value::from(1), Set::from(vec![2, 3]).unwrap()])
.unwrap()
.to_str()
);
assert_eq!("{1}", Set::from(vec![1]).unwrap().to_str());
assert_eq!("{}", Set::from(Vec::<i64>::new()).unwrap().to_str());
}
#[test]
fn equality_ignores_order() {
assert_eq!(
Set::from(vec![1, 2, 3]).unwrap(),
Set::from(vec![3, 2, 1]).unwrap(),
);
}
#[test]
fn test_value_alias() {
let v1 = Set::from(vec![1, 2]).unwrap();
let v2 = v1.clone();
Set::insert_if_absent(&v2, Value::from(3)).unwrap();
assert_eq!(v2.to_str(), "{1, 2, 3}");
assert_eq!(v1.to_str(), "{1, 2, 3}");
}
#[test]
fn test_is_descendant() {
let v1 = Set::from(vec![1, 2, 3]).unwrap();
let v2 = Set::from(vec![Value::new(1), Value::new(2), v1.clone()]).unwrap();
let v3 = Set::from(vec![Value::new(1), Value::new(2), v2.clone()]).unwrap();
assert!(v3.is_descendant_value(&v2));
assert!(v3.is_descendant_value(&v1));
assert!(v3.is_descendant_value(&v3));
assert!(v2.is_descendant_value(&v1));
assert!(v2.is_descendant_value(&v2));
assert!(!v2.is_descendant_value(&v3));
assert!(v1.is_descendant_value(&v1));
assert!(!v1.is_descendant_value(&v2));
assert!(!v1.is_descendant_value(&v3));
}
}
| default | identifier_name |
general.rs | extern crate serenity;
use serenity::{framework::standard::{
help_commands,
macros::{ command, group, help},
Args,
CommandGroup, CommandResult, HelpOptions,
}, model::{channel::{Message,ReactionType}, id::UserId }, prelude::*};
use serenity::utils::Colour;
// use serenity::model::application::CurrentApplicationInfo;
use std::collections::HashSet;
#[help]
#[individual_command_tip = "§help [command] Gives info about the command\n"]
#[command_not_found_text = "This command is not valid\n"]
// #[strikethrough_commands_tip_in_guild(None)]
// If a user lacks permissions for a command, we can hide the command
// #[lacking_permissions = "Hide"]
// #[lacking_role = "Nothing"]
async fn help(
context: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = help_commands::with_embeds(context, msg, args, help_options, groups, owners).await;
Ok(())
}
#[group]
#[commands(avatar,ping, hi, about, embed, poll,which,server_info)]
#[description = "Some general commands\n"]
struct General;
#[command]
#[description = "Says pong on \"§ping\"\n"]
async fn ping(ctx: &Context, msg: &Message) -> CommandResult {
msg.reply(&ctx, "Pong§§§").await?;
Ok(())
}
#[command]
#[description = "Just react to your hi\n"]
#[aliases(hello, Hello, Hi)]
async fn hi(ctx: &Context, msg: &Message) -> CommandResult {
let phrase = format!("HIIII {}",&msg.author.name);
msg.reply(&ctx, phrase).await?;
// msg.reply(&ctx, msg.author_nick(&ctx).await.unwrap()).await?;
// msg.reply(&ctx, &msg.author.name).await?;
// msg.reply(&ctx, &msg.member.unwrap().nick.unwrap()).await?;
msg.react(ctx, '🔥').await?;
Ok(())
}
#[command]
#[description = "Server's Information\n"]
#[aliases(server)]
async fn server_info(ctx: &Context, msg: &Message) -> CommandResult {
l |
if let Some(description) = &guild.description {
e.field("Description",description,false);
};
e.field("Members",number_users,true)
//.field("MSG",number_msgs,true)
.field("Channels",number_channels,true)
.field("Roles",number_roles,true)
.field("Emojis",number_emoji,true)
.field("Members in voice",number_voice_user,true);
if let Some(icon_url) = guild.icon_url() {
e.image(icon_url);
};
e
// e.footer(|f| f.icon_url(&msg.mem)
})
});
msg.await.unwrap();
Ok(())
}
// //T
ODO mehhhh
// #[command]
// #[checks(Bot)]
// #[description = "Talk with your self\n"]
// #[aliases(talk)]
// async fn talk_to_self(ctx: &Context, msg: &Message) -> CommandResult {
// msg.reply(&ctx, "Hello, myself!").await?;
// Ok(())
// }
// #[check]
// #[name = "Bot"]
// async fn bot_check(ctx: &Context, msg: &Message) -> CheckResult {
// if let Some(member) = msg.member(&ctx.cache) {
// let user = member.user.read();
// user.bot.into()
// } else {
// false.into()
// }
// }
#[command]
#[description = "Bot will reply with pretty embed containing title and description of bot"]
async fn about(ctx: & Context, msg: &Message) -> CommandResult {
// Obtain Bot's profile pic: cache -> current info -> bot user -> bot icon
// let cache_http = &ctx.http;
// let current_info = cache_http.get_current_application_info();
// let current_info = match cache_http.get_current_application_info().await {
// Ok(c) => c,
// Err(err) => return Err(err.to_string()),
// };
// // let bot_user = current_info.id.to_user(cache_http);
// let bot_user = match current_info.id.to_user(cache_http).await {
// Ok(u) => u,
// // Err(err) => return Err(CommandError(err.to_string())),
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_user.avatar_url(){
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
// // let bot_icon = &ctx.http.get_current_application_info().await.id.to_user(&ctx.http).avatar_url;
// let bot_icon = match &ctx.http.get_current_application_info().await {
// Ok(u) => u.id//.to_user(&ctx.http).avatar_url
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match &bot_icon.to_user(&ctx.http).await {
// Ok(u) => u//.avatar_url()
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_icon.avatar_url() {
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title("`§23`");
e.description("Hellooooo!!!\nMy name is Caracol Tobias, and I'm a \"carangueijo\"(crab)\n");
//TODO: This dont work
// e.thumbnail(bot_icon);
// false = not inline;
e.fields(vec", false),
]);
e
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Bot will generate an embed based on input."]
#[usage = "title description <image_link>"]
#[example = "rust hihih https://docs.rs/rust-logo-20210302-1.52.0-nightly-35dbef235.png"]
async fn embed(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let title = args.single::<String>()?;
let description = args.single::<String>()?;
let image = args.single::<String>().unwrap_or("false".to_string());
let link = if image == "false" {
"https://i.imgur.com/pMBcpoq.png".to_string()
} else {
image.replace("<", "").replace(">", "")
};
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(title);
e.description(description);
e.image(link)
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Create a poll, with or without options\n"]
#[usage = "\"title\" \"options\""]
#[example = "\"Cinema tonight?\""]
#[example = "\"Choose one options\" \"Funny\" \"Great\" \"Cool\""]
#[min_args(1)]
#[max_args(27)]
async fn poll(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
// let abc: Vec<char> = vec![
// '🇦', '🇧', '🇨', '🇩', '🇪', '🇫', '🇬', '🇭', '🇮', '🇯', '🇰', '🇱', '🇲', '🇳', '🇴', '🇵', '🇶', '🇷',
// '🇸', '🇹', '🇺', '🇻', '🇼', '🇽', '🇾', '🇿',
// ];
let question = args.single_quoted::<String>()?;
let answers = args
.quoted()
.iter::<String>()
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
// let args = msg.content[2..].split_once(" ").unwrap();
// let mut title = String::from("Poll: ") + args.1;
let title = String::from("Poll: ") + &question;
// let options = args.1.split(';');
let mut description = String::new();
// let mut count_options: usize = 0;
let count_options: usize = answers.len();
let emojis = (0..count_options)
.map(|i| std::char::from_u32('🇦' as u32 + i as u32).expect("Failed to format emoji"))
.collect::<Vec<_>>();
let mut count = 0;
for &emoji in &emojis {
let option = answers.get(count).unwrap();
let string = format!("{} -> {}\n", ReactionType::Unicode(emoji.to_string()), option);
description.push_str(&string);
count +=1;
}
let embed = msg.channel_id.send_message(&ctx, |m| {
m.embed(|e| {
e.title(&title).description(&description).footer(|f| {
f.icon_url("https://www.clipartkey.com/mpngs/m/203-2037526_diamonds-clipart-blue-diamond-logo-png-hd.png")
.text("React with one emoji")
})
})
});
let poll = embed.await.unwrap();
if count_options == 0 {
poll.react(&ctx, '✅').await?;
poll.react(&ctx, '❌').await?;
} else {
for &emoji in &emojis {
poll
.react(&ctx.http, ReactionType::Unicode(emoji.to_string()))
.await?;
}
}
Ok(())
}
// use std::fs::File;
// use std::io::{self, prelude::*, BufReader};
#[command]
#[description("I will choose one of your given lines\nBetween the given lines it is necessary to have a enter\n")]
#[usage = "\noption 1\noption 2\n..."]
#[example = "\nFunny\nGreat\nCool"]
#[min_args(1)]
//TODO add feature to give a file and choose one random line of that file.
//TODO you can give a number and the bot will given x random lines
async fn which(ctx: &Context, msg: &Message) -> CommandResult {
// let file_name = msg.content[2..].split_once(" ").unwrap();
// if std::path::Path::new(&file_name.1).exists() {
// let file = File::open(&file_name.1)?;
// let reader = BufReader::new(file);
// for line in reader.lines() {
// // println!("{}", line?);
// msg.channel_id.say(&ctx,line?);
// }
// } else {
// msg.reply(&ctx, "The path given dont exist.").await?;
// }
let args = msg.content[2..].split_once("\n").unwrap();
let args = args.1.split("\n");
let mut count_options: usize = 0;
let mut v: Vec<String> = Vec::new();
for s in args {
count_options+=1;
v.push(s.to_string());
}
extern crate rand;
use rand::Rng;
let random_number = rand::thread_rng().gen_range(1,&count_options);
match v.get(random_number) {
Some(elem) => {
let string = format!("I choose -> {}\n", elem);
msg.reply(&ctx, string).await?;
},
None => { msg.reply(&ctx, "Something happen\nError\n").await?;},
}
Ok(())
}
#[command]
#[description = "Shows person's avatar\n"]
#[usage = "\"person\""]
#[example = "@person1"]
#[max_args(1)]
async fn avatar(ctx: &Context, msg: &Message) -> CommandResult {
let person = &msg.mentions;
if person.is_empty() && msg.content.is_empty() {
msg.channel_id.say(&ctx.http, "Error! Command is wrong! Try §help").await?;
return Ok(());
}
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
use serenity::utils::Colour;
e.colour(Colour::BLITZ_BLUE);
if person.is_empty() {
e.title(&msg.author.name);
e.image(&msg.author.avatar_url().unwrap());
}
else {
e.title(&person[0].name);
e.image(person[0].avatar_url().unwrap());
};
e
});
m
});
msg.await.unwrap();
Ok(())
}
| et guild = match ctx.cache.guild(&msg.guild_id.unwrap()).await {
Some(guild) => guild,
None => {
msg.reply(ctx, "Error" ).await;
return Ok(());
}
};
let number_users = guild.member_count;
// let number_msgs = channel.message_count;
let number_channels = guild.channels.len();
let number_emoji = guild.emojis.len();
let number_voice_user = guild.voice_states.len();
let number_roles = guild.roles.len();
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.colour(Colour::BLITZ_BLUE)
.title(&guild.name); | identifier_body |
general.rs | extern crate serenity;
use serenity::{framework::standard::{
help_commands,
macros::{ command, group, help},
Args,
CommandGroup, CommandResult, HelpOptions,
}, model::{channel::{Message,ReactionType}, id::UserId }, prelude::*};
use serenity::utils::Colour;
// use serenity::model::application::CurrentApplicationInfo;
use std::collections::HashSet;
#[help]
#[individual_command_tip = "§help [command] Gives info about the command\n"]
#[command_not_found_text = "This command is not valid\n"]
// #[strikethrough_commands_tip_in_guild(None)]
// If a user lacks permissions for a command, we can hide the command
// #[lacking_permissions = "Hide"]
// #[lacking_role = "Nothing"]
async fn help(
context: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = help_commands::with_embeds(context, msg, args, help_options, groups, owners).await;
Ok(())
}
#[group]
#[commands(avatar,ping, hi, about, embed, poll,which,server_info)]
#[description = "Some general commands\n"]
struct General;
#[command]
#[description = "Says pong on \"§ping\"\n"]
async fn ping(ctx: &Context, msg: &Message) -> CommandResult {
msg.reply(&ctx, "Pong§§§").await?;
Ok(())
}
#[command]
#[description = "Just react to your hi\n"]
#[aliases(hello, Hello, Hi)]
async fn hi(ctx: &Context, msg: &Message) -> CommandResult {
let phrase = format!("HIIII {}",&msg.author.name);
msg.reply(&ctx, phrase).await?;
// msg.reply(&ctx, msg.author_nick(&ctx).await.unwrap()).await?;
// msg.reply(&ctx, &msg.author.name).await?;
// msg.reply(&ctx, &msg.member.unwrap().nick.unwrap()).await?;
msg.react(ctx, '🔥').await?;
Ok(())
}
#[command]
#[description = "Server's Information\n"]
#[aliases(server)]
async fn server_info(ctx: &Context, msg: &Message) -> CommandResult {
let guild = match ctx.cache.guild(&msg.guild_id.unwrap()).await {
Some(guild) => guild,
None => {
msg.reply(ctx, "Error" ).await;
return Ok(());
}
};
let number_users = guild.member_count;
// let number_msgs = channel.message_count;
let number_channels = guild.channels.len();
let number_emoji = guild.emojis.len();
let number_voice_user = guild.voice_states.len();
let number_roles = guild.roles.len();
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.colour(Colour::BLITZ_BLUE)
.title(&guild.name);
if let Some(description) = &guild.description {
e.field("Description",description,false);
};
e.field("Members",number_users,true)
//.field("MSG",number_msgs,true)
.field("Channels",number_channels,true)
.field("Roles",number_roles,true)
.field("Emojis",number_emoji,true)
.field("Members in voice",number_voice_user,true);
if let Some(icon_url) = guild.icon_url() {
e.image(icon_url);
};
e
// e.footer(|f| f.icon_url(&msg.mem)
})
});
msg.await.unwrap();
Ok(())
}
// //TODO mehhhh
// #[command]
// #[checks(Bot)]
// #[description = "Talk with your self\n"]
// #[aliases(talk)]
// async fn talk_to_self(ctx: &Context, msg: &Message) -> CommandResult {
// msg.reply(&ctx, "Hello, myself!").await?;
// Ok(())
// }
// #[check]
// #[name = "Bot"]
// async fn bot_check(ctx: &Context, msg: &Message) -> CheckResult {
// if let Some(member) = msg.member(&ctx.cache) {
// let user = member.user.read();
// user.bot.into()
// } else {
// false.into()
// }
// }
#[command]
#[description = "Bot will reply with pretty embed containing title and description of bot"]
async fn about(ctx: & Context, msg: &Message) -> CommandResult {
// Obtain Bot's profile pic: cache -> current info -> bot user -> bot icon
// let cache_http = &ctx.http;
// let current_info = cache_http.get_current_application_info();
// let current_info = match cache_http.get_current_application_info().await {
// Ok(c) => c,
// Err(err) => return Err(err.to_string()),
// };
// // let bot_user = current_info.id.to_user(cache_http);
// let bot_user = match current_info.id.to_user(cache_http).await {
// Ok(u) => u,
// // Err(err) => return Err(CommandError(err.to_string())),
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_user.avatar_url(){
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
// // let bot_icon = &ctx.http.get_current_application_info().await.id.to_user(&ctx.http).avatar_url;
// let bot_icon = match &ctx.http.get_current_application_info().await {
// Ok(u) => u.id//.to_user(&ctx.http).avatar_url
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match &bot_icon.to_user(&ctx.http).await {
// Ok(u) => u//.avatar_url()
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_icon.avatar_url() {
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title("`§23`");
e.description("Hellooooo!!!\nMy name is Caracol Tobias, and I'm a \"carangueijo\"(crab)\n");
//TODO: This dont work
// e.thumbnail(bot_icon);
// false = not inline;
e.fields(vec", false),
]);
e
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Bot will generate an embed based on input."]
#[usage = "title description <image_link>"]
#[example = "rust hihih https://docs.rs/rust-logo-20210302-1.52.0-nightly-35dbef235.png"]
async fn embed(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let title = args.single::<String>()?;
let description = args.single::<String>()?;
let image = args.single::<String>().unwrap_or("false".to_string());
let link = if image == "false" {
"https://i.imgur.com/pMBcpoq.png".to_string()
} else {
image.replace("<", "").replace(">", "")
};
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(title);
e.description(description);
e.image(link)
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Create a poll, with or without options\n"]
#[usage = "\"title\" \"options\""]
#[example = "\"Cinema tonight?\""]
#[example = "\"Choose one options\" \"Funny\" \"Great\" \"Cool\""]
#[min_args(1)]
#[max_args(27)]
async fn poll(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
// let abc: Vec<char> = vec![
// '🇦', '🇧', '🇨', '🇩', '🇪', '🇫', '🇬', '🇭', '🇮', '🇯', '🇰', '🇱', '🇲', '🇳', '🇴', '🇵', '🇶', '🇷',
// '🇸', '🇹', '🇺', '🇻', '🇼', '🇽', '🇾', '🇿',
// ];
let question = args.single_quoted::<String>()?;
let answers = args
.quoted()
.iter::<String>()
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
// let args = msg.content[2..].split_once(" ").unwrap();
// let mut title = String::from("Poll: ") + args.1;
let title = String::from("Poll: ") + &question;
// let options = args.1.split(';');
let mut description = String::new();
// let mut count_options: usize = 0;
let count_options: usize = answers.len();
let emojis = (0..count_options)
.map(|i| std::char::from_u32('🇦' as u32 + i as u32).expect("Failed to format emoji"))
.collect::<Vec<_>>();
let mut count = 0;
for &emoji in &emojis {
let option = answers.get(count).unwrap();
let string = format!("{} -> {}\n", ReactionType::Unicode(emoji.to_string()), option);
description.push_str(&string);
count +=1;
}
let embed = msg.channel_id.send_message(&ctx, |m| {
m.embed(|e| {
e.title(&title).description(&description).footer(|f| {
f.icon_url("https://www.clipartkey.com/mpngs/m/203-2037526_diamonds-clipart-blue-diamond-logo-png-hd.png")
.text("React with one emoji")
})
})
});
let poll = embed.await.unwrap();
if count_options == 0 {
poll.react(&ctx, '✅').await?;
poll.react(&ctx, '❌').await?;
} else {
for &emoji in &emojis {
poll
.react(&ctx.http, ReactionType::Unicode(emoji.to_string()))
.await?;
}
}
Ok(())
}
// use std::fs::File;
// use std::io::{self, prelude::*, BufReader};
#[command]
#[description("I will choose one of your given lines\nBetween the given lines it is necessary to have a enter\n")]
#[usage = "\noption 1\noption 2\n..."]
#[example = "\nFunny\nGreat\nCool"]
#[min_args(1)]
//TODO add feature to give a file and choose one random line of that file.
//TODO you can give a number and the bot will given x random lines
async fn which(ctx: &Context, msg: &Message) -> CommandResult {
// let file_name = msg.content[2..].split_once(" ").unwrap();
// if std::path::Path::new(&file_name.1).exists() {
// let file = File::open(&file_name.1)?;
// let reader = BufReader::new(file);
// for line in reader.lines() {
// // println!("{}", line?);
// msg.channel_id.say(&ctx,line?);
// }
// } else {
// msg.reply(&ctx, "The path given dont exist.").await?;
// }
let args = msg.content[2..].split_once("\n").unwrap();
let args = args.1.split("\n");
let mut count_options: usize = 0;
let mut v: Vec<String> = Vec::new();
for s in args {
count_options+=1;
v.push(s.to_string());
}
extern crate rand;
use rand::Rng;
let random_number = rand::thread_rng().gen_range(1,&count_options);
match v.get(random_number) {
Some(elem) => {
let string = format!("I choose -> {}\n", elem);
msg.reply(&ctx, string).await?;
},
None => { msg.reply(&ctx, "Something happen\nError\n").await?;},
}
Ok(())
}
#[command]
#[description = "Shows person's avatar\n"]
#[usage = "\"person\""]
#[example = "@person1"]
#[max_args(1)]
async fn avatar(ctx: &Context, msg: &Message) -> CommandResult {
let person = &msg.mentions;
if person.is_empty() && msg.content.is_empty() {
msg.channel_id.say(&ctx.http, "Error! Command is wrong! Try §help").await?;
return Ok(());
}
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
use serenity::utils::Colour;
e.colour(Colour::BLITZ_BLUE);
if person.is_empty() {
e.title(&msg.author.name);
e.image(&msg.author.avatar_url().unwr | atar_url().unwrap());
};
e
});
m
});
msg.await.unwrap();
Ok(())
}
| ap());
}
else {
e.title(&person[0].name);
e.image(person[0].av | conditional_block |
general.rs | extern crate serenity;
use serenity::{framework::standard::{
help_commands,
macros::{ command, group, help},
Args,
CommandGroup, CommandResult, HelpOptions,
}, model::{channel::{Message,ReactionType}, id::UserId }, prelude::*};
use serenity::utils::Colour;
// use serenity::model::application::CurrentApplicationInfo;
use std::collections::HashSet;
#[help]
#[individual_command_tip = "§help [command] Gives info about the command\n"]
#[command_not_found_text = "This command is not valid\n"]
// #[strikethrough_commands_tip_in_guild(None)]
// If a user lacks permissions for a command, we can hide the command
// #[lacking_permissions = "Hide"]
// #[lacking_role = "Nothing"]
async fn help(
context: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = help_commands::with_embeds(context, msg, args, help_options, groups, owners).await;
Ok(())
}
#[group]
#[commands(avatar,ping, hi, about, embed, poll,which,server_info)]
#[description = "Some general commands\n"]
struct General;
#[command]
#[description = "Says pong on \"§ping\"\n"]
async fn ping(ctx: &Context, msg: &Message) -> CommandResult {
msg.reply(&ctx, "Pong§§§").await?;
Ok(())
}
#[command]
#[description = "Just react to your hi\n"]
#[aliases(hello, Hello, Hi)]
async fn hi(ctx: &Context, msg: &Message) -> CommandResult {
let phrase = format!("HIIII {}",&msg.author.name);
msg.reply(&ctx, phrase).await?;
// msg.reply(&ctx, msg.author_nick(&ctx).await.unwrap()).await?;
// msg.reply(&ctx, &msg.author.name).await?;
// msg.reply(&ctx, &msg.member.unwrap().nick.unwrap()).await?;
msg.react(ctx, '🔥').await?;
Ok(())
}
#[command]
#[description = "Server's Information\n"]
#[aliases(server)]
async fn server_info(ctx: &Context, msg: &Message) -> CommandResult {
let guild = match ctx.cache.guild(&msg.guild_id.unwrap()).await {
Some(guild) => guild,
None => {
msg.reply(ctx, "Error" ).await;
return Ok(());
}
};
let number_users = guild.member_count;
// let number_msgs = channel.message_count;
let number_channels = guild.channels.len();
let number_emoji = guild.emojis.len();
let number_voice_user = guild.voice_states.len();
let number_roles = guild.roles.len();
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.colour(Colour::BLITZ_BLUE)
.title(&guild.name);
if let Some(description) = &guild.description {
e.field("Description",description,false);
};
e.field("Members",number_users,true)
//.field("MSG",number_msgs,true)
.field("Channels",number_channels,true)
.field("Roles",number_roles,true)
.field("Emojis",number_emoji,true)
.field("Members in voice",number_voice_user,true);
if let Some(icon_url) = guild.icon_url() {
e.image(icon_url);
};
e
// e.footer(|f| f.icon_url(&msg.mem)
})
});
msg.await.unwrap();
Ok(())
}
// //TODO mehhhh
// #[command]
// #[checks(Bot)]
// #[description = "Talk with your self\n"]
// #[aliases(talk)]
// async fn talk_to_self(ctx: &Context, msg: &Message) -> CommandResult {
// msg.reply(&ctx, "Hello, myself!").await?;
// Ok(()) | // }
// #[check]
// #[name = "Bot"]
// async fn bot_check(ctx: &Context, msg: &Message) -> CheckResult {
// if let Some(member) = msg.member(&ctx.cache) {
// let user = member.user.read();
// user.bot.into()
// } else {
// false.into()
// }
// }
#[command]
#[description = "Bot will reply with pretty embed containing title and description of bot"]
async fn about(ctx: & Context, msg: &Message) -> CommandResult {
// Obtain Bot's profile pic: cache -> current info -> bot user -> bot icon
// let cache_http = &ctx.http;
// let current_info = cache_http.get_current_application_info();
// let current_info = match cache_http.get_current_application_info().await {
// Ok(c) => c,
// Err(err) => return Err(err.to_string()),
// };
// // let bot_user = current_info.id.to_user(cache_http);
// let bot_user = match current_info.id.to_user(cache_http).await {
// Ok(u) => u,
// // Err(err) => return Err(CommandError(err.to_string())),
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_user.avatar_url(){
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
// // let bot_icon = &ctx.http.get_current_application_info().await.id.to_user(&ctx.http).avatar_url;
// let bot_icon = match &ctx.http.get_current_application_info().await {
// Ok(u) => u.id//.to_user(&ctx.http).avatar_url
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match &bot_icon.to_user(&ctx.http).await {
// Ok(u) => u//.avatar_url()
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_icon.avatar_url() {
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title("`§23`");
e.description("Hellooooo!!!\nMy name is Caracol Tobias, and I'm a \"carangueijo\"(crab)\n");
//TODO: This dont work
// e.thumbnail(bot_icon);
// false = not inline;
e.fields(vec", false),
]);
e
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Bot will generate an embed based on input."]
#[usage = "title description <image_link>"]
#[example = "rust hihih https://docs.rs/rust-logo-20210302-1.52.0-nightly-35dbef235.png"]
async fn embed(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let title = args.single::<String>()?;
let description = args.single::<String>()?;
let image = args.single::<String>().unwrap_or("false".to_string());
let link = if image == "false" {
"https://i.imgur.com/pMBcpoq.png".to_string()
} else {
image.replace("<", "").replace(">", "")
};
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(title);
e.description(description);
e.image(link)
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Create a poll, with or without options\n"]
#[usage = "\"title\" \"options\""]
#[example = "\"Cinema tonight?\""]
#[example = "\"Choose one options\" \"Funny\" \"Great\" \"Cool\""]
#[min_args(1)]
#[max_args(27)]
async fn poll(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
// let abc: Vec<char> = vec![
// '🇦', '🇧', '🇨', '🇩', '🇪', '🇫', '🇬', '🇭', '🇮', '🇯', '🇰', '🇱', '🇲', '🇳', '🇴', '🇵', '🇶', '🇷',
// '🇸', '🇹', '🇺', '🇻', '🇼', '🇽', '🇾', '🇿',
// ];
let question = args.single_quoted::<String>()?;
let answers = args
.quoted()
.iter::<String>()
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
// let args = msg.content[2..].split_once(" ").unwrap();
// let mut title = String::from("Poll: ") + args.1;
let title = String::from("Poll: ") + &question;
// let options = args.1.split(';');
let mut description = String::new();
// let mut count_options: usize = 0;
let count_options: usize = answers.len();
let emojis = (0..count_options)
.map(|i| std::char::from_u32('🇦' as u32 + i as u32).expect("Failed to format emoji"))
.collect::<Vec<_>>();
let mut count = 0;
for &emoji in &emojis {
let option = answers.get(count).unwrap();
let string = format!("{} -> {}\n", ReactionType::Unicode(emoji.to_string()), option);
description.push_str(&string);
count +=1;
}
let embed = msg.channel_id.send_message(&ctx, |m| {
m.embed(|e| {
e.title(&title).description(&description).footer(|f| {
f.icon_url("https://www.clipartkey.com/mpngs/m/203-2037526_diamonds-clipart-blue-diamond-logo-png-hd.png")
.text("React with one emoji")
})
})
});
let poll = embed.await.unwrap();
if count_options == 0 {
poll.react(&ctx, '✅').await?;
poll.react(&ctx, '❌').await?;
} else {
for &emoji in &emojis {
poll
.react(&ctx.http, ReactionType::Unicode(emoji.to_string()))
.await?;
}
}
Ok(())
}
// use std::fs::File;
// use std::io::{self, prelude::*, BufReader};
#[command]
#[description("I will choose one of your given lines\nBetween the given lines it is necessary to have a enter\n")]
#[usage = "\noption 1\noption 2\n..."]
#[example = "\nFunny\nGreat\nCool"]
#[min_args(1)]
//TODO add feature to give a file and choose one random line of that file.
//TODO you can give a number and the bot will given x random lines
async fn which(ctx: &Context, msg: &Message) -> CommandResult {
// let file_name = msg.content[2..].split_once(" ").unwrap();
// if std::path::Path::new(&file_name.1).exists() {
// let file = File::open(&file_name.1)?;
// let reader = BufReader::new(file);
// for line in reader.lines() {
// // println!("{}", line?);
// msg.channel_id.say(&ctx,line?);
// }
// } else {
// msg.reply(&ctx, "The path given dont exist.").await?;
// }
let args = msg.content[2..].split_once("\n").unwrap();
let args = args.1.split("\n");
let mut count_options: usize = 0;
let mut v: Vec<String> = Vec::new();
for s in args {
count_options+=1;
v.push(s.to_string());
}
extern crate rand;
use rand::Rng;
let random_number = rand::thread_rng().gen_range(1,&count_options);
match v.get(random_number) {
Some(elem) => {
let string = format!("I choose -> {}\n", elem);
msg.reply(&ctx, string).await?;
},
None => { msg.reply(&ctx, "Something happen\nError\n").await?;},
}
Ok(())
}
#[command]
#[description = "Shows person's avatar\n"]
#[usage = "\"person\""]
#[example = "@person1"]
#[max_args(1)]
async fn avatar(ctx: &Context, msg: &Message) -> CommandResult {
let person = &msg.mentions;
if person.is_empty() && msg.content.is_empty() {
msg.channel_id.say(&ctx.http, "Error! Command is wrong! Try §help").await?;
return Ok(());
}
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
use serenity::utils::Colour;
e.colour(Colour::BLITZ_BLUE);
if person.is_empty() {
e.title(&msg.author.name);
e.image(&msg.author.avatar_url().unwrap());
}
else {
e.title(&person[0].name);
e.image(person[0].avatar_url().unwrap());
};
e
});
m
});
msg.await.unwrap();
Ok(())
} | random_line_split |
|
general.rs | extern crate serenity;
use serenity::{framework::standard::{
help_commands,
macros::{ command, group, help},
Args,
CommandGroup, CommandResult, HelpOptions,
}, model::{channel::{Message,ReactionType}, id::UserId }, prelude::*};
use serenity::utils::Colour;
// use serenity::model::application::CurrentApplicationInfo;
use std::collections::HashSet;
#[help]
#[individual_command_tip = "§help [command] Gives info about the command\n"]
#[command_not_found_text = "This command is not valid\n"]
// #[strikethrough_commands_tip_in_guild(None)]
// If a user lacks permissions for a command, we can hide the command
// #[lacking_permissions = "Hide"]
// #[lacking_role = "Nothing"]
async fn help(
context: &Context,
msg: &Message,
args: Args,
help_options: &'static HelpOptions,
groups: &[&'static CommandGroup],
owners: HashSet<UserId>,
) -> CommandResult {
let _ = help_commands::with_embeds(context, msg, args, help_options, groups, owners).await;
Ok(())
}
#[group]
#[commands(avatar,ping, hi, about, embed, poll,which,server_info)]
#[description = "Some general commands\n"]
struct General;
#[command]
#[description = "Says pong on \"§ping\"\n"]
async fn ping(ctx: &Context, msg: &Message) -> CommandResult {
msg.reply(&ctx, "Pong§§§").await?;
Ok(())
}
#[command]
#[description = "Just react to your hi\n"]
#[aliases(hello, Hello, Hi)]
async fn hi(ct | &Context, msg: &Message) -> CommandResult {
let phrase = format!("HIIII {}",&msg.author.name);
msg.reply(&ctx, phrase).await?;
// msg.reply(&ctx, msg.author_nick(&ctx).await.unwrap()).await?;
// msg.reply(&ctx, &msg.author.name).await?;
// msg.reply(&ctx, &msg.member.unwrap().nick.unwrap()).await?;
msg.react(ctx, '🔥').await?;
Ok(())
}
#[command]
#[description = "Server's Information\n"]
#[aliases(server)]
async fn server_info(ctx: &Context, msg: &Message) -> CommandResult {
let guild = match ctx.cache.guild(&msg.guild_id.unwrap()).await {
Some(guild) => guild,
None => {
msg.reply(ctx, "Error" ).await;
return Ok(());
}
};
let number_users = guild.member_count;
// let number_msgs = channel.message_count;
let number_channels = guild.channels.len();
let number_emoji = guild.emojis.len();
let number_voice_user = guild.voice_states.len();
let number_roles = guild.roles.len();
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.colour(Colour::BLITZ_BLUE)
.title(&guild.name);
if let Some(description) = &guild.description {
e.field("Description",description,false);
};
e.field("Members",number_users,true)
//.field("MSG",number_msgs,true)
.field("Channels",number_channels,true)
.field("Roles",number_roles,true)
.field("Emojis",number_emoji,true)
.field("Members in voice",number_voice_user,true);
if let Some(icon_url) = guild.icon_url() {
e.image(icon_url);
};
e
// e.footer(|f| f.icon_url(&msg.mem)
})
});
msg.await.unwrap();
Ok(())
}
// //TODO mehhhh
// #[command]
// #[checks(Bot)]
// #[description = "Talk with your self\n"]
// #[aliases(talk)]
// async fn talk_to_self(ctx: &Context, msg: &Message) -> CommandResult {
// msg.reply(&ctx, "Hello, myself!").await?;
// Ok(())
// }
// #[check]
// #[name = "Bot"]
// async fn bot_check(ctx: &Context, msg: &Message) -> CheckResult {
// if let Some(member) = msg.member(&ctx.cache) {
// let user = member.user.read();
// user.bot.into()
// } else {
// false.into()
// }
// }
#[command]
#[description = "Bot will reply with pretty embed containing title and description of bot"]
async fn about(ctx: & Context, msg: &Message) -> CommandResult {
// Obtain Bot's profile pic: cache -> current info -> bot user -> bot icon
// let cache_http = &ctx.http;
// let current_info = cache_http.get_current_application_info();
// let current_info = match cache_http.get_current_application_info().await {
// Ok(c) => c,
// Err(err) => return Err(err.to_string()),
// };
// // let bot_user = current_info.id.to_user(cache_http);
// let bot_user = match current_info.id.to_user(cache_http).await {
// Ok(u) => u,
// // Err(err) => return Err(CommandError(err.to_string())),
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_user.avatar_url(){
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
// // let bot_icon = &ctx.http.get_current_application_info().await.id.to_user(&ctx.http).avatar_url;
// let bot_icon = match &ctx.http.get_current_application_info().await {
// Ok(u) => u.id//.to_user(&ctx.http).avatar_url
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match &bot_icon.to_user(&ctx.http).await {
// Ok(u) => u//.avatar_url()
// ,
// Err(err) => return Err(err.to_string()),
// };
// let bot_icon = match bot_icon.avatar_url() {
// Some(u) => u,
// None => bot_user.default_avatar_url(),
// };
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title("`§23`");
e.description("Hellooooo!!!\nMy name is Caracol Tobias, and I'm a \"carangueijo\"(crab)\n");
//TODO: This dont work
// e.thumbnail(bot_icon);
// false = not inline;
e.fields(vec", false),
]);
e
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Bot will generate an embed based on input."]
#[usage = "title description <image_link>"]
#[example = "rust hihih https://docs.rs/rust-logo-20210302-1.52.0-nightly-35dbef235.png"]
async fn embed(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
let title = args.single::<String>()?;
let description = args.single::<String>()?;
let image = args.single::<String>().unwrap_or("false".to_string());
let link = if image == "false" {
"https://i.imgur.com/pMBcpoq.png".to_string()
} else {
image.replace("<", "").replace(">", "")
};
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
e.title(title);
e.description(description);
e.image(link)
});
m
});
msg.await.unwrap();
Ok(())
}
#[command]
#[description = "Create a poll, with or without options\n"]
#[usage = "\"title\" \"options\""]
#[example = "\"Cinema tonight?\""]
#[example = "\"Choose one options\" \"Funny\" \"Great\" \"Cool\""]
#[min_args(1)]
#[max_args(27)]
async fn poll(ctx: &Context, msg: &Message, mut args: Args) -> CommandResult {
// let abc: Vec<char> = vec![
// '🇦', '🇧', '🇨', '🇩', '🇪', '🇫', '🇬', '🇭', '🇮', '🇯', '🇰', '🇱', '🇲', '🇳', '🇴', '🇵', '🇶', '🇷',
// '🇸', '🇹', '🇺', '🇻', '🇼', '🇽', '🇾', '🇿',
// ];
let question = args.single_quoted::<String>()?;
let answers = args
.quoted()
.iter::<String>()
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
// let args = msg.content[2..].split_once(" ").unwrap();
// let mut title = String::from("Poll: ") + args.1;
let title = String::from("Poll: ") + &question;
// let options = args.1.split(';');
let mut description = String::new();
// let mut count_options: usize = 0;
let count_options: usize = answers.len();
let emojis = (0..count_options)
.map(|i| std::char::from_u32('🇦' as u32 + i as u32).expect("Failed to format emoji"))
.collect::<Vec<_>>();
let mut count = 0;
for &emoji in &emojis {
let option = answers.get(count).unwrap();
let string = format!("{} -> {}\n", ReactionType::Unicode(emoji.to_string()), option);
description.push_str(&string);
count +=1;
}
let embed = msg.channel_id.send_message(&ctx, |m| {
m.embed(|e| {
e.title(&title).description(&description).footer(|f| {
f.icon_url("https://www.clipartkey.com/mpngs/m/203-2037526_diamonds-clipart-blue-diamond-logo-png-hd.png")
.text("React with one emoji")
})
})
});
let poll = embed.await.unwrap();
if count_options == 0 {
poll.react(&ctx, '✅').await?;
poll.react(&ctx, '❌').await?;
} else {
for &emoji in &emojis {
poll
.react(&ctx.http, ReactionType::Unicode(emoji.to_string()))
.await?;
}
}
Ok(())
}
// use std::fs::File;
// use std::io::{self, prelude::*, BufReader};
#[command]
#[description("I will choose one of your given lines\nBetween the given lines it is necessary to have a enter\n")]
#[usage = "\noption 1\noption 2\n..."]
#[example = "\nFunny\nGreat\nCool"]
#[min_args(1)]
//TODO add feature to give a file and choose one random line of that file.
//TODO you can give a number and the bot will given x random lines
async fn which(ctx: &Context, msg: &Message) -> CommandResult {
// let file_name = msg.content[2..].split_once(" ").unwrap();
// if std::path::Path::new(&file_name.1).exists() {
// let file = File::open(&file_name.1)?;
// let reader = BufReader::new(file);
// for line in reader.lines() {
// // println!("{}", line?);
// msg.channel_id.say(&ctx,line?);
// }
// } else {
// msg.reply(&ctx, "The path given dont exist.").await?;
// }
let args = msg.content[2..].split_once("\n").unwrap();
let args = args.1.split("\n");
let mut count_options: usize = 0;
let mut v: Vec<String> = Vec::new();
for s in args {
count_options+=1;
v.push(s.to_string());
}
extern crate rand;
use rand::Rng;
let random_number = rand::thread_rng().gen_range(1,&count_options);
match v.get(random_number) {
Some(elem) => {
let string = format!("I choose -> {}\n", elem);
msg.reply(&ctx, string).await?;
},
None => { msg.reply(&ctx, "Something happen\nError\n").await?;},
}
Ok(())
}
#[command]
#[description = "Shows person's avatar\n"]
#[usage = "\"person\""]
#[example = "@person1"]
#[max_args(1)]
async fn avatar(ctx: &Context, msg: &Message) -> CommandResult {
let person = &msg.mentions;
if person.is_empty() && msg.content.is_empty() {
msg.channel_id.say(&ctx.http, "Error! Command is wrong! Try §help").await?;
return Ok(());
}
let msg = msg.channel_id.send_message(&ctx.http, |m| {
m.embed(|e| {
use serenity::utils::Colour;
e.colour(Colour::BLITZ_BLUE);
if person.is_empty() {
e.title(&msg.author.name);
e.image(&msg.author.avatar_url().unwrap());
}
else {
e.title(&person[0].name);
e.image(person[0].avatar_url().unwrap());
};
e
});
m
});
msg.await.unwrap();
Ok(())
}
| x: | identifier_name |
haystack.rs | //! Haystacks.
//!
//! A *haystack* refers to any linear structure which can be split or sliced
//! into smaller, non-overlapping parts. Examples are strings and vectors.
//!
//! ```rust
//! let haystack: &str = "hello"; // a string slice (`&str`) is a haystack.
//! let (a, b) = haystack.split_at(4); // it can be split into two strings.
//! let c = &a[1..3]; // it can be sliced.
//! ```
//!
//! The minimal haystack which cannot be further sliced is called a *codeword*.
//! For instance, the codeword of a string would be a UTF-8 sequence. A haystack
//! can therefore be viewed as a consecutive list of codewords.
//!
//! The boundary between codewords can be addressed using an *index*. The
//! numbers 1, 3 and 4 in the snippet above are sample indices of a string. An
//! index is usually a `usize`.
//!
//! An arbitrary number may point outside of a haystack, or in the interior of a
//! codeword. These indices are invalid. A *valid index* of a certain haystack
//! would only point to the boundaries.
use std::ops::{Deref, Range};
use std::fmt::Debug;
use std::mem;
/// Borrowed [`Haystack`].
///
/// Every `Haystack` type can be borrowed as references to `Hay` types. This
/// allows multiple similar types to share the same implementation (e.g. the
/// haystacks `&[T]`, `&mut [T]` and `Vec<T>` all have the same corresponding
/// hay type `[T]`).
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
/// `Hay`s are typically implemented on unsized slice types like `str` and `[T]`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Hay {
/// The index type of the haystack. Typically a `usize`.
///
/// Splitting a hay must be sublinear using this index type. For instance,
/// if we implement `Hay` for a linked list, the index should not be an
/// integer offset (`usize`) as this would require O(n) time to chase the
/// pointer and find the split point. Instead, for a linked list we should
/// directly use the node pointer as the index.
///
/// # Safety
///
/// Valid indices of a single hay have a total order, even this type does
/// not require an `Ord` bound — for instance, to order two linked list
/// cursors, we need to chase the links and see if they meet; this is slow
/// and not suitable for implementing `Ord`, but conceptually an ordering
/// can be defined on linked list cursors.
type Index: Copy + Debug + Eq;
/// Creates an empty hay.
///
/// # Safety
///
/// An empty hay's start and end indices must be the same, e.g.
///
/// ```rust
/// extern crate pattern_3;
/// use pattern_3::Hay;
///
/// let empty = <str>::empty();
/// assert_eq!(empty.start_index(), empty.end_index());
/// ```
///
/// This also suggests that there is exactly one valid index for an empty
/// hay.
///
/// There is no guarantee that two separate calls to `.empty()` will produce
/// the same hay reference.
fn empty<'a>() -> &'a Self;
/// Obtains the index to the start of the hay.
///
/// Usually this method returns `0`.
///
/// # Safety
///
/// Implementation must ensure that the start index of hay is the first
/// valid index, i.e. for all valid indices `i` of `self`, we have
/// `self.start_index() <= i`.
fn start_index(&self) -> Self::Index;
/// Obtains the index to the end of the hay.
///
/// Usually this method returns the length of the hay.
///
/// # Safety
///
/// Implementation must ensure that the end index of hay is the last valid
/// index, i.e. for all valid indices `i` of `self`, we have
/// `i <= self.end_index()`.
fn end_index(&self) -> Self::Index;
/// Returns the next immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.end_index()`.
///
/// Implementation must ensure that if `j = self.next_index(i)`, then `j`
/// is also a valid index satisfying `j > i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.next_index(0), 1);
/// assert_eq!(sample.next_index(1), 4);
/// assert_eq!(sample.next_index(4), 8);
/// }
/// ```
unsafe fn next_index(&self, index: Self::Index) -> Self::Index;
/// Returns the previous immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.start_index()`.
///
/// Implementation must ensure that if `j = self.prev_index(i)`, then `j`
/// is also a valid index satisfying `j < i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.prev_index(8), 4);
/// assert_eq!(sample.prev_index(4), 1);
/// assert_eq!(sample.prev_index(1), 0);
/// }
/// ```
unsafe fn prev_index(&self, index: Self::Index) -> Self::Index;
/// Obtains a child hay by slicing `self`.
///
/// # Safety
///
/// The two ends of the range must be valid indices. The start of the range
/// must be before the end of the range (`range.start <= range.end`).
unsafe fn slice_unchecked(&self, range: Range<Self::Index>) -> &Self;
}
/// Linear splittable structure.
///
/// A `Haystack` is implemented for reference and collection types such as
/// `&str`, `&mut [T]` and `Vec<T>`. Every haystack can be borrowed as an
/// underlying representation called a [`Hay`]. Multiple haystacks may share the
/// same hay type, and thus share the same implementation of string search
/// algorithms.
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Haystack: Deref + Sized where Self::Target: Hay {
/// Creates an empty haystack.
fn empty() -> Self;
/// Splits the haystack into 3 slices around the given range.
///
/// This method splits `self` into 3 non-overlapping parts:
///
/// 1. Before the range (`self[..range.start]`),
/// 2. Inside the range (`self[range]`), and
/// 3. After the range (`self[range.end..]`)
///
/// The returned array contains these 3 parts in order.
///
/// # Safety
///
/// Caller should ensure that the starts and end indices of `range` are
/// valid indices for the haystack `self` with `range.start <= range.end`.
///
/// If the haystack is a mutable reference (`&mut A`), implementation must
/// ensure that the 3 returned haystack are truly non-overlapping in memory.
/// This is required to uphold the "Aliasing XOR Mutability" guarantee. If a
/// haystack cannot be physically split into non-overlapping parts (e.g. in
/// `OsStr`), then `&mut A` should not implement `Haystack` either.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let haystack = &mut [0, 1, 2, 3, 4, 5, 6];
/// let [left, middle, right] = unsafe { haystack.split_around(2..6) };
/// assert_eq!(left, &mut [0, 1]);
/// assert_eq!(middle, &mut [2, 3, 4, 5]);
/// assert_eq!(right, &mut [6]);
/// ```
unsafe fn split_around(self, range: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
/// Subslices this haystack.
///
/// # Safety
///
/// The starts and end indices of `range` must be valid indices for the
/// haystack `self` with `range.start <= range.end`.
unsafe fn slice_unchecked(self, range: Range<<Self::Target as Hay>::Index>) -> Self {
let [_, middle, _] = self.split_around(range);
middle
}
/// Transforms the range from relative to self's parent to the original
/// haystack it was sliced from.
///
/// Typically this method can be simply implemented as
///
/// ```text
/// (original.start + parent.start)..(original.start + parent.end)
/// ```
///
/// If this haystack is a [`SharedHaystack`], this method would never be
/// called.
///
/// # Safety
///
/// The `parent` range should be a valid range relative to a hay *a*, which
/// was used to slice out *self*: `self == &a[parent]`.
///
/// Similarly, the `original` range should be a valid range relative to
/// another hay *b* used to slice out *a*: `a == &b[original]`.
///
/// The distance of `parent` must be consistent with the length of `self`.
///
/// This method should return a range which satisfies:
///
/// ```text
/// self == &b[parent][original] == &b[range]
/// ```
///
/// Slicing can be destructive and *invalidates* some indices, in particular
/// for owned type with a pointer-like index, e.g. linked list. In this
/// case, one should derive an entirely new index range from `self`, e.g.
/// returning `self.start_index()..self.end_index()`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let hay = b"This is a sample haystack";
/// let this = hay[2..23][3..19].to_vec();
/// assert_eq!(&*this, &hay[this.restore_range(2..23, 3..19)]);
/// ```
fn restore_range(
&self,
original: Range<<Self::Target as Hay>::Index>,
parent: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
/// A [`Haystack`] which can be shared and cheaply cloned (e.g. `&H`, `Rc<H>`).
///
/// If a haystack implements this marker trait, during internal operations the
/// original haystack will be retained in full and cloned, rather than being
/// sliced and splitted. Being a shared haystack allows searcher to see the
/// entire haystack, including the consumed portion.
pub trait SharedHaystack: Haystack + Clone
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{}
/// The borrowing behavior differs between a (unique) haystack and shared
/// haystack. We use *specialization* to distinguish between these behavior:
///
/// * When using `split_around()` and `slice_unchecked()` with a unique
/// haystack, the original haystack will be splitted or sliced accordingly
/// to maintain unique ownership.
/// * When using these functions with a shared haystack, the original haystack
/// will be cloned in full as that could provide more context into
/// searchers.
///
/// This trait will never be public.
trait SpanBehavior: Haystack
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{
fn take(&mut self) -> Self;
fn from_span(span: Span<Self>) -> Self;
unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self;
fn borrow_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
fn do_restore_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
impl<H: Haystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
default fn take(&mut self) -> Self {
mem::replace(self, Self::empty())
}
#[inline]
default fn from_span(span: Span<Self>) -> Self {
span.haystack
}
#[inline]
default fn borrow_range(&self, _: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
self.start_index()..self.end_index()
}
#[inline]
default fn do_restore_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
self.restore_range(range, subrange)
}
#[inline]
default unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
self.split_around(subrange)
}
#[inline]
default unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self {
self.slice_unchecked(subrange)
}
}
impl<H: SharedHaystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn take(&mut self) -> Self {
self.clone()
}
#[inline]
fn from_span(span: Span<Self>) -> Self {
unsafe {
span.haystack.slice_unchecked(span.range)
}
}
#[inline]
fn borrow_range(&self, range: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
range
}
#[inline]
fn do_restore_range(
&self,
_: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
subrange
}
#[inline]
unsafe fn split_around_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
[self.clone(), self.clone(), self]
}
#[inline]
unsafe fn slice_unchecked_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> Self {
self
}
}
/// A span is a haystack coupled with the original range where the haystack is found.
///
/// It can be considered as a tuple `(H, Range<H::Target::Index>)`
/// where the range is guaranteed to be valid for the haystack.
///
/// # Examples
///
/// ```
/// use pattern_3::Span;
///
/// let orig_str = "Hello世界";
/// let orig_span = Span::<&str>::from(orig_str);
///
/// // slice a span.
/// let span = unsafe { orig_span.slice_unchecked(3..8) };
///
/// // further slicing (note the range is relative to the original span)
/// let subspan = unsafe { span.slice_unchecked(4..8) };
///
/// // obtains the substring.
/// let substring = subspan.into();
/// assert_eq!(substring, "o世");
/// ```
///
/// Visualizing the spans:
///
/// ```text
///
/// 0 1 2 3 4 5 6 7 8 9 10 11
/// +---+---+---+---+---+---+---+---+---+---+---+
/// | H | e | l | l | o | U+4E16 | U+754C | orig_str
/// +---+---+---+---+---+---+---+---+---+---+---+
///
/// ^___________________________________________^ orig_span = (orig_str, 0..11)
///
/// ^___________________^ span = (orig_str, 3..8)
///
/// ^_______________^ subspan = (orig_str, 4..8)
/// ```
#[derive(Debug, Clone)]
pub struct Span<H: Haystack>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
haystack: H,
range: Range<<<H as Deref>::Target as Hay>::Index>,
//^ The `<H as Trait>` is to trick `#[derive]` not to generate
// the where bound for `H::Hay`.
}
/// Creates a span which covers the entire haystack.
impl<H: Haystack> From<H> for Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn from(haystack: H) -> Self {
let range = haystack.start_index()..haystack.end_index();
Self { haystack, range }
}
}
impl<H: SharedHaystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// Decomposes this span into the original haystack, and the range it focuses on.
#[inline]
pub fn into_parts(self) -> (H, Range<<H::Target as Hay>::Index>) {
(self.haystack, self.range)
}
/// Creates a span from a haystack, and a range it should focus on.
///
/// # Safety
///
/// The `range` must be a valid range relative to `haystack`.
#[inline]
pub unsafe fn from_parts(haystack: H, range: Range<<H::Target as Hay>::Index>) -> Self {
Self { h | &'h str> {
/// Reinterprets the string span as a byte-array span.
#[inline]
pub fn as_bytes(self) -> Span<&'h [u8]> {
Span {
haystack: self.haystack.as_bytes(),
range: self.range,
}
}
}
impl<H: Haystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// The range of the span, relative to the ultimate original haystack it was sliced from.
#[inline]
pub fn original_range(&self) -> Range<<H::Target as Hay>::Index> {
self.range.clone()
}
/// Borrows a shared span.
#[inline]
pub fn borrow(&self) -> Span<&H::Target> {
Span {
haystack: &*self.haystack,
range: self.haystack.borrow_range(self.range.clone()),
}
}
/// Checks whether this span is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.range.start == self.range.end
}
/// Returns this span by value, and replaces the original span by an empty
/// span.
#[inline]
pub fn take(&mut self) -> Self {
let haystack = self.haystack.take();
let range = self.range.clone();
self.range.end = self.range.start;
Span { haystack, range }
}
// FIXME: This should be changed to an `impl From<Span<H>> for H`.
/// Slices the original haystack to the focused range.
#[inline]
pub fn into(self) -> H {
H::from_span(self)
}
/// Splits this span into 3 spans around the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`. A safe
/// usage is like:
///
/// ```rust
/// # use pattern_3::{Span, Needle, Searcher};
/// # let span = Span::from("foo");
/// # let mut searcher = <&str as Needle<&str>>::into_searcher("o");
/// # (|| -> Option<()> {
/// let range = searcher.search(span.borrow())?;
/// let [left, middle, right] = unsafe { span.split_around(range) };
/// # Some(()) })();
/// ```
#[inline]
pub unsafe fn split_around(self, subrange: Range<<H::Target as Hay>::Index>) -> [Self; 3] {
let self_range = self.haystack.borrow_range(self.range.clone());
let [left, middle, right] = self.haystack.split_around_for_span(subrange.clone());
let left_range = left.do_restore_range(self.range.clone(), self_range.start..subrange.start);
let right_range = right.do_restore_range(self.range.clone(), subrange.end..self_range.end);
let middle_range = middle.do_restore_range(self.range, subrange);
[
Self { haystack: left, range: left_range },
Self { haystack: middle, range: middle_range },
Self { haystack: right, range: right_range },
]
}
/// Slices this span to the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`.
#[inline]
pub unsafe fn slice_unchecked(self, subrange: Range<<H::Target as Hay>::Index>) -> Self {
let haystack = self.haystack.slice_unchecked_for_span(subrange.clone());
let range = haystack.do_restore_range(self.range, subrange);
Self { haystack, range }
}
}
unsafe impl<'a, A: Hay +?Sized + 'a> Haystack for &'a A {
#[inline]
fn empty() -> Self {
A::empty()
}
#[inline]
unsafe fn split_around(self, range: Range<A::Index>) -> [Self; 3] {
[
self.slice_unchecked(self.start_index()..range.start),
self.slice_unchecked(range.clone()),
self.slice_unchecked(range.end..self.end_index()),
]
}
#[inline]
unsafe fn slice_unchecked(self, range: Range<A::Index>) -> Self {
A::slice_unchecked(self, range)
}
#[inline]
fn restore_range(&self, _: Range<A::Index>, _: Range<A::Index>) -> Range<A::Index> {
unreachable!()
}
}
impl<'a, A: Hay +?Sized + 'a> SharedHaystack for &'a A {}
| aystack, range }
}
}
impl<'h> Span< | identifier_body |
haystack.rs | //! Haystacks.
//!
//! A *haystack* refers to any linear structure which can be split or sliced
//! into smaller, non-overlapping parts. Examples are strings and vectors.
//!
//! ```rust
//! let haystack: &str = "hello"; // a string slice (`&str`) is a haystack.
//! let (a, b) = haystack.split_at(4); // it can be split into two strings.
//! let c = &a[1..3]; // it can be sliced.
//! ```
//!
//! The minimal haystack which cannot be further sliced is called a *codeword*.
//! For instance, the codeword of a string would be a UTF-8 sequence. A haystack
//! can therefore be viewed as a consecutive list of codewords.
//!
//! The boundary between codewords can be addressed using an *index*. The
//! numbers 1, 3 and 4 in the snippet above are sample indices of a string. An
//! index is usually a `usize`.
//!
//! An arbitrary number may point outside of a haystack, or in the interior of a
//! codeword. These indices are invalid. A *valid index* of a certain haystack
//! would only point to the boundaries.
use std::ops::{Deref, Range};
use std::fmt::Debug;
use std::mem;
/// Borrowed [`Haystack`].
///
/// Every `Haystack` type can be borrowed as references to `Hay` types. This
/// allows multiple similar types to share the same implementation (e.g. the
/// haystacks `&[T]`, `&mut [T]` and `Vec<T>` all have the same corresponding
/// hay type `[T]`).
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
/// `Hay`s are typically implemented on unsized slice types like `str` and `[T]`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Hay {
/// The index type of the haystack. Typically a `usize`.
///
/// Splitting a hay must be sublinear using this index type. For instance,
/// if we implement `Hay` for a linked list, the index should not be an
/// integer offset (`usize`) as this would require O(n) time to chase the
/// pointer and find the split point. Instead, for a linked list we should
/// directly use the node pointer as the index.
///
/// # Safety
///
/// Valid indices of a single hay have a total order, even this type does
/// not require an `Ord` bound — for instance, to order two linked list
/// cursors, we need to chase the links and see if they meet; this is slow
/// and not suitable for implementing `Ord`, but conceptually an ordering
/// can be defined on linked list cursors.
type Index: Copy + Debug + Eq;
/// Creates an empty hay.
///
/// # Safety
///
/// An empty hay's start and end indices must be the same, e.g.
///
/// ```rust
/// extern crate pattern_3;
/// use pattern_3::Hay;
///
/// let empty = <str>::empty();
/// assert_eq!(empty.start_index(), empty.end_index());
/// ```
///
/// This also suggests that there is exactly one valid index for an empty
/// hay.
///
/// There is no guarantee that two separate calls to `.empty()` will produce
/// the same hay reference.
fn empty<'a>() -> &'a Self;
/// Obtains the index to the start of the hay.
///
/// Usually this method returns `0`.
///
/// # Safety
///
/// Implementation must ensure that the start index of hay is the first
/// valid index, i.e. for all valid indices `i` of `self`, we have
/// `self.start_index() <= i`.
fn start_index(&self) -> Self::Index;
/// Obtains the index to the end of the hay.
///
/// Usually this method returns the length of the hay.
///
/// # Safety
///
/// Implementation must ensure that the end index of hay is the last valid
/// index, i.e. for all valid indices `i` of `self`, we have
/// `i <= self.end_index()`.
fn end_index(&self) -> Self::Index;
/// Returns the next immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.end_index()`.
///
/// Implementation must ensure that if `j = self.next_index(i)`, then `j`
/// is also a valid index satisfying `j > i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.next_index(0), 1);
/// assert_eq!(sample.next_index(1), 4);
/// assert_eq!(sample.next_index(4), 8);
/// }
/// ```
unsafe fn next_index(&self, index: Self::Index) -> Self::Index;
/// Returns the previous immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.start_index()`.
///
/// Implementation must ensure that if `j = self.prev_index(i)`, then `j`
/// is also a valid index satisfying `j < i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.prev_index(8), 4);
/// assert_eq!(sample.prev_index(4), 1);
/// assert_eq!(sample.prev_index(1), 0);
/// }
/// ```
unsafe fn prev_index(&self, index: Self::Index) -> Self::Index;
/// Obtains a child hay by slicing `self`.
///
/// # Safety
///
/// The two ends of the range must be valid indices. The start of the range
/// must be before the end of the range (`range.start <= range.end`).
unsafe fn slice_unchecked(&self, range: Range<Self::Index>) -> &Self;
}
/// Linear splittable structure.
///
/// A `Haystack` is implemented for reference and collection types such as
/// `&str`, `&mut [T]` and `Vec<T>`. Every haystack can be borrowed as an
/// underlying representation called a [`Hay`]. Multiple haystacks may share the
/// same hay type, and thus share the same implementation of string search
/// algorithms.
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Haystack: Deref + Sized where Self::Target: Hay {
/// Creates an empty haystack.
fn empty() -> Self;
/// Splits the haystack into 3 slices around the given range.
///
/// This method splits `self` into 3 non-overlapping parts:
///
/// 1. Before the range (`self[..range.start]`),
/// 2. Inside the range (`self[range]`), and
/// 3. After the range (`self[range.end..]`)
///
/// The returned array contains these 3 parts in order.
///
/// # Safety
///
/// Caller should ensure that the starts and end indices of `range` are
/// valid indices for the haystack `self` with `range.start <= range.end`.
///
/// If the haystack is a mutable reference (`&mut A`), implementation must
/// ensure that the 3 returned haystack are truly non-overlapping in memory.
/// This is required to uphold the "Aliasing XOR Mutability" guarantee. If a
/// haystack cannot be physically split into non-overlapping parts (e.g. in
/// `OsStr`), then `&mut A` should not implement `Haystack` either.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let haystack = &mut [0, 1, 2, 3, 4, 5, 6];
/// let [left, middle, right] = unsafe { haystack.split_around(2..6) };
/// assert_eq!(left, &mut [0, 1]);
/// assert_eq!(middle, &mut [2, 3, 4, 5]);
/// assert_eq!(right, &mut [6]);
/// ```
unsafe fn split_around(self, range: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
/// Subslices this haystack.
///
/// # Safety
///
/// The starts and end indices of `range` must be valid indices for the
/// haystack `self` with `range.start <= range.end`.
unsafe fn slice_unchecked(self, range: Range<<Self::Target as Hay>::Index>) -> Self {
let [_, middle, _] = self.split_around(range);
middle
}
/// Transforms the range from relative to self's parent to the original
/// haystack it was sliced from.
///
/// Typically this method can be simply implemented as
///
/// ```text
/// (original.start + parent.start)..(original.start + parent.end)
/// ```
///
/// If this haystack is a [`SharedHaystack`], this method would never be
/// called.
///
/// # Safety
///
/// The `parent` range should be a valid range relative to a hay *a*, which
/// was used to slice out *self*: `self == &a[parent]`.
///
/// Similarly, the `original` range should be a valid range relative to
/// another hay *b* used to slice out *a*: `a == &b[original]`.
///
/// The distance of `parent` must be consistent with the length of `self`.
///
/// This method should return a range which satisfies:
///
/// ```text
/// self == &b[parent][original] == &b[range]
/// ```
///
/// Slicing can be destructive and *invalidates* some indices, in particular
/// for owned type with a pointer-like index, e.g. linked list. In this
/// case, one should derive an entirely new index range from `self`, e.g.
/// returning `self.start_index()..self.end_index()`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let hay = b"This is a sample haystack";
/// let this = hay[2..23][3..19].to_vec();
/// assert_eq!(&*this, &hay[this.restore_range(2..23, 3..19)]);
/// ```
fn restore_range(
&self,
original: Range<<Self::Target as Hay>::Index>,
parent: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
/// A [`Haystack`] which can be shared and cheaply cloned (e.g. `&H`, `Rc<H>`).
///
/// If a haystack implements this marker trait, during internal operations the
/// original haystack will be retained in full and cloned, rather than being
/// sliced and splitted. Being a shared haystack allows searcher to see the
/// entire haystack, including the consumed portion.
pub trait SharedHaystack: Haystack + Clone
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{}
/// The borrowing behavior differs between a (unique) haystack and shared
/// haystack. We use *specialization* to distinguish between these behavior:
///
/// * When using `split_around()` and `slice_unchecked()` with a unique
/// haystack, the original haystack will be splitted or sliced accordingly
/// to maintain unique ownership.
/// * When using these functions with a shared haystack, the original haystack
/// will be cloned in full as that could provide more context into
/// searchers.
///
/// This trait will never be public.
trait SpanBehavior: Haystack
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{
fn take(&mut self) -> Self;
fn from_span(span: Span<Self>) -> Self;
unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self;
fn borrow_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
fn do_restore_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
impl<H: Haystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
default fn take(&mut self) -> Self {
mem::replace(self, Self::empty())
}
#[inline]
default fn from_span(span: Span<Self>) -> Self {
span.haystack
}
#[inline]
default fn borrow_range(&self, _: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
self.start_index()..self.end_index()
}
#[inline]
default fn do_restore_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
self.restore_range(range, subrange)
}
#[inline]
default unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
self.split_around(subrange)
}
#[inline]
default unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self {
self.slice_unchecked(subrange)
}
}
impl<H: SharedHaystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn take(&mut self) -> Self {
self.clone()
}
#[inline]
fn from_span(span: Span<Self>) -> Self {
unsafe {
span.haystack.slice_unchecked(span.range)
}
}
#[inline]
fn borrow_range(&self, range: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
range
}
#[inline]
fn do_restore_range(
&self,
_: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
subrange
}
#[inline]
unsafe fn split_around_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
[self.clone(), self.clone(), self]
}
#[inline]
unsafe fn slice_unchecked_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> Self {
self
}
}
/// A span is a haystack coupled with the original range where the haystack is found.
///
/// It can be considered as a tuple `(H, Range<H::Target::Index>)`
/// where the range is guaranteed to be valid for the haystack.
///
/// # Examples
///
/// ```
/// use pattern_3::Span;
///
/// let orig_str = "Hello世界";
/// let orig_span = Span::<&str>::from(orig_str);
///
/// // slice a span.
/// let span = unsafe { orig_span.slice_unchecked(3..8) };
///
/// // further slicing (note the range is relative to the original span)
/// let subspan = unsafe { span.slice_unchecked(4..8) };
///
/// // obtains the substring.
/// let substring = subspan.into();
/// assert_eq!(substring, "o世");
/// ```
///
/// Visualizing the spans:
///
/// ```text
///
/// 0 1 2 3 4 5 6 7 8 9 10 11
/// +---+---+---+---+---+---+---+---+---+---+---+
/// | H | e | l | l | o | U+4E16 | U+754C | orig_str
/// +---+---+---+---+---+---+---+---+---+---+---+
///
/// ^___________________________________________^ orig_span = (orig_str, 0..11)
///
/// ^___________________^ span = (orig_str, 3..8)
///
/// ^_______________^ subspan = (orig_str, 4..8)
/// ```
#[derive(Debug, Clone)]
pub struct Span<H: Haystack>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
haystack: H,
range: Range<<<H as Deref>::Target as Hay>::Index>,
//^ The `<H as Trait>` is to trick `#[derive]` not to generate
// the where bound for `H::Hay`.
}
/// Creates a span which covers the entire haystack.
impl<H: Haystack> From<H> for Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn from(haystack: H) -> Self {
let range = haystack.start_index()..haystack.end_index();
Self { haystack, range }
}
}
impl<H: SharedHaystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// Decomposes this span into the original haystack, and the range it focuses on.
#[inline]
pub fn into_parts(self) -> (H, Range<<H::Target as Hay>::Index>) {
(self.haystack, self.range)
}
/// Creates a span from a haystack, and a range it should focus on.
///
/// # Safety
///
/// The `range` must be a valid range relative to `haystack`.
#[inline]
pub unsafe fn from_parts(haystack: H, range: Range<<H::Target as Hay>::Index>) -> Self {
Self { haystack, range }
}
}
impl<'h> Span<&'h str> {
/// Reinterprets the string span as a byte-array span.
#[inline]
pub fn as_bytes(self) -> Span<&'h [u8]> {
Span {
haystack: self.haystack.as_bytes(),
range: self.range,
}
}
}
impl<H: Haystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// The range of the span, relative to the ultimate original haystack it was sliced from.
#[inline]
pub fn original_range(&self) -> Range<<H::Target as Hay>::Index> {
self.range.clone()
}
/// Borrows a shared span.
#[inline]
pub fn borrow(&self) -> Span<&H::Target> {
Span {
haystack: &*self.haystack,
range: self.haystack.borrow_range(self.range.clone()),
}
}
/// Checks whether this span is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.range.start == self.range.end
}
/// Returns this span by value, and replaces the original span by an empty
/// span.
#[inline]
pub fn take(&mut self) -> Self {
let haystack = self.haystack.take();
let range = self.range.clone();
self.range.end = self.range.start;
Span { haystack, range }
}
// FIXME: This should be changed to an `impl From<Span<H>> for H`.
/// Slices the original haystack to the focused range.
#[inline]
pub fn into(self) -> H {
H::from_span(self)
}
/// Splits this span into 3 spans around the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`. A safe
/// usage is like:
///
/// ```rust
/// # use pattern_3::{Span, Needle, Searcher};
/// # let span = Span::from("foo");
/// # let mut searcher = <&str as Needle<&str>>::into_searcher("o");
/// # (|| -> Option<()> {
/// let range = searcher.search(span.borrow())?;
/// let [left, middle, right] = unsafe { span.split_around(range) };
/// # Some(()) })();
/// ```
#[inline]
pub unsafe fn split_around(self, subrange: Range<<H::Target as Hay>::Index>) -> [Self; 3] {
let self_range = self.haystack.borrow_range(self.range.clone());
let [left, middle, right] = self.haystack.split_around_for_span(subrange.clone());
let left_range = left.do_restore_range(self.range.clone(), self_range.start..subrange.start);
let right_range = right.do_restore_range(self.range.clone(), subrange.end..self_range.end);
let middle_range = middle.do_restore_range(self.range, subrange);
[
Self { haystack: left, range: left_range },
Self { haystack: middle, range: middle_range },
Self { haystack: right, range: right_range },
]
}
/// Slices this span to the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`.
#[inline]
pub unsafe fn slice_unchecked(self, subrange: Range<<H::Target as Hay>::Index>) -> Self {
let haystack = self.haystack.slice_unchecked_for_span(subrange.clone());
let range = haystack.do_restore_range(self.range, subrange);
Self { haystack, range }
}
}
unsafe impl<'a, A: Hay +?Sized + 'a> Haystack for &'a A {
#[inline]
fn empty() -> Self {
| A::empty()
}
#[inline]
unsafe fn split_around(self, range: Range<A::Index>) -> [Self; 3] {
[
self.slice_unchecked(self.start_index()..range.start),
self.slice_unchecked(range.clone()),
self.slice_unchecked(range.end..self.end_index()),
]
}
#[inline]
unsafe fn slice_unchecked(self, range: Range<A::Index>) -> Self {
A::slice_unchecked(self, range)
}
#[inline]
fn restore_range(&self, _: Range<A::Index>, _: Range<A::Index>) -> Range<A::Index> {
unreachable!()
}
}
impl<'a, A: Hay +?Sized + 'a> SharedHaystack for &'a A {}
| identifier_name |
|
haystack.rs | //! Haystacks.
//!
//! A *haystack* refers to any linear structure which can be split or sliced
//! into smaller, non-overlapping parts. Examples are strings and vectors.
//!
//! ```rust
//! let haystack: &str = "hello"; // a string slice (`&str`) is a haystack.
//! let (a, b) = haystack.split_at(4); // it can be split into two strings.
//! let c = &a[1..3]; // it can be sliced.
//! ```
//!
//! The minimal haystack which cannot be further sliced is called a *codeword*.
//! For instance, the codeword of a string would be a UTF-8 sequence. A haystack
//! can therefore be viewed as a consecutive list of codewords.
//!
//! The boundary between codewords can be addressed using an *index*. The
//! numbers 1, 3 and 4 in the snippet above are sample indices of a string. An
//! index is usually a `usize`.
//!
//! An arbitrary number may point outside of a haystack, or in the interior of a
//! codeword. These indices are invalid. A *valid index* of a certain haystack
//! would only point to the boundaries.
use std::ops::{Deref, Range};
use std::fmt::Debug;
use std::mem;
/// Borrowed [`Haystack`].
///
/// Every `Haystack` type can be borrowed as references to `Hay` types. This
/// allows multiple similar types to share the same implementation (e.g. the
/// haystacks `&[T]`, `&mut [T]` and `Vec<T>` all have the same corresponding
/// hay type `[T]`).
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
/// `Hay`s are typically implemented on unsized slice types like `str` and `[T]`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Hay {
/// The index type of the haystack. Typically a `usize`.
///
/// Splitting a hay must be sublinear using this index type. For instance,
/// if we implement `Hay` for a linked list, the index should not be an
/// integer offset (`usize`) as this would require O(n) time to chase the
/// pointer and find the split point. Instead, for a linked list we should
/// directly use the node pointer as the index.
///
/// # Safety
///
/// Valid indices of a single hay have a total order, even this type does
/// not require an `Ord` bound — for instance, to order two linked list
/// cursors, we need to chase the links and see if they meet; this is slow
/// and not suitable for implementing `Ord`, but conceptually an ordering
/// can be defined on linked list cursors.
type Index: Copy + Debug + Eq;
/// Creates an empty hay.
///
/// # Safety
///
/// An empty hay's start and end indices must be the same, e.g.
///
/// ```rust
/// extern crate pattern_3;
/// use pattern_3::Hay;
///
/// let empty = <str>::empty();
/// assert_eq!(empty.start_index(), empty.end_index());
/// ```
///
/// This also suggests that there is exactly one valid index for an empty
/// hay.
///
/// There is no guarantee that two separate calls to `.empty()` will produce
/// the same hay reference.
fn empty<'a>() -> &'a Self;
/// Obtains the index to the start of the hay.
///
/// Usually this method returns `0`.
///
/// # Safety
///
/// Implementation must ensure that the start index of hay is the first
/// valid index, i.e. for all valid indices `i` of `self`, we have
/// `self.start_index() <= i`.
fn start_index(&self) -> Self::Index;
/// Obtains the index to the end of the hay.
///
/// Usually this method returns the length of the hay.
///
/// # Safety
///
/// Implementation must ensure that the end index of hay is the last valid
/// index, i.e. for all valid indices `i` of `self`, we have
/// `i <= self.end_index()`.
fn end_index(&self) -> Self::Index;
/// Returns the next immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.end_index()`.
///
/// Implementation must ensure that if `j = self.next_index(i)`, then `j`
/// is also a valid index satisfying `j > i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.next_index(0), 1);
/// assert_eq!(sample.next_index(1), 4);
/// assert_eq!(sample.next_index(4), 8);
/// }
/// ```
unsafe fn next_index(&self, index: Self::Index) -> Self::Index;
/// Returns the previous immediate index in this haystack.
///
/// # Safety
///
/// The `index` must be a valid index, and also must not equal to
/// `self.start_index()`.
///
/// Implementation must ensure that if `j = self.prev_index(i)`, then `j`
/// is also a valid index satisfying `j < i`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Hay;
///
/// let sample = "A→😀";
/// unsafe {
/// assert_eq!(sample.prev_index(8), 4);
/// assert_eq!(sample.prev_index(4), 1);
/// assert_eq!(sample.prev_index(1), 0);
/// }
/// ```
unsafe fn prev_index(&self, index: Self::Index) -> Self::Index;
/// Obtains a child hay by slicing `self`.
///
/// # Safety
///
/// The two ends of the range must be valid indices. The start of the range
/// must be before the end of the range (`range.start <= range.end`).
unsafe fn slice_unchecked(&self, range: Range<Self::Index>) -> &Self;
}
/// Linear splittable structure.
///
/// A `Haystack` is implemented for reference and collection types such as
/// `&str`, `&mut [T]` and `Vec<T>`. Every haystack can be borrowed as an
/// underlying representation called a [`Hay`]. Multiple haystacks may share the
/// same hay type, and thus share the same implementation of string search
/// algorithms.
///
/// In the other words, a `Haystack` is a generalized reference to `Hay`.
///
/// # Safety
///
/// This trait is unsafe as there are some unchecked requirements which the
/// implementor must uphold. Failing to meet these requirements would lead to
/// out-of-bound access. The safety requirements are written in each member of
/// this trait.
pub unsafe trait Haystack: Deref + Sized where Self::Target: Hay {
/// Creates an empty haystack.
fn empty() -> Self;
/// Splits the haystack into 3 slices around the given range.
///
/// This method splits `self` into 3 non-overlapping parts:
///
/// 1. Before the range (`self[..range.start]`),
/// 2. Inside the range (`self[range]`), and
/// 3. After the range (`self[range.end..]`)
///
/// The returned array contains these 3 parts in order.
///
/// # Safety
///
/// Caller should ensure that the starts and end indices of `range` are
/// valid indices for the haystack `self` with `range.start <= range.end`.
///
/// If the haystack is a mutable reference (`&mut A`), implementation must
/// ensure that the 3 returned haystack are truly non-overlapping in memory.
/// This is required to uphold the "Aliasing XOR Mutability" guarantee. If a
/// haystack cannot be physically split into non-overlapping parts (e.g. in
/// `OsStr`), then `&mut A` should not implement `Haystack` either.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let haystack = &mut [0, 1, 2, 3, 4, 5, 6];
/// let [left, middle, right] = unsafe { haystack.split_around(2..6) };
/// assert_eq!(left, &mut [0, 1]);
/// assert_eq!(middle, &mut [2, 3, 4, 5]);
/// assert_eq!(right, &mut [6]);
/// ```
unsafe fn split_around(self, range: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
/// Subslices this haystack.
///
/// # Safety
///
/// The starts and end indices of `range` must be valid indices for the
/// haystack `self` with `range.start <= range.end`.
unsafe fn slice_unchecked(self, range: Range<<Self::Target as Hay>::Index>) -> Self {
let [_, middle, _] = self.split_around(range);
middle
}
/// Transforms the range from relative to self's parent to the original
/// haystack it was sliced from.
///
/// Typically this method can be simply implemented as
///
/// ```text
/// (original.start + parent.start)..(original.start + parent.end)
/// ```
///
/// If this haystack is a [`SharedHaystack`], this method would never be
/// called.
///
/// # Safety
///
/// The `parent` range should be a valid range relative to a hay *a*, which
/// was used to slice out *self*: `self == &a[parent]`.
///
/// Similarly, the `original` range should be a valid range relative to
/// another hay *b* used to slice out *a*: `a == &b[original]`.
///
/// The distance of `parent` must be consistent with the length of `self`.
///
/// This method should return a range which satisfies:
///
/// ```text
/// self == &b[parent][original] == &b[range]
/// ```
///
/// Slicing can be destructive and *invalidates* some indices, in particular
/// for owned type with a pointer-like index, e.g. linked list. In this
/// case, one should derive an entirely new index range from `self`, e.g.
/// returning `self.start_index()..self.end_index()`.
///
/// # Examples
///
/// ```rust
/// use pattern_3::Haystack;
///
/// let hay = b"This is a sample haystack";
/// let this = hay[2..23][3..19].to_vec();
/// assert_eq!(&*this, &hay[this.restore_range(2..23, 3..19)]);
/// ```
fn restore_range(
&self,
original: Range<<Self::Target as Hay>::Index>,
parent: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
/// A [`Haystack`] which can be shared and cheaply cloned (e.g. `&H`, `Rc<H>`).
///
/// If a haystack implements this marker trait, during internal operations the
/// original haystack will be retained in full and cloned, rather than being
/// sliced and splitted. Being a shared haystack allows searcher to see the
/// entire haystack, including the consumed portion.
pub trait SharedHaystack: Haystack + Clone
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{}
/// The borrowing behavior differs between a (unique) haystack and shared
/// haystack. We use *specialization* to distinguish between these behavior:
///
/// * When using `split_around()` and `slice_unchecked()` with a unique
/// haystack, the original haystack will be splitted or sliced accordingly
/// to maintain unique ownership.
/// * When using these functions with a shared haystack, the original haystack
/// will be cloned in full as that could provide more context into
/// searchers.
///
/// This trait will never be public.
trait SpanBehavior: Haystack
where Self::Target: Hay // FIXME: RFC 2089 or 2289
{
fn take(&mut self) -> Self;
fn from_span(span: Span<Self>) -> Self;
unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3];
unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self;
fn borrow_range( | &self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
}
impl<H: Haystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
default fn take(&mut self) -> Self {
mem::replace(self, Self::empty())
}
#[inline]
default fn from_span(span: Span<Self>) -> Self {
span.haystack
}
#[inline]
default fn borrow_range(&self, _: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
self.start_index()..self.end_index()
}
#[inline]
default fn do_restore_range(
&self,
range: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
self.restore_range(range, subrange)
}
#[inline]
default unsafe fn split_around_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
self.split_around(subrange)
}
#[inline]
default unsafe fn slice_unchecked_for_span(self, subrange: Range<<Self::Target as Hay>::Index>) -> Self {
self.slice_unchecked(subrange)
}
}
impl<H: SharedHaystack> SpanBehavior for H
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn take(&mut self) -> Self {
self.clone()
}
#[inline]
fn from_span(span: Span<Self>) -> Self {
unsafe {
span.haystack.slice_unchecked(span.range)
}
}
#[inline]
fn borrow_range(&self, range: Range<<Self::Target as Hay>::Index>) -> Range<<Self::Target as Hay>::Index> {
range
}
#[inline]
fn do_restore_range(
&self,
_: Range<<Self::Target as Hay>::Index>,
subrange: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index> {
subrange
}
#[inline]
unsafe fn split_around_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> [Self; 3] {
[self.clone(), self.clone(), self]
}
#[inline]
unsafe fn slice_unchecked_for_span(self, _: Range<<Self::Target as Hay>::Index>) -> Self {
self
}
}
/// A span is a haystack coupled with the original range where the haystack is found.
///
/// It can be considered as a tuple `(H, Range<H::Target::Index>)`
/// where the range is guaranteed to be valid for the haystack.
///
/// # Examples
///
/// ```
/// use pattern_3::Span;
///
/// let orig_str = "Hello世界";
/// let orig_span = Span::<&str>::from(orig_str);
///
/// // slice a span.
/// let span = unsafe { orig_span.slice_unchecked(3..8) };
///
/// // further slicing (note the range is relative to the original span)
/// let subspan = unsafe { span.slice_unchecked(4..8) };
///
/// // obtains the substring.
/// let substring = subspan.into();
/// assert_eq!(substring, "o世");
/// ```
///
/// Visualizing the spans:
///
/// ```text
///
/// 0 1 2 3 4 5 6 7 8 9 10 11
/// +---+---+---+---+---+---+---+---+---+---+---+
/// | H | e | l | l | o | U+4E16 | U+754C | orig_str
/// +---+---+---+---+---+---+---+---+---+---+---+
///
/// ^___________________________________________^ orig_span = (orig_str, 0..11)
///
/// ^___________________^ span = (orig_str, 3..8)
///
/// ^_______________^ subspan = (orig_str, 4..8)
/// ```
#[derive(Debug, Clone)]
pub struct Span<H: Haystack>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
haystack: H,
range: Range<<<H as Deref>::Target as Hay>::Index>,
//^ The `<H as Trait>` is to trick `#[derive]` not to generate
// the where bound for `H::Hay`.
}
/// Creates a span which covers the entire haystack.
impl<H: Haystack> From<H> for Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
#[inline]
fn from(haystack: H) -> Self {
let range = haystack.start_index()..haystack.end_index();
Self { haystack, range }
}
}
impl<H: SharedHaystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// Decomposes this span into the original haystack, and the range it focuses on.
#[inline]
pub fn into_parts(self) -> (H, Range<<H::Target as Hay>::Index>) {
(self.haystack, self.range)
}
/// Creates a span from a haystack, and a range it should focus on.
///
/// # Safety
///
/// The `range` must be a valid range relative to `haystack`.
#[inline]
pub unsafe fn from_parts(haystack: H, range: Range<<H::Target as Hay>::Index>) -> Self {
Self { haystack, range }
}
}
impl<'h> Span<&'h str> {
/// Reinterprets the string span as a byte-array span.
#[inline]
pub fn as_bytes(self) -> Span<&'h [u8]> {
Span {
haystack: self.haystack.as_bytes(),
range: self.range,
}
}
}
impl<H: Haystack> Span<H>
where H::Target: Hay // FIXME: RFC 2089 or 2289
{
/// The range of the span, relative to the ultimate original haystack it was sliced from.
#[inline]
pub fn original_range(&self) -> Range<<H::Target as Hay>::Index> {
self.range.clone()
}
/// Borrows a shared span.
#[inline]
pub fn borrow(&self) -> Span<&H::Target> {
Span {
haystack: &*self.haystack,
range: self.haystack.borrow_range(self.range.clone()),
}
}
/// Checks whether this span is empty.
#[inline]
pub fn is_empty(&self) -> bool {
self.range.start == self.range.end
}
/// Returns this span by value, and replaces the original span by an empty
/// span.
#[inline]
pub fn take(&mut self) -> Self {
let haystack = self.haystack.take();
let range = self.range.clone();
self.range.end = self.range.start;
Span { haystack, range }
}
// FIXME: This should be changed to an `impl From<Span<H>> for H`.
/// Slices the original haystack to the focused range.
#[inline]
pub fn into(self) -> H {
H::from_span(self)
}
/// Splits this span into 3 spans around the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`. A safe
/// usage is like:
///
/// ```rust
/// # use pattern_3::{Span, Needle, Searcher};
/// # let span = Span::from("foo");
/// # let mut searcher = <&str as Needle<&str>>::into_searcher("o");
/// # (|| -> Option<()> {
/// let range = searcher.search(span.borrow())?;
/// let [left, middle, right] = unsafe { span.split_around(range) };
/// # Some(()) })();
/// ```
#[inline]
pub unsafe fn split_around(self, subrange: Range<<H::Target as Hay>::Index>) -> [Self; 3] {
let self_range = self.haystack.borrow_range(self.range.clone());
let [left, middle, right] = self.haystack.split_around_for_span(subrange.clone());
let left_range = left.do_restore_range(self.range.clone(), self_range.start..subrange.start);
let right_range = right.do_restore_range(self.range.clone(), subrange.end..self_range.end);
let middle_range = middle.do_restore_range(self.range, subrange);
[
Self { haystack: left, range: left_range },
Self { haystack: middle, range: middle_range },
Self { haystack: right, range: right_range },
]
}
/// Slices this span to the given range.
///
/// # Safety
///
/// `subrange` must be a valid range relative to `self.borrow()`.
#[inline]
pub unsafe fn slice_unchecked(self, subrange: Range<<H::Target as Hay>::Index>) -> Self {
let haystack = self.haystack.slice_unchecked_for_span(subrange.clone());
let range = haystack.do_restore_range(self.range, subrange);
Self { haystack, range }
}
}
unsafe impl<'a, A: Hay +?Sized + 'a> Haystack for &'a A {
#[inline]
fn empty() -> Self {
A::empty()
}
#[inline]
unsafe fn split_around(self, range: Range<A::Index>) -> [Self; 3] {
[
self.slice_unchecked(self.start_index()..range.start),
self.slice_unchecked(range.clone()),
self.slice_unchecked(range.end..self.end_index()),
]
}
#[inline]
unsafe fn slice_unchecked(self, range: Range<A::Index>) -> Self {
A::slice_unchecked(self, range)
}
#[inline]
fn restore_range(&self, _: Range<A::Index>, _: Range<A::Index>) -> Range<A::Index> {
unreachable!()
}
}
impl<'a, A: Hay +?Sized + 'a> SharedHaystack for &'a A {} | &self,
range: Range<<Self::Target as Hay>::Index>,
) -> Range<<Self::Target as Hay>::Index>;
fn do_restore_range( | random_line_split |
tokio_ct.rs | use
{
crate :: { SpawnHandle, LocalSpawnHandle, JoinHandle, BlockingHandle },
std :: { fmt, rc::Rc, future::Future, convert::TryFrom },
tokio :: { task::LocalSet, runtime::{ Builder, Runtime, Handle, RuntimeFlavor } },
futures_task :: { FutureObj, LocalFutureObj, Spawn, LocalSpawn, SpawnError },
};
#[derive(Debug, Clone)]
enum Spawner
{
Runtime( Rc<Runtime> ),
Handle ( Handle ),
}
/// An executor that uses a [`tokio::runtime::Runtime`] with the [`current thread`](tokio::runtime::Builder::new_current_thread)
/// and a [`tokio::task::LocalSet`]. Can spawn `!Send` futures.
///
/// ## Creation of the runtime
///
/// ```
/// // Make sure to set the `tokio_ct` feature on async_executors.
/// //
/// use
/// {
/// async_executors :: { TokioCt, LocalSpawnHandleExt },
/// tokio :: { runtime::Builder },
/// std :: { rc::Rc },
/// };
///
/// // If you need to configure tokio, you can use `tokio::runtimer::Builder`
/// // to create your [`Runtime`] and then create the `TokioCt` from it.
///
/// let exec = TokioCt::new().expect( "create tokio runtime" );
///
/// // block_on takes a &self, so if you need to `async move`,
/// // just clone it for use inside the async block.
/// //
/// exec.block_on( async
/// {
/// let not_send = async { let rc = Rc::new(()); };
///
/// // We can spawn!Send futures here.
/// //
/// let join_handle = exec.spawn_handle_local( not_send ).expect( "spawn" );
///
/// join_handle.await;
/// });
///```
///
/// ## Unwind Safety.
///
/// When a future spawned on this wrapper panics, the panic will be caught by tokio in the poll function.
///
/// You must only spawn futures to this API that are unwind safe. Tokio will wrap spawned tasks in
/// [`std::panic::AssertUnwindSafe`] and wrap the poll invocation with [`std::panic::catch_unwind`].
///
/// They reason that this is fine because they require `Send +'static` on the task. As far
/// as I can tell this is wrong. Unwind safety can be circumvented in several ways even with
/// `Send +'static` (eg. `parking_lot::Mutex` is `Send +'static` but `!UnwindSafe`).
///
/// You should make sure that if your future panics, no code that lives on after the panic,
/// nor any destructors called during the unwind can observe data in an inconsistent state.
///
/// Note: the future running from within `block_on` as opposed to `spawn` does not exhibit this behavior and will panic
/// the current thread.
///
/// Note that these are logic errors, not related to the class of problems that cannot happen
/// in safe rust (memory safety, undefined behavior, unsoundness, data races,...). See the relevant
/// [catch_unwind RFC](https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md)
/// and it's discussion threads for more info as well as the documentation of [`std::panic::UnwindSafe`]
/// for more information.
///
//
#[ derive( Debug, Clone ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_ct" )) ) ]
//
pub struct TokioCt
{
spawner: Spawner,
local: Rc< LocalSet >,
}
/// Create a `TokioCt` from a `Runtime`.
///
/// # Errors
///
/// Will fail if you pass a multithreaded runtime. In that case it will return your [`Runtime`].
//
impl TryFrom<Runtime> for TokioCt
{
type Error = Runtime;
fn try_from( rt: Runtime ) -> Result<Self, Runtime>
{
match rt.handle().runtime_flavor()
{
RuntimeFlavor::CurrentThread => Ok( Self
{
spawner: Spawner::Runtime( Rc::new(rt) ),
local: Rc::new( LocalSet::new() ),
}),
_ => Err( rt ),
}
}
}
/// Create a [`TokioCt`] from a [`Handle`].
///
/// # Errors
/// Will fail if you pass a handle to a multithreaded runtime. Will return your [`Handle`].
//
impl TryFrom<Handle> for TokioCt
{
type Error = Handle;
fn try_from( handle: Handle ) -> Result<Self, Handle>
{
match handle.runtime_flavor()
{
RuntimeFlavor::CurrentThread => Ok( Self
{
spawner: Spawner::Handle( handle ),
local: Rc::new( LocalSet::new() ),
}),
_ => Err( handle ),
}
}
}
impl TokioCt
{
/// Create a new `TokioCt`. Uses a default current thread [`Runtime`] setting timers and io depending
/// on the features enabled on _async_executors_.
//
pub fn new() -> Result<Self, TokioCtErr>
{
let mut builder = Builder::new_current_thread();
#[ cfg( feature = "tokio_io" ) ]
//
builder.enable_io();
#[ cfg( feature = "tokio_timer" ) ]
//
builder.enable_time();
let rt = builder.build().map_err( |e| TokioCtErr::Builder(e.kind()) )?;
Ok(Self
{
spawner: Spawner::Runtime(Rc::new( rt )),
local : Rc::new( LocalSet::new() ),
})
}
/// Try to construct a [`TokioCt`] from the currently entered [`Runtime`]. You can do this
/// if you want to construct your runtime with the tokio macros eg:
///
/// ```
/// #[ tokio::main(flavor = "current_thread") ]
/// async fn main()
/// {
/// //...
/// }
/// ```
///
/// # Warning
///
/// `TokioCt::new()` is preferred over this. It's brief, doesn't require macros and is
/// the intended behavior for this type. The whole library aims at a paradigm without
/// global executors.
///
/// The main footgun here is that you are now already in async context, so you must call
/// [`TokioCt::run_until`] instead of [`TokioCt::block_on`]. `block_on` will panic when run from
/// within an existing async context. This can be surprising for your upstream libraries to
/// which you pass a [`TokioCt`] executor.
///
/// # Errors
///
/// Will fail if trying to construct from a multithreaded runtime or if no runtime
/// is running.
///
///
pub fn try_current() -> Result< Self, TokioCtErr >
{
let handle = Handle::try_current()
.map_err(|_| TokioCtErr::NoRuntime )?;
Self::try_from( handle )
.map_err(|_| TokioCtErr::WrongFlavour )
}
/// This is the entry point for this executor. Once this call returns, no remaining tasks shall be polled anymore.
/// However the tasks stay in the executor, so if you make a second call to `block_on` with a new task, the older
/// tasks will start making progress again.
///
/// For simplicity, it's advised to just create top level task that you run through `block_on` and make sure your
/// program is done when it returns.
///
/// See: [`tokio::runtime::Runtime::block_on`]
///
/// ## Panics
///
/// This function will panic if it is called from an async context, including but not limited to making a nested
/// call. It will also panic if the provided future panics.
///
/// When you created this executor with [`TokioCt::try_current`], you should call `run_until` instead.
//
pub fn block_on<F: Future>( &self, f: F ) -> F::Output
{
match &self.spawner
{
Spawner::Runtime( rt ) => rt .block_on( self.local.run_until( f ) ),
Spawner::Handle ( handle ) => handle.block_on( self.local.run_until( f ) ),
}
}
/// Run the given future to completion. This is the entrypoint for execution of all the code spawned on this
/// executor when you are already in an async context.
/// Eg. when you have created this executor from an already running runtime with [`TokioCt::try_current`].
/// This will run the [`tokio::task::LocalSet`] which makes spawning possible.
///
/// Similarly to [`TokioCt::block_on`], spawned tasks will no longer be polled once the given future
/// has ended, but will stay in the executor and you can call this function again to have every task
/// continue to make progress.
//
pub async fn run_until<F: Future>( &self, f: F ) -> F::Output
{
self.local.run_until( f ).await
}
}
impl Spawn for TokioCt
{
fn | ( &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError>
{
// We drop the tokio JoinHandle, so the task becomes detached.
//
drop( self.local.spawn_local(future) );
Ok(())
}
}
impl LocalSpawn for TokioCt
{
fn spawn_local_obj( &self, future: LocalFutureObj<'static, ()> ) -> Result<(), SpawnError>
{
// We drop the tokio JoinHandle, so the task becomes detached.
//
drop( self.local.spawn_local(future) );
Ok(())
}
}
impl<Out:'static + Send> SpawnHandle<Out> for TokioCt
{
fn spawn_handle_obj( &self, future: FutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
let handle = match &self.spawner
{
Spawner::Runtime( rt ) => rt .spawn( future ),
Spawner::Handle ( handle ) => handle.spawn( future ),
};
Ok( JoinHandle::tokio(handle) )
}
}
impl<Out:'static> LocalSpawnHandle<Out> for TokioCt
{
fn spawn_handle_local_obj( &self, future: LocalFutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
let handle = self.local.spawn_local( future );
Ok( JoinHandle::tokio(handle) )
}
}
#[ cfg(all( feature = "timer", not(feature="tokio_timer" )) ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
{
Box::pin( futures_timer::Delay::new(dur) )
}
}
#[ cfg( feature = "tokio_timer" ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "tokio_timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
{
Box::pin( tokio::time::sleep(dur) )
}
}
#[ cfg( feature = "tokio_io" ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_io" )) ) ]
//
impl crate::TokioIo for TokioCt {}
impl crate::YieldNow for TokioCt {}
impl<R: Send +'static> crate::SpawnBlocking<R> for TokioCt
{
fn spawn_blocking<F>( &self, f: F ) -> BlockingHandle<R>
where F: FnOnce() -> R + Send +'static,
{
let handle = match &self.spawner
{
Spawner::Runtime( rt ) => rt .spawn_blocking( f ),
Spawner::Handle ( handle ) => handle.spawn_blocking( f ),
};
BlockingHandle::tokio( handle )
}
fn spawn_blocking_dyn( &self, f: Box< dyn FnOnce()->R + Send > ) -> BlockingHandle<R>
{
self.spawn_blocking( f )
}
}
#[cfg( feature = "tokio_ct" )]
/// A few errors that can happen while using _tokio_ executors.
#[derive(Debug, Clone)]
pub enum TokioCtErr
{
/// The [`tokio::runtime::Builder`] returned an error when construting the [`Runtime`].
Builder( std::io::ErrorKind ),
/// There are other clones of the [`Runtime`], so we cannot shut it down.
Cloned( TokioCt ),
/// This executor was constructed from the a [`Handle`], so cannot be shut down.
Handle( TokioCt ),
/// Can't create from current runtime because no runtime currently entered.
NoRuntime,
/// Can't construct from a multithreaded runtime.
WrongFlavour,
}
impl fmt::Display for TokioCtErr
{
fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result
{
use TokioCtErr::*;
match self
{
Builder(source) =>
write!( f, "tokio::runtime::Builder returned an error: {source}" ),
Cloned(_) => write!( f, "The TokioCt executor was cloned. Only the last copy can shut it down." ),
Handle(_) => write!( f, "The TokioCt was created from tokio::runtime::Handle. Only an owned executor (created from `Runtime`) can be shut down." ),
NoRuntime => write!( f, "Call to tokio::Handle::try_current failed, generally because no entered runtime is active." ),
WrongFlavour => write!( f, "Can't create TokioCt from a multithreaded `Runtime`." ),
}
}
}
impl std::error::Error for TokioCtErr {}
#[ cfg(test) ]
//
mod tests
{
use super::*;
// It's important that this is not Send, as we allow spawning!Send futures on it.
//
static_assertions::assert_not_impl_any!( TokioCt: Send, Sync );
}
| spawn_obj | identifier_name |
tokio_ct.rs | use
{
crate :: { SpawnHandle, LocalSpawnHandle, JoinHandle, BlockingHandle },
std :: { fmt, rc::Rc, future::Future, convert::TryFrom },
tokio :: { task::LocalSet, runtime::{ Builder, Runtime, Handle, RuntimeFlavor } },
futures_task :: { FutureObj, LocalFutureObj, Spawn, LocalSpawn, SpawnError },
};
#[derive(Debug, Clone)]
enum Spawner
{
Runtime( Rc<Runtime> ),
Handle ( Handle ),
}
/// An executor that uses a [`tokio::runtime::Runtime`] with the [`current thread`](tokio::runtime::Builder::new_current_thread)
/// and a [`tokio::task::LocalSet`]. Can spawn `!Send` futures.
///
/// ## Creation of the runtime
///
/// ```
/// // Make sure to set the `tokio_ct` feature on async_executors.
/// //
/// use
/// {
/// async_executors :: { TokioCt, LocalSpawnHandleExt },
/// tokio :: { runtime::Builder },
/// std :: { rc::Rc },
/// };
///
/// // If you need to configure tokio, you can use `tokio::runtime::Builder`
/// // to create your [`Runtime`] and then create the `TokioCt` from it.
///
/// let exec = TokioCt::new().expect( "create tokio runtime" );
///
/// // block_on takes a &self, so if you need to `async move`,
/// // just clone it for use inside the async block.
/// //
/// exec.block_on( async
/// {
/// let not_send = async { let rc = Rc::new(()); };
///
/// // We can spawn `!Send` futures here.
/// //
/// let join_handle = exec.spawn_handle_local( not_send ).expect( "spawn" );
///
/// join_handle.await;
/// });
///```
///
/// ## Unwind Safety.
///
/// When a future spawned on this wrapper panics, the panic will be caught by tokio in the poll function.
///
/// You must only spawn futures to this API that are unwind safe. Tokio will wrap spawned tasks in
/// [`std::panic::AssertUnwindSafe`] and wrap the poll invocation with [`std::panic::catch_unwind`].
///
/// They reason that this is fine because they require `Send + 'static` on the task. As far
/// as I can tell this is wrong. Unwind safety can be circumvented in several ways even with
/// `Send + 'static` (eg. `parking_lot::Mutex` is `Send + 'static` but `!UnwindSafe`).
///
/// You should make sure that if your future panics, no code that lives on after the panic,
/// nor any destructors called during the unwind can observe data in an inconsistent state.
///
/// Note: the future running from within `block_on` as opposed to `spawn` does not exhibit this behavior and will panic
/// the current thread.
///
/// Note that these are logic errors, not related to the class of problems that cannot happen
/// in safe rust (memory safety, undefined behavior, unsoundness, data races, ...). See the relevant
/// [catch_unwind RFC](https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md)
/// and its discussion threads for more info as well as the documentation of [`std::panic::UnwindSafe`]
/// for more information.
///
//
#[ derive( Debug, Clone ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_ct" )) ) ]
//
pub struct TokioCt
{
// Owned runtime or borrowed handle used for block_on/spawn_blocking.
spawner: Spawner,
// The LocalSet that makes spawning `!Send` futures possible; shared by clones.
local: Rc< LocalSet >,
}
/// Create a `TokioCt` from an existing `Runtime`.
///
/// # Errors
///
/// Will fail if you pass a multithreaded runtime. In that case it will return your [`Runtime`].
//
impl TryFrom<Runtime> for TokioCt
{
    type Error = Runtime;

    fn try_from( rt: Runtime ) -> Result<Self, Runtime>
    {
        // Only the current-thread flavour can cooperate with a LocalSet.
        if matches!( rt.handle().runtime_flavor(), RuntimeFlavor::CurrentThread )
        {
            Ok( Self
            {
                spawner: Spawner::Runtime( Rc::new(rt) ),
                local  : Rc::new( LocalSet::new() ),
            })
        }
        else
        {
            Err( rt )
        }
    }
}
/// Create a [`TokioCt`] from a [`Handle`].
///
/// # Errors
/// Will fail if you pass a handle to a multithreaded runtime. Will return your [`Handle`].
//
impl TryFrom<Handle> for TokioCt
{
    type Error = Handle;

    fn try_from( handle: Handle ) -> Result<Self, Handle>
    {
        // Only the current-thread flavour can cooperate with a LocalSet.
        if matches!( handle.runtime_flavor(), RuntimeFlavor::CurrentThread )
        {
            Ok( Self
            {
                spawner: Spawner::Handle( handle ),
                local  : Rc::new( LocalSet::new() ),
            })
        }
        else
        {
            Err( handle )
        }
    }
}
impl TokioCt
{
/// Create a new `TokioCt`. Uses a default current thread [`Runtime`] setting timers and io depending
/// on the features enabled on _async_executors_.
//
pub fn new() -> Result<Self, TokioCtErr>
{
let mut builder = Builder::new_current_thread();
// Only enable the io reactor / timer drivers when the matching cargo features are set.
#[ cfg( feature = "tokio_io" ) ]
//
builder.enable_io();
#[ cfg( feature = "tokio_timer" ) ]
//
builder.enable_time();
// Keep only the io::ErrorKind: TokioCtErr is Clone and io::Error isn't.
let rt = builder.build().map_err( |e| TokioCtErr::Builder(e.kind()) )?;
Ok(Self
{
spawner: Spawner::Runtime(Rc::new( rt )),
local : Rc::new( LocalSet::new() ),
})
}
/// Try to construct a [`TokioCt`] from the currently entered [`Runtime`]. You can do this
/// if you want to construct your runtime with the tokio macros eg:
///
/// ```
/// #[ tokio::main(flavor = "current_thread") ]
/// async fn main()
/// {
/// //...
/// }
/// ```
///
/// # Warning
///
/// `TokioCt::new()` is preferred over this. It's brief, doesn't require macros and is
/// the intended behavior for this type. The whole library aims at a paradigm without
/// global executors.
///
/// The main footgun here is that you are now already in async context, so you must call
/// [`TokioCt::run_until`] instead of [`TokioCt::block_on`]. `block_on` will panic when run from
/// within an existing async context. This can be surprising for your upstream libraries to
/// which you pass a [`TokioCt`] executor.
///
/// # Errors
///
/// Will fail if trying to construct from a multithreaded runtime or if no runtime
/// is running.
///
///
pub fn try_current() -> Result< Self, TokioCtErr >
{
let handle = Handle::try_current()
.map_err(|_| TokioCtErr::NoRuntime )?;
// try_from only fails on a non current-thread flavour here.
Self::try_from( handle )
.map_err(|_| TokioCtErr::WrongFlavour )
}
/// This is the entry point for this executor. Once this call returns, no remaining tasks shall be polled anymore.
/// However the tasks stay in the executor, so if you make a second call to `block_on` with a new task, the older
/// tasks will start making progress again.
///
/// For simplicity, it's advised to just create a top level task that you run through `block_on` and make sure your
/// program is done when it returns.
///
/// See: [`tokio::runtime::Runtime::block_on`]
///
/// ## Panics
///
/// This function will panic if it is called from an async context, including but not limited to making a nested
/// call. It will also panic if the provided future panics.
///
/// When you created this executor with [`TokioCt::try_current`], you should call `run_until` instead.
//
pub fn block_on<F: Future>( &self, f: F ) -> F::Output
{
// Drive the LocalSet while blocking on `f`, so spawned !Send tasks make progress.
match &self.spawner
{
Spawner::Runtime( rt ) => rt .block_on( self.local.run_until( f ) ),
Spawner::Handle ( handle ) => handle.block_on( self.local.run_until( f ) ),
}
}
/// Run the given future to completion. This is the entrypoint for execution of all the code spawned on this
/// executor when you are already in an async context.
/// Eg. when you have created this executor from an already running runtime with [`TokioCt::try_current`].
/// This will run the [`tokio::task::LocalSet`] which makes spawning possible.
///
/// Similarly to [`TokioCt::block_on`], spawned tasks will no longer be polled once the given future
/// has ended, but will stay in the executor and you can call this function again to have every task
/// continue to make progress.
//
pub async fn run_until<F: Future>( &self, f: F ) -> F::Output
{
self.local.run_until( f ).await
}
}
impl Spawn for TokioCt
{
    /// Spawn a `Send` future onto the LocalSet, detached (the tokio
    /// JoinHandle is discarded, so the task just runs to completion).
    fn spawn_obj( &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError>
    {
        let _ = self.local.spawn_local( future );
        Ok(())
    }
}
impl LocalSpawn for TokioCt
{
    /// Spawn a `!Send` future onto the LocalSet, detached (the tokio
    /// JoinHandle is discarded, so the task just runs to completion).
    fn spawn_local_obj( &self, future: LocalFutureObj<'static, ()> ) -> Result<(), SpawnError>
    {
        let _ = self.local.spawn_local( future );
        Ok(())
    }
}
impl<Out: 'static + Send> SpawnHandle<Out> for TokioCt
{
    /// Spawn a `Send` future and return a handle to await its output.
    fn spawn_handle_obj( &self, future: FutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
    {
        // Delegate to whichever spawner backs this executor, then wrap
        // tokio's JoinHandle in the crate-level JoinHandle type.
        let spawned = match &self.spawner
        {
            Spawner::Handle ( h  ) => h .spawn( future ),
            Spawner::Runtime( rt ) => rt.spawn( future ),
        };

        Ok( JoinHandle::tokio( spawned ) )
    }
}
impl<Out:'static> LocalSpawnHandle<Out> for TokioCt
{
fn spawn_handle_local_obj( &self, future: LocalFutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{ | }
#[ cfg(all( feature = "timer", not(feature="tokio_timer" )) ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
    /// Sleep via the portable `futures_timer` crate (used when the tokio
    /// timer driver feature is not enabled).
    fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
    {
        let delay = futures_timer::Delay::new( dur );
        Box::pin( delay )
    }
}
#[ cfg( feature = "tokio_timer" ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "tokio_timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
    /// Sleep via tokio's own timer driver (requires `enable_time` on the runtime).
    fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
    {
        let delay = tokio::time::sleep( dur );
        Box::pin( delay )
    }
}
// Marker impl: advertises that tokio's io reactor types work on this executor.
#[ cfg( feature = "tokio_io" ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_io" )) ) ]
//
impl crate::TokioIo for TokioCt {}
impl crate::YieldNow for TokioCt {}
impl<R: Send + 'static> crate::SpawnBlocking<R> for TokioCt
{
    /// Run a blocking closure on tokio's blocking thread pool and return
    /// a handle to await its result.
    fn spawn_blocking<F>( &self, f: F ) -> BlockingHandle<R>

        where F: FnOnce() -> R + Send + 'static,
    {
        let spawned = match &self.spawner
        {
            Spawner::Handle ( h  ) => h .spawn_blocking( f ),
            Spawner::Runtime( rt ) => rt.spawn_blocking( f ),
        };

        BlockingHandle::tokio( spawned )
    }

    /// Object-safe variant: a boxed `FnOnce` is itself `FnOnce`, so we can
    /// simply forward to the generic method.
    fn spawn_blocking_dyn( &self, f: Box< dyn FnOnce()->R + Send > ) -> BlockingHandle<R>
    {
        self.spawn_blocking( f )
    }
}
#[cfg( feature = "tokio_ct" )]
/// A few errors that can happen while using _tokio_ executors.
#[derive(Debug, Clone)]
pub enum TokioCtErr
{
/// The [`tokio::runtime::Builder`] returned an error when constructing the [`Runtime`].
Builder( std::io::ErrorKind ),
/// There are other clones of the [`Runtime`], so we cannot shut it down.
Cloned( TokioCt ),
/// This executor was constructed from a [`Handle`], so cannot be shut down.
Handle( TokioCt ),
/// Can't create from current runtime because no runtime currently entered.
NoRuntime,
/// Can't construct from a multithreaded runtime.
WrongFlavour,
}
impl fmt::Display for TokioCtErr
{
fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result
{
use TokioCtErr::*;
match self
{
Builder(source) =>
write!( f, "tokio::runtime::Builder returned an error: {source}" ),
Cloned(_) => write!( f, "The TokioCt executor was cloned. Only the last copy can shut it down." ),
Handle(_) => write!( f, "The TokioCt was created from tokio::runtime::Handle. Only an owned executor (created from `Runtime`) can be shut down." ),
NoRuntime => write!( f, "Call to tokio::Handle::try_current failed, generally because no entered runtime is active." ),
WrongFlavour => write!( f, "Can't create TokioCt from a multithreaded `Runtime`." ),
}
}
}
impl std::error::Error for TokioCtErr {}
#[ cfg(test) ]
//
mod tests
{
use super::*;
// It's important that this is not Send, as we allow spawning `!Send` futures on it.
//
static_assertions::assert_not_impl_any!( TokioCt: Send, Sync );
}
} | let handle = self.local.spawn_local( future );
Ok( JoinHandle::tokio(handle) )
} | random_line_split |
tokio_ct.rs | use
{
crate :: { SpawnHandle, LocalSpawnHandle, JoinHandle, BlockingHandle },
std :: { fmt, rc::Rc, future::Future, convert::TryFrom },
tokio :: { task::LocalSet, runtime::{ Builder, Runtime, Handle, RuntimeFlavor } },
futures_task :: { FutureObj, LocalFutureObj, Spawn, LocalSpawn, SpawnError },
};
#[derive(Debug, Clone)]
enum Spawner
{
Runtime( Rc<Runtime> ),
Handle ( Handle ),
}
/// An executor that uses a [`tokio::runtime::Runtime`] with the [`current thread`](tokio::runtime::Builder::new_current_thread)
/// and a [`tokio::task::LocalSet`]. Can spawn `!Send` futures.
///
/// ## Creation of the runtime
///
/// ```
/// // Make sure to set the `tokio_ct` feature on async_executors.
/// //
/// use
/// {
/// async_executors :: { TokioCt, LocalSpawnHandleExt },
/// tokio :: { runtime::Builder },
/// std :: { rc::Rc },
/// };
///
/// // If you need to configure tokio, you can use `tokio::runtimer::Builder`
/// // to create your [`Runtime`] and then create the `TokioCt` from it.
///
/// let exec = TokioCt::new().expect( "create tokio runtime" );
///
/// // block_on takes a &self, so if you need to `async move`,
/// // just clone it for use inside the async block.
/// //
/// exec.block_on( async
/// {
/// let not_send = async { let rc = Rc::new(()); };
///
/// // We can spawn!Send futures here.
/// //
/// let join_handle = exec.spawn_handle_local( not_send ).expect( "spawn" );
///
/// join_handle.await;
/// });
///```
///
/// ## Unwind Safety.
///
/// When a future spawned on this wrapper panics, the panic will be caught by tokio in the poll function.
///
/// You must only spawn futures to this API that are unwind safe. Tokio will wrap spawned tasks in
/// [`std::panic::AssertUnwindSafe`] and wrap the poll invocation with [`std::panic::catch_unwind`].
///
/// They reason that this is fine because they require `Send +'static` on the task. As far
/// as I can tell this is wrong. Unwind safety can be circumvented in several ways even with
/// `Send +'static` (eg. `parking_lot::Mutex` is `Send +'static` but `!UnwindSafe`).
///
/// You should make sure that if your future panics, no code that lives on after the panic,
/// nor any destructors called during the unwind can observe data in an inconsistent state.
///
/// Note: the future running from within `block_on` as opposed to `spawn` does not exhibit this behavior and will panic
/// the current thread.
///
/// Note that these are logic errors, not related to the class of problems that cannot happen
/// in safe rust (memory safety, undefined behavior, unsoundness, data races,...). See the relevant
/// [catch_unwind RFC](https://github.com/rust-lang/rfcs/blob/master/text/1236-stabilize-catch-panic.md)
/// and it's discussion threads for more info as well as the documentation of [`std::panic::UnwindSafe`]
/// for more information.
///
//
#[ derive( Debug, Clone ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_ct" )) ) ]
//
pub struct TokioCt
{
spawner: Spawner,
local: Rc< LocalSet >,
}
/// Create a `TokioCt` from a `Runtime`.
///
/// # Errors
///
/// Will fail if you pass a multithreaded runtime. In that case it will return your [`Runtime`].
//
impl TryFrom<Runtime> for TokioCt
{
type Error = Runtime;
fn try_from( rt: Runtime ) -> Result<Self, Runtime>
{
match rt.handle().runtime_flavor()
{
RuntimeFlavor::CurrentThread => Ok( Self
{
spawner: Spawner::Runtime( Rc::new(rt) ),
local: Rc::new( LocalSet::new() ),
}),
_ => Err( rt ),
}
}
}
/// Create a [`TokioCt`] from a [`Handle`].
///
/// # Errors
/// Will fail if you pass a handle to a multithreaded runtime. Will return your [`Handle`].
//
impl TryFrom<Handle> for TokioCt
{
type Error = Handle;
fn try_from( handle: Handle ) -> Result<Self, Handle>
|
}
impl TokioCt
{
/// Create a new `TokioCt`. Uses a default current thread [`Runtime`] setting timers and io depending
/// on the features enabled on _async_executors_.
//
pub fn new() -> Result<Self, TokioCtErr>
{
let mut builder = Builder::new_current_thread();
#[ cfg( feature = "tokio_io" ) ]
//
builder.enable_io();
#[ cfg( feature = "tokio_timer" ) ]
//
builder.enable_time();
let rt = builder.build().map_err( |e| TokioCtErr::Builder(e.kind()) )?;
Ok(Self
{
spawner: Spawner::Runtime(Rc::new( rt )),
local : Rc::new( LocalSet::new() ),
})
}
/// Try to construct a [`TokioCt`] from the currently entered [`Runtime`]. You can do this
/// if you want to construct your runtime with the tokio macros eg:
///
/// ```
/// #[ tokio::main(flavor = "current_thread") ]
/// async fn main()
/// {
/// //...
/// }
/// ```
///
/// # Warning
///
/// `TokioCt::new()` is preferred over this. It's brief, doesn't require macros and is
/// the intended behavior for this type. The whole library aims at a paradigm without
/// global executors.
///
/// The main footgun here is that you are now already in async context, so you must call
/// [`TokioCt::run_until`] instead of [`TokioCt::block_on`]. `block_on` will panic when run from
/// within an existing async context. This can be surprising for your upstream libraries to
/// which you pass a [`TokioCt`] executor.
///
/// # Errors
///
/// Will fail if trying to construct from a multithreaded runtime or if no runtime
/// is running.
///
///
pub fn try_current() -> Result< Self, TokioCtErr >
{
let handle = Handle::try_current()
.map_err(|_| TokioCtErr::NoRuntime )?;
Self::try_from( handle )
.map_err(|_| TokioCtErr::WrongFlavour )
}
/// This is the entry point for this executor. Once this call returns, no remaining tasks shall be polled anymore.
/// However the tasks stay in the executor, so if you make a second call to `block_on` with a new task, the older
/// tasks will start making progress again.
///
/// For simplicity, it's advised to just create top level task that you run through `block_on` and make sure your
/// program is done when it returns.
///
/// See: [`tokio::runtime::Runtime::block_on`]
///
/// ## Panics
///
/// This function will panic if it is called from an async context, including but not limited to making a nested
/// call. It will also panic if the provided future panics.
///
/// When you created this executor with [`TokioCt::try_current`], you should call `run_until` instead.
//
pub fn block_on<F: Future>( &self, f: F ) -> F::Output
{
match &self.spawner
{
Spawner::Runtime( rt ) => rt .block_on( self.local.run_until( f ) ),
Spawner::Handle ( handle ) => handle.block_on( self.local.run_until( f ) ),
}
}
/// Run the given future to completion. This is the entrypoint for execution of all the code spawned on this
/// executor when you are already in an async context.
/// Eg. when you have created this executor from an already running runtime with [`TokioCt::try_current`].
/// This will run the [`tokio::task::LocalSet`] which makes spawning possible.
///
/// Similarly to [`TokioCt::block_on`], spawned tasks will no longer be polled once the given future
/// has ended, but will stay in the executor and you can call this function again to have every task
/// continue to make progress.
//
pub async fn run_until<F: Future>( &self, f: F ) -> F::Output
{
self.local.run_until( f ).await
}
}
impl Spawn for TokioCt
{
fn spawn_obj( &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError>
{
// We drop the tokio JoinHandle, so the task becomes detached.
//
drop( self.local.spawn_local(future) );
Ok(())
}
}
impl LocalSpawn for TokioCt
{
fn spawn_local_obj( &self, future: LocalFutureObj<'static, ()> ) -> Result<(), SpawnError>
{
// We drop the tokio JoinHandle, so the task becomes detached.
//
drop( self.local.spawn_local(future) );
Ok(())
}
}
impl<Out:'static + Send> SpawnHandle<Out> for TokioCt
{
fn spawn_handle_obj( &self, future: FutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
{
let handle = match &self.spawner
{
Spawner::Runtime( rt ) => rt .spawn( future ),
Spawner::Handle ( handle ) => handle.spawn( future ),
};
Ok( JoinHandle::tokio(handle) )
}
}
impl<Out: 'static> LocalSpawnHandle<Out> for TokioCt
{
    /// Spawn a `!Send` future onto the LocalSet and return a handle to
    /// await its output.
    fn spawn_handle_local_obj( &self, future: LocalFutureObj<'static, Out> ) -> Result<JoinHandle<Out>, SpawnError>
    {
        // Wrap tokio's JoinHandle in the crate-level JoinHandle type.
        Ok( JoinHandle::tokio( self.local.spawn_local( future ) ) )
    }
}
#[ cfg(all( feature = "timer", not(feature="tokio_timer" )) ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
{
Box::pin( futures_timer::Delay::new(dur) )
}
}
#[ cfg( feature = "tokio_timer" ) ]
//
#[ cfg_attr( nightly, doc(cfg(all( feature = "tokio_timer", feature = "tokio_ct" ))) ) ]
//
impl crate::Timer for TokioCt
{
fn sleep( &self, dur: std::time::Duration ) -> futures_core::future::BoxFuture<'static, ()>
{
Box::pin( tokio::time::sleep(dur) )
}
}
#[ cfg( feature = "tokio_io" ) ]
//
#[ cfg_attr( nightly, doc(cfg( feature = "tokio_io" )) ) ]
//
impl crate::TokioIo for TokioCt {}
impl crate::YieldNow for TokioCt {}
impl<R: Send +'static> crate::SpawnBlocking<R> for TokioCt
{
fn spawn_blocking<F>( &self, f: F ) -> BlockingHandle<R>
where F: FnOnce() -> R + Send +'static,
{
let handle = match &self.spawner
{
Spawner::Runtime( rt ) => rt .spawn_blocking( f ),
Spawner::Handle ( handle ) => handle.spawn_blocking( f ),
};
BlockingHandle::tokio( handle )
}
fn spawn_blocking_dyn( &self, f: Box< dyn FnOnce()->R + Send > ) -> BlockingHandle<R>
{
self.spawn_blocking( f )
}
}
#[cfg( feature = "tokio_ct" )]
/// A few errors that can happen while using _tokio_ executors.
#[derive(Debug, Clone)]
pub enum TokioCtErr
{
/// The [`tokio::runtime::Builder`] returned an error when construting the [`Runtime`].
Builder( std::io::ErrorKind ),
/// There are other clones of the [`Runtime`], so we cannot shut it down.
Cloned( TokioCt ),
/// This executor was constructed from the a [`Handle`], so cannot be shut down.
Handle( TokioCt ),
/// Can't create from current runtime because no runtime currently entered.
NoRuntime,
/// Can't construct from a multithreaded runtime.
WrongFlavour,
}
impl fmt::Display for TokioCtErr
{
fn fmt( &self, f: &mut fmt::Formatter<'_> ) -> fmt::Result
{
use TokioCtErr::*;
match self
{
Builder(source) =>
write!( f, "tokio::runtime::Builder returned an error: {source}" ),
Cloned(_) => write!( f, "The TokioCt executor was cloned. Only the last copy can shut it down." ),
Handle(_) => write!( f, "The TokioCt was created from tokio::runtime::Handle. Only an owned executor (created from `Runtime`) can be shut down." ),
NoRuntime => write!( f, "Call to tokio::Handle::try_current failed, generally because no entered runtime is active." ),
WrongFlavour => write!( f, "Can't create TokioCt from a multithreaded `Runtime`." ),
}
}
}
impl std::error::Error for TokioCtErr {}
#[ cfg(test) ]
//
mod tests
{
use super::*;
// It's important that this is not Send, as we allow spawning!Send futures on it.
//
static_assertions::assert_not_impl_any!( TokioCt: Send, Sync );
}
| {
match handle.runtime_flavor()
{
RuntimeFlavor::CurrentThread => Ok( Self
{
spawner: Spawner::Handle( handle ) ,
local: Rc::new( LocalSet::new() ) ,
}),
_ => Err( handle ),
}
} | identifier_body |
lib.rs | /*!
Faster, growable buffering reader for when there's little to no need to modify data, nor to keep it alive past next read.
`std::io::BufReader` works by copying data from its internal buffer into user-provided `Vec`/`String`,
or, in case of `.lines()`, by emitting new heap-allocated `String` for each iteration.
While convenient and versatile, this is not the fastest approach.
Instead, `BufRefReader` references its internal buffer with each read, returning `&[u8]`.
Lack of extra allocations yields better read performance in situations where most (if not all) of read data:
- requires no modifications,
- is never used outside of a loop body and does not need to be duplicated into the heap for future use.
While being more performant, this approach also severely limits applicability of this reader:
- it does not (and cannot) implement `BufRead` and cannot be used as a direct replacement for `BufReader`;
- returned values are only valid between calls to reading functions (i.e. they cannot outlive even a single loop cycle), and Rust's borrow checker will prevent you from using stale references;
- consequently, `BufRefReader` cannot be turned into an `Iterator` (here's an easy way to think about it: what would `Iterator::collect()` return?);
- returned references are immutable;
- obviously, there's also nothing that can return `String`s or `&str`s for you.
## Choice of a buffer
Use [`MmapBuffer`](struct.MmapBuffer.html) unless:
- [slice-deque](https://github.com/gnzlbg/slice_deque) is not available for your platform (e.g. no support for `mmap`),
- you need very small buffers (smaller than 1 memory page),
- you're about to create a lot of buffers in a short period of time ([`new()`](trait.Buffer.html#tymethod.new) is relatively expensive),
- you're expecting buffer to grow a lot (consider, if possible, preallocating larger buffers through [`BufRefReaderBuilder.capacity`](struct.BufRefReaderBuilder.html#method.capacity)),
- you have some very special concerns re: memory maps and malloc bypass (special allocators, possible kernel inefficiency due to large amount of mapped memory regions etc.).
## Examples
Read data word by word:
```
use buf_ref_reader::*;
fn read<B: Buffer>() -> Result<(), Error>
where
Error: From<B::Error>,
// add this if you plan to `unwrap()` errors returned by `read()` et al.
//B::Error: std::fmt::Debug,
{
// &[u8] implements Read, hence we use it as our data source for this example
let data = b"lorem ipsum dolor sit amet";
let mut r = BufRefReaderBuilder::new(&data[..])
.capacity(4)
.build::<B>()?;
assert_eq!(r.read_until(b' ')?, Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"dolor "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"sit "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"amet"[..]));
assert_eq!(r.read_until(b' ')?, None); // EOF
assert_eq!(r.read_until(b' ')?, None);
Ok(())
}
fn main() {
read::<VecBuffer>().unwrap();
read::<MmapBuffer>().unwrap();
}
```
*/
#![warn(missing_docs)]
use quick_error::quick_error;
use std::io::{self, Read};
use memchr::memchr;
mod buffer;
pub use buffer::{
Buffer,
VecBuffer,
MmapBuffer,
};
use slice_deque::AllocError;
use std::convert::From;
/**
Buffering reader.

See [module-level docs](index.html) for examples.
*/
pub struct BufRefReader<R, B> {
// Wrapped source reader; data is pulled from here on demand.
src: R,
// Growable internal buffer; filled from `src`, consumed by `read`/`read_until`.
buf: B,
}
/**
Builder for [`BufRefReader`](struct.BufRefReader.html).

See [module-level docs](index.html) for examples.
*/
pub struct BufRefReaderBuilder<R> {
// Source reader handed over to the built `BufRefReader`.
src: R,
// Initial buffer capacity in bytes (default 8192).
bufsize: usize,
}
impl<R: Read> BufRefReaderBuilder<R> {
    /// Creates new builder with given reader and default options
    /// (initial capacity of 8192 bytes).
    pub fn new(src: R) -> Self {
        Self { src, bufsize: 8192 }
    }

    /// Set initial buffer capacity.
    pub fn capacity(mut self, bufsize: usize) -> Self {
        self.bufsize = bufsize;
        self
    }

    /// Create actual reader.
    ///
    /// Fails if the chosen `Buffer` implementation cannot allocate.
    pub fn build<B: Buffer>(self) -> Result<BufRefReader<R, B>, B::Error> {
        let buf = B::new(self.bufsize)?;
        Ok(BufRefReader { src: self.src, buf })
    }
}
// `quick_error!` generates `Display`, `std::error::Error` and the listed
// `From` conversions for the crate's error type.
quick_error! {
/// Error type that reading functions might emit
#[derive(Debug)]
pub enum Error {
/// Error reading from actual reader
IO(err: io::Error) { from() }
/// Indicates failure to create/grow buffer
Buf(err: AllocError) { from() }
}
}
impl From<()> for Error {
// VecBuffer never emits errors, it only panics
fn from(_: ()) -> Self |
}
impl<R: Read, B: Buffer> BufRefReader<R, B>
where Error: From<B::Error>
{
/// Creates buffered reader with default options. Look for [`BufRefReaderBuilder`](struct.BufRefReaderBuilder.html) for tweaks.
pub fn new(src: R) -> Result<BufRefReader<R, B>, B::Error> {
BufRefReaderBuilder::new(src)
.build()
}
// Pulls one chunk from `src` into the buffer's appendable region.
// Returns Some(where appended data starts within the filled part of the buffer),
// or None for EOF.
#[inline]
fn fill(&mut self) -> Result<Option<usize>, Error> {
// Make room first; may grow (and possibly reallocate) the buffer.
self.buf.enlarge()?;
let old_len = self.buf.len();
match self.src.read(self.buf.appendable())? {
0 => Ok(None), // EOF
n => {
self.buf.grow(n);
Ok(Some(old_len))
}
}
}
/**
Returns requested amount of bytes, or less if EOF prevents reader from fulfilling the request.

Returns:

- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read(&mut self, n: usize) -> Result<Option<&[u8]>, Error> {
while n > self.buf.len() {
// fill and expand buffer until either:
// - buffer starts holding the requested amount of data
// - EOF is reached
if self.fill()?.is_none() { break };
}
if self.buf.len() == 0 {
// reading past EOF
Ok(None)
} else {
// NOTE(review): at EOF fewer than `n` bytes may be buffered; this relies
// on `Buffer::consume` clamping to the buffered length — confirm in buffer.rs.
let output = self.buf.consume(n);
Ok(Some(output))
}
}
/**
Returns bytes up until and including `delim`, or until EOF mark. If no content is available, returns `None`.

Returns:

- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read_until(&mut self, delim: u8) -> Result<Option<&[u8]>, Error> {
let mut len = None;
// position within filled part of the buffer,
// from which to continue search for character
let mut pos = 0;
loop {
// fill and expand buffer until either:
// - `delim` appears in the buffer
// - EOF is reached
if let Some(n) = memchr(delim, &self.buf.filled()[pos..]) {
len = Some(pos+n);
break;
}
// `fill` returns the old length, i.e. the start of the newly appended
// data, so already-scanned bytes are never searched twice.
pos = match self.fill()? {
None => break, // EOF
Some(pos) => pos,
};
}
match len {
None => { // EOF: hand out the unterminated tail, if any
if self.buf.len() == 0 {
Ok(None)
} else {
let output = self.buf.consume(self.buf.len());
Ok(Some(output))
}
},
Some(len) => {
let len = len + 1; // also include matching delimiter
let output = self.buf.consume(len);
Ok(Some(output))
},
}
}
}
// Test corpus: the system word list, one word per line.
// NOTE(review): assumes /usr/share/dict/words exists at build time —
// tests will fail to compile on systems without it; confirm acceptable.
#[cfg(test)]
static WORDS: &'static [u8] = include_bytes!("/usr/share/dict/words");
#[cfg(test)]
mod tests {
use super::*;
use std::fmt::Debug;
fn read_until_empty_lines<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
// two spaces, three spaces, two spaces
let mut r = BufRefReaderBuilder::new(&b" lorem ipsum "[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), None);
}
#[test] fn read_until_empty_lines_vec() { read_until_empty_lines::<VecBuffer>() }
#[test] fn read_until_empty_lines_mmap() { read_until_empty_lines::<MmapBuffer>() }
fn read_until_words<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(4)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'\n');
while let Ok(Some(slice_buf)) = r.read_until(b'\n') {
let mut slice_words = words.next().unwrap()
.to_vec();
slice_words.push(b'\n');
assert_eq!(slice_buf, &slice_words[..]);
}
// reader: returned immediately after hitting EOF past last b'\n'
// words: this is.split(), hence empty string past last b'\n'
assert_eq!(words.next(), Some(&b""[..]));
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_vec() { read_until_words::<VecBuffer>() }
#[test] fn read_until_words_mmap() { read_until_words::<MmapBuffer>() }
// like read_until_words, but splits by rarest character, which is b'Q'
// also uses slightly bigger initial buffers
fn read_until_words_long<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(32)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'Q').peekable();
while let Ok(Some(slice_buf)) = r.read_until(b'Q') {
let mut slice_words = words.next().unwrap()
.to_vec();
if words.peek()!= None {
slice_words.push(b'Q');
}
assert_eq!(slice_buf, &slice_words[..]);
}
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_long_vec() { read_until_words_long::<VecBuffer>() }
#[test] fn read_until_words_long_mmap() { read_until_words_long::<MmapBuffer>() }
fn read<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(&b"lorem ipsum dolor sit amet"[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read(5).unwrap(), Some(&b"lorem"[..]));
assert_eq!(r.read(6).unwrap(), Some(&b" ipsum"[..]));
assert_eq!(r.read(1024).unwrap(), Some(&b" dolor sit amet"[..]));
assert_eq!(r.read(1).unwrap(), None);
}
#[test] fn read_vec() { read::<VecBuffer>() }
#[test] fn read_mmap() { read::<MmapBuffer>() }
fn read_words<B: Buffer>(cap: usize, read: usize)
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(cap)
.build::<B>()
.unwrap();
let mut words = WORDS.chunks(read);
while let Ok(Some(slice_buf)) = r.read(read) {
let slice_words = words.next().unwrap();
assert_eq!(slice_buf, slice_words);
}
assert_eq!(words.next(), None);
}
#[test] fn read_words_vec_4x3() { read_words::<VecBuffer>(4, 3) }
#[test] fn read_words_vec_4x5() { read_words::<VecBuffer>(4, 5) }
#[test] fn read_words_mmap_4x3() { read_words::<MmapBuffer>(4, 3) }
#[test] fn read_words_mmap_4x5() { read_words::<MmapBuffer>(4, 5) }
}
| {
unimplemented!()
} | identifier_body |
lib.rs | /*!
Faster, growable buffering reader for when there's little to no need to modify data, nor to keep it alive past next read.
`std::io::BufReader` works by copying data from its internal buffer into user-provided `Vec`/`String`,
or, in case of `.lines()`, by emitting new heap-allocated `String` for each iteration.
While convenient and versatile, this is not the fastest approach.
Instead, `BufRefReader` references its internal buffer with each read, returning `&[u8]`.
Lack of extra allocations yields better read performance in situations where most (if not all) of read data:
- requires no modifications,
- is never used outside of a loop body and does not need to be duplicated into the heap for future use.
While being more performant, this approach also severely limits applicability of this reader:
- it does not (and cannot) implement `BufRead` and cannot be used as a direct replacement for `BufReader`;
- returned values are only valid between calls to reading functions (i.e. they cannot outlive even a single loop cycle), and Rust's borrow checker will prevent you from using stale references;
- consequently, `BufRefReader` cannot be turned into an `Iterator` (here's an easy way to think about it: what would `Iterator::collect()` return?);
- returned references are immutable;
- obviously, there's also nothing that can return `String`s or `&str`s for you.
## Choice a of buffer
Use [`MmapBuffer`](struct.MmapBuffer.html) unless:
- [slice-deque](https://github.com/gnzlbg/slice_deque) is not available for your platform (e.g. no support for `mmap`),
- you need very small buffers (smaller than 1 memory page),
- you're about to create a lot of buffers in a short period of time ([`new()`](trait.Buffer.html#tymethod.new) is relatively expensive),
- you're expecting buffer to grow a lot (consider, if possible, preallocating larger buffers through [`BufRefReaderBuilder.capacity`](struct.BufRefReaderBuilder.html#method.capacity)),
- you have some very special concerns re: memory maps and malloc bypass (special allocators, possible kernel inefficiency due to large amount of mapped memory regions etc.).
## Examples
Read data word by word:
```
use buf_ref_reader::*;
fn read<B: Buffer>() -> Result<(), Error>
where
Error: From<B::Error>,
// add this if you plan to `unwrap()` errors returned by `read()` et al.
//B::Error: std::fmt::Debug,
{
// &[u8] implements Read, hence we use it as our data source for this example
let data = b"lorem ipsum dolor sit amet";
let mut r = BufRefReaderBuilder::new(&data[..])
.capacity(4)
.build::<B>()?;
assert_eq!(r.read_until(b' ')?, Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"dolor "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"sit "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"amet"[..]));
assert_eq!(r.read_until(b' ')?, None); // EOF
assert_eq!(r.read_until(b' ')?, None);
Ok(())
}
fn main() {
read::<VecBuffer>().unwrap();
read::<MmapBuffer>().unwrap();
}
```
*/
#![warn(missing_docs)]
use quick_error::quick_error;
use std::io::{self, Read};
use memchr::memchr;
mod buffer;
pub use buffer::{
Buffer,
VecBuffer,
MmapBuffer,
};
use slice_deque::AllocError;
use std::convert::From;
/**
Buffering reader.
See [module-level docs](index.html) for examples.
*/
pub struct BufRefReader<R, B> {
src: R,
buf: B,
}
/**
Builder for [`BufRefReader`](struct.BufRefReader.html).
See [module-level docs](index.html) for examples.
*/
pub struct BufRefReaderBuilder<R> {
src: R,
bufsize: usize,
}
impl<R: Read> BufRefReaderBuilder<R> {
/// Creates new builder with given reader and default options.
pub fn new(src: R) -> Self {
BufRefReaderBuilder {
src,
bufsize: 8192,
}
}
/// Set initial buffer capacity.
pub fn capacity(mut self, bufsize: usize) -> Self {
self.bufsize = bufsize;
self
}
/// Create actual reader.
pub fn build<B: Buffer>(self) -> Result<BufRefReader<R, B>, B::Error> {
Ok(BufRefReader {
src: self.src,
buf: B::new(self.bufsize)?,
})
}
}
quick_error! {
/// Error type that reading functions might emit
#[derive(Debug)]
pub enum Error {
/// Error reading from actual reader
IO(err: io::Error) { from() }
/// Indicates failure to create/grow buffer
Buf(err: AllocError) { from() }
}
}
impl From<()> for Error {
// VecBuffer never emits errors, it only panics
fn from(_: ()) -> Self {
unimplemented!()
}
}
impl<R: Read, B: Buffer> BufRefReader<R, B>
where Error: From<B::Error>
{
/// Creates buffered reader with default options. Look for [`BufRefReaderBuilder`](struct.BufRefReaderBuilder.html) for tweaks.
pub fn new(src: R) -> Result<BufRefReader<R, B>, B::Error> {
BufRefReaderBuilder::new(src)
.build()
}
// returns Some(where appended data starts within the filled part of the buffer),
// or None for EOF
#[inline]
fn fill(&mut self) -> Result<Option<usize>, Error> {
self.buf.enlarge()?;
let old_len = self.buf.len();
match self.src.read(self.buf.appendable())? {
0 => Ok(None), // EOF
n => {
self.buf.grow(n);
Ok(Some(old_len))
}
}
}
/**
Returns requested amount of bytes, or less if EOF prevents reader from fulfilling the request.
Returns:
- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read(&mut self, n: usize) -> Result<Option<&[u8]>, Error> {
while n > self.buf.len() {
// fill and expand buffer until either:
// - buffer starts holding the requested amount of data
// - EOF is reached
if self.fill()?.is_none() { break };
}
if self.buf.len() == 0 {
// reading past EOF
Ok(None)
} else {
let output = self.buf.consume(n);
Ok(Some(output))
}
}
/**
Returns bytes up until and including `delim`, or until EOF mark. If no content is available, returns `None`.
Returns:
- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read_until(&mut self, delim: u8) -> Result<Option<&[u8]>, Error> {
let mut len = None;
// position within filled part of the buffer,
// from which to continue search for character
let mut pos = 0;
loop {
// fill and expand buffer until either:
// - `delim` appears in the buffer
// - EOF is reached
if let Some(n) = memchr(delim, &self.buf.filled()[pos..]) {
len = Some(pos+n);
break;
}
pos = match self.fill()? {
None => break, // EOF
Some(pos) => pos,
};
}
match len {
None => { // EOF
if self.buf.len() == 0 {
Ok(None)
} else {
let output = self.buf.consume(self.buf.len());
Ok(Some(output))
}
},
Some(len) => {
let len = len + 1; // also include matching delimiter
let output = self.buf.consume(len);
Ok(Some(output))
},
}
}
}
#[cfg(test)]
static WORDS: &'static [u8] = include_bytes!("/usr/share/dict/words");
#[cfg(test)]
mod tests {
use super::*; | Error: From<B::Error>,
{
// two spaces, three spaces, two spaces
let mut r = BufRefReaderBuilder::new(&b" lorem ipsum "[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), None);
}
#[test] fn read_until_empty_lines_vec() { read_until_empty_lines::<VecBuffer>() }
#[test] fn read_until_empty_lines_mmap() { read_until_empty_lines::<MmapBuffer>() }
fn read_until_words<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(4)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'\n');
while let Ok(Some(slice_buf)) = r.read_until(b'\n') {
let mut slice_words = words.next().unwrap()
.to_vec();
slice_words.push(b'\n');
assert_eq!(slice_buf, &slice_words[..]);
}
// reader: returned immediately after hitting EOF past last b'\n'
// words: this is.split(), hence empty string past last b'\n'
assert_eq!(words.next(), Some(&b""[..]));
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_vec() { read_until_words::<VecBuffer>() }
#[test] fn read_until_words_mmap() { read_until_words::<MmapBuffer>() }
// like read_until_words, but splits by rarest character, which is b'Q'
// also uses slightly bigger initial buffers
fn read_until_words_long<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(32)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'Q').peekable();
while let Ok(Some(slice_buf)) = r.read_until(b'Q') {
let mut slice_words = words.next().unwrap()
.to_vec();
if words.peek()!= None {
slice_words.push(b'Q');
}
assert_eq!(slice_buf, &slice_words[..]);
}
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_long_vec() { read_until_words_long::<VecBuffer>() }
#[test] fn read_until_words_long_mmap() { read_until_words_long::<MmapBuffer>() }
fn read<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(&b"lorem ipsum dolor sit amet"[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read(5).unwrap(), Some(&b"lorem"[..]));
assert_eq!(r.read(6).unwrap(), Some(&b" ipsum"[..]));
assert_eq!(r.read(1024).unwrap(), Some(&b" dolor sit amet"[..]));
assert_eq!(r.read(1).unwrap(), None);
}
#[test] fn read_vec() { read::<VecBuffer>() }
#[test] fn read_mmap() { read::<MmapBuffer>() }
fn read_words<B: Buffer>(cap: usize, read: usize)
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(cap)
.build::<B>()
.unwrap();
let mut words = WORDS.chunks(read);
while let Ok(Some(slice_buf)) = r.read(read) {
let slice_words = words.next().unwrap();
assert_eq!(slice_buf, slice_words);
}
assert_eq!(words.next(), None);
}
#[test] fn read_words_vec_4x3() { read_words::<VecBuffer>(4, 3) }
#[test] fn read_words_vec_4x5() { read_words::<VecBuffer>(4, 5) }
#[test] fn read_words_mmap_4x3() { read_words::<MmapBuffer>(4, 3) }
#[test] fn read_words_mmap_4x5() { read_words::<MmapBuffer>(4, 5) }
} | use std::fmt::Debug;
fn read_until_empty_lines<B: Buffer>()
where
B::Error: Debug, | random_line_split |
lib.rs | /*!
Faster, growable buffering reader for when there's little to no need to modify data, nor to keep it alive past next read.
`std::io::BufReader` works by copying data from its internal buffer into user-provided `Vec`/`String`,
or, in case of `.lines()`, by emitting new heap-allocated `String` for each iteration.
While convenient and versatile, this is not the fastest approach.
Instead, `BufRefReader` references its internal buffer with each read, returning `&[u8]`.
Lack of extra allocations yields better read performance in situations where most (if not all) of read data:
- requires no modifications,
- is never used outside of a loop body and does not need to be duplicated into the heap for future use.
While being more performant, this approach also severely limits applicability of this reader:
- it does not (and cannot) implement `BufRead` and cannot be used as a direct replacement for `BufReader`;
- returned values are only valid between calls to reading functions (i.e. they cannot outlive even a single loop cycle), and Rust's borrow checker will prevent you from using stale references;
- consequently, `BufRefReader` cannot be turned into an `Iterator` (here's an easy way to think about it: what would `Iterator::collect()` return?);
- returned references are immutable;
- obviously, there's also nothing that can return `String`s or `&str`s for you.
## Choice a of buffer
Use [`MmapBuffer`](struct.MmapBuffer.html) unless:
- [slice-deque](https://github.com/gnzlbg/slice_deque) is not available for your platform (e.g. no support for `mmap`),
- you need very small buffers (smaller than 1 memory page),
- you're about to create a lot of buffers in a short period of time ([`new()`](trait.Buffer.html#tymethod.new) is relatively expensive),
- you're expecting buffer to grow a lot (consider, if possible, preallocating larger buffers through [`BufRefReaderBuilder.capacity`](struct.BufRefReaderBuilder.html#method.capacity)),
- you have some very special concerns re: memory maps and malloc bypass (special allocators, possible kernel inefficiency due to large amount of mapped memory regions etc.).
## Examples
Read data word by word:
```
use buf_ref_reader::*;
fn read<B: Buffer>() -> Result<(), Error>
where
Error: From<B::Error>,
// add this if you plan to `unwrap()` errors returned by `read()` et al.
//B::Error: std::fmt::Debug,
{
// &[u8] implements Read, hence we use it as our data source for this example
let data = b"lorem ipsum dolor sit amet";
let mut r = BufRefReaderBuilder::new(&data[..])
.capacity(4)
.build::<B>()?;
assert_eq!(r.read_until(b' ')?, Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"dolor "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"sit "[..]));
assert_eq!(r.read_until(b' ')?, Some(&b"amet"[..]));
assert_eq!(r.read_until(b' ')?, None); // EOF
assert_eq!(r.read_until(b' ')?, None);
Ok(())
}
fn main() {
read::<VecBuffer>().unwrap();
read::<MmapBuffer>().unwrap();
}
```
*/
#![warn(missing_docs)]
use quick_error::quick_error;
use std::io::{self, Read};
use memchr::memchr;
mod buffer;
pub use buffer::{
Buffer,
VecBuffer,
MmapBuffer,
};
use slice_deque::AllocError;
use std::convert::From;
/**
Buffering reader.
See [module-level docs](index.html) for examples.
*/
pub struct BufRefReader<R, B> {
src: R,
buf: B,
}
/**
Builder for [`BufRefReader`](struct.BufRefReader.html).
See [module-level docs](index.html) for examples.
*/
pub struct BufRefReaderBuilder<R> {
src: R,
bufsize: usize,
}
impl<R: Read> BufRefReaderBuilder<R> {
/// Creates new builder with given reader and default options.
pub fn new(src: R) -> Self {
BufRefReaderBuilder {
src,
bufsize: 8192,
}
}
/// Set initial buffer capacity.
pub fn capacity(mut self, bufsize: usize) -> Self {
self.bufsize = bufsize;
self
}
/// Create actual reader.
pub fn build<B: Buffer>(self) -> Result<BufRefReader<R, B>, B::Error> {
Ok(BufRefReader {
src: self.src,
buf: B::new(self.bufsize)?,
})
}
}
quick_error! {
/// Error type that reading functions might emit
#[derive(Debug)]
pub enum Error {
/// Error reading from actual reader
IO(err: io::Error) { from() }
/// Indicates failure to create/grow buffer
Buf(err: AllocError) { from() }
}
}
impl From<()> for Error {
// VecBuffer never emits errors, it only panics
fn from(_: ()) -> Self {
unimplemented!()
}
}
impl<R: Read, B: Buffer> BufRefReader<R, B>
where Error: From<B::Error>
{
/// Creates buffered reader with default options. Look for [`BufRefReaderBuilder`](struct.BufRefReaderBuilder.html) for tweaks.
pub fn new(src: R) -> Result<BufRefReader<R, B>, B::Error> {
BufRefReaderBuilder::new(src)
.build()
}
// returns Some(where appended data starts within the filled part of the buffer),
// or None for EOF
#[inline]
fn fill(&mut self) -> Result<Option<usize>, Error> {
self.buf.enlarge()?;
let old_len = self.buf.len();
match self.src.read(self.buf.appendable())? {
0 => Ok(None), // EOF
n => {
self.buf.grow(n);
Ok(Some(old_len))
}
}
}
/**
Returns requested amount of bytes, or less if EOF prevents reader from fulfilling the request.
Returns:
- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read(&mut self, n: usize) -> Result<Option<&[u8]>, Error> {
while n > self.buf.len() {
// fill and expand buffer until either:
// - buffer starts holding the requested amount of data
// - EOF is reached
if self.fill()?.is_none() { break };
}
if self.buf.len() == 0 {
// reading past EOF
Ok(None)
} else {
let output = self.buf.consume(n);
Ok(Some(output))
}
}
/**
Returns bytes up until and including `delim`, or until EOF mark. If no content is available, returns `None`.
Returns:
- `Ok(Some(data))` with, well, data,
- `Ok(None)` if no more data is available,
- `Err(err)`: see `std::io::Read::read()`
*/
#[inline]
pub fn read_until(&mut self, delim: u8) -> Result<Option<&[u8]>, Error> {
let mut len = None;
// position within filled part of the buffer,
// from which to continue search for character
let mut pos = 0;
loop {
// fill and expand buffer until either:
// - `delim` appears in the buffer
// - EOF is reached
if let Some(n) = memchr(delim, &self.buf.filled()[pos..]) {
len = Some(pos+n);
break;
}
pos = match self.fill()? {
None => break, // EOF
Some(pos) => pos,
};
}
match len {
None => { // EOF
if self.buf.len() == 0 {
Ok(None)
} else {
let output = self.buf.consume(self.buf.len());
Ok(Some(output))
}
},
Some(len) => {
let len = len + 1; // also include matching delimiter
let output = self.buf.consume(len);
Ok(Some(output))
},
}
}
}
#[cfg(test)]
static WORDS: &'static [u8] = include_bytes!("/usr/share/dict/words");
#[cfg(test)]
mod tests {
use super::*;
use std::fmt::Debug;
fn read_until_empty_lines<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
// two spaces, three spaces, two spaces
let mut r = BufRefReaderBuilder::new(&b" lorem ipsum "[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"lorem "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b"ipsum "[..]));
assert_eq!(r.read_until(b' ').unwrap(), Some(&b" "[..]));
assert_eq!(r.read_until(b' ').unwrap(), None);
}
#[test] fn read_until_empty_lines_vec() { read_until_empty_lines::<VecBuffer>() }
#[test] fn read_until_empty_lines_mmap() { read_until_empty_lines::<MmapBuffer>() }
fn read_until_words<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(4)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'\n');
while let Ok(Some(slice_buf)) = r.read_until(b'\n') {
let mut slice_words = words.next().unwrap()
.to_vec();
slice_words.push(b'\n');
assert_eq!(slice_buf, &slice_words[..]);
}
// reader: returned immediately after hitting EOF past last b'\n'
// words: this is.split(), hence empty string past last b'\n'
assert_eq!(words.next(), Some(&b""[..]));
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_vec() { read_until_words::<VecBuffer>() }
#[test] fn read_until_words_mmap() { read_until_words::<MmapBuffer>() }
// like read_until_words, but splits by rarest character, which is b'Q'
// also uses slightly bigger initial buffers
fn read_until_words_long<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(32)
.build::<B>()
.unwrap();
let mut words = WORDS.split(|&c| c == b'Q').peekable();
while let Ok(Some(slice_buf)) = r.read_until(b'Q') {
let mut slice_words = words.next().unwrap()
.to_vec();
if words.peek()!= None {
slice_words.push(b'Q');
}
assert_eq!(slice_buf, &slice_words[..]);
}
assert_eq!(words.next(), None);
}
#[test] fn read_until_words_long_vec() { read_until_words_long::<VecBuffer>() }
#[test] fn read_until_words_long_mmap() { read_until_words_long::<MmapBuffer>() }
fn read<B: Buffer>()
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(&b"lorem ipsum dolor sit amet"[..])
.capacity(4)
.build::<B>()
.unwrap();
assert_eq!(r.read(5).unwrap(), Some(&b"lorem"[..]));
assert_eq!(r.read(6).unwrap(), Some(&b" ipsum"[..]));
assert_eq!(r.read(1024).unwrap(), Some(&b" dolor sit amet"[..]));
assert_eq!(r.read(1).unwrap(), None);
}
#[test] fn read_vec() { read::<VecBuffer>() }
#[test] fn | () { read::<MmapBuffer>() }
fn read_words<B: Buffer>(cap: usize, read: usize)
where
B::Error: Debug,
Error: From<B::Error>,
{
let mut r = BufRefReaderBuilder::new(WORDS)
.capacity(cap)
.build::<B>()
.unwrap();
let mut words = WORDS.chunks(read);
while let Ok(Some(slice_buf)) = r.read(read) {
let slice_words = words.next().unwrap();
assert_eq!(slice_buf, slice_words);
}
assert_eq!(words.next(), None);
}
#[test] fn read_words_vec_4x3() { read_words::<VecBuffer>(4, 3) }
#[test] fn read_words_vec_4x5() { read_words::<VecBuffer>(4, 5) }
#[test] fn read_words_mmap_4x3() { read_words::<MmapBuffer>(4, 3) }
#[test] fn read_words_mmap_4x5() { read_words::<MmapBuffer>(4, 5) }
}
| read_mmap | identifier_name |
mod.rs | use std::collections::HashMap;
use std::ptr::NonNull;
use std::fmt::Debug;
use once_cell::sync::OnceCell;
use bit_vec::BitVec;
use crate::tag::{TagType, TagTypeKey};
use crate::util::OpaquePtr;
mod state;
mod property;
mod util;
pub use state::*;
pub use property::*;
pub use util::*;
/// A basic block defined by a name, its states and properties. This block structure
/// is made especially for static definition, its states are computed lazily and
/// almost all method requires a self reference with static lifetime.
#[derive(Debug)]
pub struct Block {
name: &'static str,
spec: BlockSpec,
states: OnceCell<BlockStorage>,
}
/// The type of hashable value that can represent a block as a map key.
/// See `Block::get_key`, its only usable for statically defined blocks.
pub type BlockKey = OpaquePtr<Block>;
/// Internal enumeration to avoid allocation over-head for single block. This allows
/// blocks with no properties to avoid allocating a `Vec` and a `HashMap`.
#[derive(Debug)]
enum BlockStorage {
/// Storage for a single state.
Single(BlockState),
/// Storage when there is single or multiple properties. This type of storage
/// implies that all owned states must have BlockStateProperties::Some.
/// By using this storage you assert that properties map is not empty.
Complex {
states: Vec<BlockState>,
properties: HashMap<&'static str, SharedProperty>,
default_state_index: usize
}
}
/// Made for static definitions of all properties of a block.
#[derive(Debug)]
pub enum BlockSpec {
/// For blocks with no properties, they have a **single** state.
Single,
/// For blocks with some properties, requires a slice to a static array of properties
/// references. Use the `blocks_specs!` macro to generate such arrays.
Complex(&'static [&'static dyn UntypedProperty]),
// /// Same a `Complex`, but with a callback function used to set the default block state.
// ComplexWithDefault(&'static [&'static dyn UntypedProperty], fn(&BlockState) -> &BlockState)
}
impl Block {
/// Construct a new block, this method should be used to define blocks statically.
/// The preferred way of defining static blocks is to use the `blocks!` macro.
pub const fn new(name: &'static str, spec: BlockSpec) -> Self {
Self {
name,
spec,
states: OnceCell::new()
}
}
#[inline]
pub fn get_name(&self) -> &'static str {
self.name
}
#[inline]
pub fn get_key(&'static self) -> BlockKey {
OpaquePtr::new(self)
}
fn get_storage(&'static self) -> &'static BlockStorage {
self.states.get_or_init(|| self.make_storage())
}
fn make_storage(&'static self) -> BlockStorage {
// Internal function to generate new BlockStorage from properties,
// if there are no properties, BlockStorage::Single is returned.
fn new_storage(properties: &'static [&'static dyn UntypedProperty]) -> BlockStorage {
if properties.is_empty() {
BlockStorage::Single(BlockState::build_singleton())
} else {
let (
properties,
states
) = BlockState::build_complex(properties);
BlockStorage::Complex {
states,
properties,
default_state_index: 0
}
}
}
// let mut default_supplier = None;
let mut storage = match self.spec {
BlockSpec::Single => BlockStorage::Single(BlockState::build_singleton()),
BlockSpec::Complex(properties) => new_storage(properties),
/*BlockSpec::ComplexWithDefault(properties, fun) => {
default_supplier = Some(fun);
new_storage(properties)
}*/
};
let block_ptr = NonNull::from(self);
match &mut storage {
BlockStorage::Single( state) => {
state.set_block(block_ptr);
},
BlockStorage::Complex {
states,
/*default_state_index,*/..
} => {
for state in states {
state.set_block(block_ptr);
}
/*if let Some(default_supplier) = default_supplier {
*default_state_index = default_supplier(&states[0]).get_index() as usize;
}*/
}
}
storage
}
#[inline]
pub fn get_default_state(&'static self) -> &'static BlockState {
self.get_storage().get_default_state()
}
#[inline]
pub fn get_states(&'static self) -> &'static [BlockState] {
self.get_storage().get_states()
}
}
impl PartialEq for &'static Block {
fn eq(&self, other: &Self) -> bool {
std::ptr::eq(*self, *other)
}
}
impl Eq for &'static Block {}
impl BlockStorage {
pub fn get_default_state(&self) -> &BlockState {
match self {
BlockStorage::Single(state) => state,
BlockStorage::Complex {
states,
default_state_index,..
} => &states[*default_state_index]
}
}
pub fn get_states(&self) -> &[BlockState] {
match self {
BlockStorage::Single(state) => std::slice::from_ref(state),
BlockStorage::Complex { states,.. } => &states[..]
}
}
/// Internal method for neighbor and values resolution of `BlockState`.
fn get_shared_prop(&self, name: &str) -> Option<&SharedProperty> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => properties.get(name)
}
}
/// Internal method for Debug implementation of `BlockState` and values iteration.
/// None is returned if there is no properties and the block has a single state.
fn get_shared_props(&self) -> Option<&HashMap<&'static str, SharedProperty>> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => Some(properties)
}
}
/// Internal method for `BlockState` to get a state a specific index.
fn get_state_unchecked(&self, index: usize) -> &BlockState {
match self {
BlockStorage::Single(state) => {
debug_assert!(index == 0, "index!= 0 with BlockStorage::Single");
state
},
BlockStorage::Complex { states,.. } => &states[index]
}
}
}
/// This is a global blocks palette, it is used in chunk storage to store block states.
/// It allows you to register individual blocks in it as well as static blocks arrays
/// defined using the macro `blocks!`.
pub struct GlobalBlocks {
next_sid: u32,
/// Each registered block is mapped to a tuple (index, sid), where index is the index of
/// insertion of the block and sid being the save ID of the first state of this block.
block_to_indices: HashMap<BlockKey, (usize, u32)>,
/// A vector storing references to each block state, the index of each state is called
/// its "save ID".
ordered_states: Vec<&'static BlockState>,
/// A mapping of block's names to them.
name_to_blocks: HashMap<&'static str, &'static Block>,
/// Contains stores of each tag type. For each tag, either small of big stores are used.
tag_stores: HashMap<TagTypeKey, TagStore>
}
impl GlobalBlocks {
pub fn new() -> Self {
Self {
next_sid: 0,
block_to_indices: HashMap::new(),
ordered_states: Vec::new(),
name_to_blocks: HashMap::new(),
tag_stores: HashMap::new()
}
}
/// A simple constructor to directly call `register_all` with given blocks slice.
pub fn with_all(slice: &[&'static Block]) -> Result<Self, ()> {
let mut blocks = Self::new();
blocks.register_all(slice)?;
Ok(blocks)
}
/// Register a single block to this palette, returns `Err` if no more save ID (SID) is
/// available, `Ok` is returned if successful, if a block was already in the palette
/// it also returns `Ok`.
pub fn register(&mut self, block: &'static Block) -> Result<(), ()> {
let states = block.get_states();
let states_count = states.len();
let sid = self.next_sid;
let idx = self.block_to_indices.len();
let next_sid = sid.checked_add(states_count as u32).ok_or(())?;
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.push(false);
}
}
if self.block_to_indices.insert(block.get_key(), (idx, sid)).is_none() {
self.next_sid = next_sid;
self.name_to_blocks.insert(block.name, block);
self.ordered_states.reserve(states_count);
for state in states {
self.ordered_states.push(state);
}
}
Ok(())
}
/// An optimized way to call `register` multiple times for each given block,
/// the returned follow the same rules as `register`, if an error happens, it
/// return without and previous added blocks are kept.
pub fn register_all(&mut self, slice: &[&'static Block]) -> Result<(), ()> {
let count = slice.len();
self.block_to_indices.reserve(count);
self.name_to_blocks.reserve(count);
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.reserve(count);
}
}
for &block in slice {
self.register(block)?;
}
Ok(())
}
|
/// Get the block state from the given save ID.
pub fn get_state_from(&self, sid: u32) -> Option<&'static BlockState> {
self.ordered_states.get(sid as usize).copied()
}
/// Get the default state from the given block name.
pub fn get_block_from_name(&self, name: &str) -> Option<&'static Block> {
self.name_to_blocks.get(name).cloned()
}
/// Return true if the palette contains the given block.
pub fn has_block(&self, block: &'static Block) -> bool {
self.block_to_indices.contains_key(&block.get_key())
}
/// Return true if the palette contains the given block state.
pub fn has_state(&self, state: &'static BlockState) -> bool {
self.has_block(state.get_block())
}
/// Check if the given state is registered in this palette, `Ok` is returned if true, in
/// the other case `Err` is returned with the error created by the given `err` closure.
pub fn check_state<E>(&self, state: &'static BlockState, err: impl FnOnce() -> E) -> Result<&'static BlockState, E> {
if self.has_state(state) { Ok(state) } else { Err(err()) }
}
/// Register a tag type that will be later possible to set to blocks.
pub fn register_tag_type(&mut self, tag_type: &'static TagType) {
self.tag_stores.insert(tag_type.get_key(), TagStore::Small(Vec::new()));
}
/// Set or unset a tag to some blocks.
pub fn set_blocks_tag<I>(&mut self, tag_type: &'static TagType, enabled: bool, blocks: I) -> Result<(), ()>
where
I: IntoIterator<Item = &'static Block>
{
const MAX_SMALL_LEN: usize = 8;
let store = self.tag_stores.get_mut(&tag_type.get_key()).ok_or(())?;
for block in blocks {
if let TagStore::Small(vec) = store {
let idx = vec.iter().position(move |&b| b == block);
if enabled {
if idx.is_none() {
if vec.len() >= MAX_SMALL_LEN {
// If the small vector is too big, migrate to a big bit vector.
let mut new_vec = BitVec::from_elem(self.block_to_indices.len(), false);
for old_block in vec {
let (idx, _) = *self.block_to_indices.get(&old_block.get_key()).ok_or(())?;
new_vec.set(idx, true);
}
*store = TagStore::Big(new_vec);
} else {
vec.push(block);
}
}
} else if let Some(idx) = idx {
vec.swap_remove(idx);
}
}
if let TagStore::Big(vec) = store {
let (idx, _) = *self.block_to_indices.get(&block.get_key()).ok_or(())?;
vec.set(idx, enabled);
}
}
Ok(())
}
/// Get the tag state on specific block, returning false if unknown block or tag type.
pub fn has_block_tag(&self, block: &'static Block, tag_type: &'static TagType) -> bool {
match self.tag_stores.get(&tag_type.get_key()) {
None => false,
Some(store) => {
match store {
TagStore::Small(vec) => vec.iter().any(move |&b| b == block),
TagStore::Big(vec) => match self.block_to_indices.get(&block.get_key()) {
None => false,
Some(&(idx, _)) => vec.get(idx).unwrap()
}
}
}
}
}
pub fn blocks_count(&self) -> usize {
self.block_to_indices.len()
}
pub fn states_count(&self) -> usize {
self.ordered_states.len()
}
pub fn tags_count(&self) -> usize {
self.tag_stores.len()
}
}
#[derive(Debug)]
enum TagStore {
Small(Vec<&'static Block>),
Big(BitVec)
}
#[macro_export]
macro_rules! blocks_specs {
($($v:vis $id:ident: [$($prop_const:ident),+];)*) => {
$(
$v static $id: [&'static dyn $crate::block::UntypedProperty; $crate::count!($($prop_const)+)] = [
$(&$prop_const),+
];
)*
};
}
#[macro_export]
macro_rules! blocks {
($global_vis:vis $static_id:ident $namespace:literal [
$($block_id:ident $block_name:literal $($spec_id:ident)?),*
$(,)?
]) => {
$($global_vis static $block_id: $crate::block::Block = $crate::block::Block::new(
concat!($namespace, ':', $block_name),
$crate::_blocks_spec!($($spec_id)?)
);)*
$global_vis static $static_id: [&'static $crate::block::Block; $crate::count!($($block_id)*)] = [
$(&$block_id),*
];
};
}
#[macro_export]
macro_rules! _blocks_spec {
() => { $crate::block::BlockSpec::Single };
($spec_id:ident) => { $crate::block::BlockSpec::Complex(&$spec_id) }
} | /// Get the save ID from the given state.
pub fn get_sid_from(&self, state: &'static BlockState) -> Option<u32> {
let (_, block_offset) = *self.block_to_indices.get(&state.get_block().get_key())?;
Some(block_offset + state.get_index() as u32)
} | random_line_split |
mod.rs | use std::collections::HashMap;
use std::ptr::NonNull;
use std::fmt::Debug;
use once_cell::sync::OnceCell;
use bit_vec::BitVec;
use crate::tag::{TagType, TagTypeKey};
use crate::util::OpaquePtr;
mod state;
mod property;
mod util;
pub use state::*;
pub use property::*;
pub use util::*;
/// A basic block defined by a name, its states and properties. This block structure
/// is made especially for static definition, its states are computed lazily and
/// almost all method requires a self reference with static lifetime.
#[derive(Debug)]
pub struct Block {
name: &'static str,
spec: BlockSpec,
states: OnceCell<BlockStorage>,
}
/// The type of hashable value that can represent a block as a map key.
/// See `Block::get_key`, its only usable for statically defined blocks.
pub type BlockKey = OpaquePtr<Block>;
/// Internal enumeration to avoid allocation over-head for single block. This allows
/// blocks with no properties to avoid allocating a `Vec` and a `HashMap`.
#[derive(Debug)]
enum BlockStorage {
/// Storage for a single state.
Single(BlockState),
/// Storage when there is single or multiple properties. This type of storage
/// implies that all owned states must have BlockStateProperties::Some.
/// By using this storage you assert that properties map is not empty.
Complex {
states: Vec<BlockState>,
properties: HashMap<&'static str, SharedProperty>,
default_state_index: usize
}
}
/// Made for static definitions of all properties of a block.
#[derive(Debug)]
pub enum BlockSpec {
/// For blocks with no properties, they have a **single** state.
Single,
/// For blocks with some properties, requires a slice to a static array of properties
/// references. Use the `blocks_specs!` macro to generate such arrays.
Complex(&'static [&'static dyn UntypedProperty]),
// /// Same a `Complex`, but with a callback function used to set the default block state.
// ComplexWithDefault(&'static [&'static dyn UntypedProperty], fn(&BlockState) -> &BlockState)
}
impl Block {
/// Construct a new block, this method should be used to define blocks statically.
/// The preferred way of defining static blocks is to use the `blocks!` macro.
pub const fn new(name: &'static str, spec: BlockSpec) -> Self {
Self {
name,
spec,
states: OnceCell::new()
}
}
#[inline]
pub fn get_name(&self) -> &'static str {
self.name
}
#[inline]
pub fn get_key(&'static self) -> BlockKey {
OpaquePtr::new(self)
}
fn get_storage(&'static self) -> &'static BlockStorage {
self.states.get_or_init(|| self.make_storage())
}
fn make_storage(&'static self) -> BlockStorage {
// Internal function to generate new BlockStorage from properties,
// if there are no properties, BlockStorage::Single is returned.
fn new_storage(properties: &'static [&'static dyn UntypedProperty]) -> BlockStorage {
if properties.is_empty() {
BlockStorage::Single(BlockState::build_singleton())
} else {
let (
properties,
states
) = BlockState::build_complex(properties);
BlockStorage::Complex {
states,
properties,
default_state_index: 0
}
}
}
// let mut default_supplier = None;
let mut storage = match self.spec {
BlockSpec::Single => BlockStorage::Single(BlockState::build_singleton()),
BlockSpec::Complex(properties) => new_storage(properties),
/*BlockSpec::ComplexWithDefault(properties, fun) => {
default_supplier = Some(fun);
new_storage(properties)
}*/
};
let block_ptr = NonNull::from(self);
match &mut storage {
BlockStorage::Single( state) => {
state.set_block(block_ptr);
},
BlockStorage::Complex {
states,
/*default_state_index,*/..
} => {
for state in states {
state.set_block(block_ptr);
}
/*if let Some(default_supplier) = default_supplier {
*default_state_index = default_supplier(&states[0]).get_index() as usize;
}*/
}
}
storage
}
#[inline]
pub fn get_default_state(&'static self) -> &'static BlockState {
self.get_storage().get_default_state()
}
#[inline]
pub fn get_states(&'static self) -> &'static [BlockState] {
self.get_storage().get_states()
}
}
impl PartialEq for &'static Block {
fn eq(&self, other: &Self) -> bool {
std::ptr::eq(*self, *other)
}
}
impl Eq for &'static Block {}
impl BlockStorage {
pub fn get_default_state(&self) -> &BlockState {
match self {
BlockStorage::Single(state) => state,
BlockStorage::Complex {
states,
default_state_index,..
} => &states[*default_state_index]
}
}
pub fn get_states(&self) -> &[BlockState] {
match self {
BlockStorage::Single(state) => std::slice::from_ref(state),
BlockStorage::Complex { states,.. } => &states[..]
}
}
/// Internal method for neighbor and values resolution of `BlockState`.
fn get_shared_prop(&self, name: &str) -> Option<&SharedProperty> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => properties.get(name)
}
}
/// Internal method for Debug implementation of `BlockState` and values iteration.
/// None is returned if there is no properties and the block has a single state.
fn get_shared_props(&self) -> Option<&HashMap<&'static str, SharedProperty>> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => Some(properties)
}
}
/// Internal method for `BlockState` to get a state a specific index.
fn get_state_unchecked(&self, index: usize) -> &BlockState {
match self {
BlockStorage::Single(state) => {
debug_assert!(index == 0, "index!= 0 with BlockStorage::Single");
state
},
BlockStorage::Complex { states,.. } => &states[index]
}
}
}
/// This is a global blocks palette, it is used in chunk storage to store block states.
/// It allows you to register individual blocks in it as well as static blocks arrays
/// defined using the macro `blocks!`.
pub struct GlobalBlocks {
next_sid: u32,
/// Each registered block is mapped to a tuple (index, sid), where index is the index of
/// insertion of the block and sid being the save ID of the first state of this block.
block_to_indices: HashMap<BlockKey, (usize, u32)>,
/// A vector storing references to each block state, the index of each state is called
/// its "save ID".
ordered_states: Vec<&'static BlockState>,
/// A mapping of block's names to them.
name_to_blocks: HashMap<&'static str, &'static Block>,
/// Contains stores of each tag type. For each tag, either small of big stores are used.
tag_stores: HashMap<TagTypeKey, TagStore>
}
impl GlobalBlocks {
pub fn new() -> Self {
Self {
next_sid: 0,
block_to_indices: HashMap::new(),
ordered_states: Vec::new(),
name_to_blocks: HashMap::new(),
tag_stores: HashMap::new()
}
}
/// A simple constructor to directly call `register_all` with given blocks slice.
pub fn with_all(slice: &[&'static Block]) -> Result<Self, ()> {
let mut blocks = Self::new();
blocks.register_all(slice)?;
Ok(blocks)
}
/// Register a single block to this palette, returns `Err` if no more save ID (SID) is
/// available, `Ok` is returned if successful, if a block was already in the palette
/// it also returns `Ok`.
pub fn register(&mut self, block: &'static Block) -> Result<(), ()> {
let states = block.get_states();
let states_count = states.len();
let sid = self.next_sid;
let idx = self.block_to_indices.len();
let next_sid = sid.checked_add(states_count as u32).ok_or(())?;
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.push(false);
}
}
if self.block_to_indices.insert(block.get_key(), (idx, sid)).is_none() {
self.next_sid = next_sid;
self.name_to_blocks.insert(block.name, block);
self.ordered_states.reserve(states_count);
for state in states {
self.ordered_states.push(state);
}
}
Ok(())
}
/// An optimized way to call `register` multiple times for each given block,
/// the returned follow the same rules as `register`, if an error happens, it
/// return without and previous added blocks are kept.
pub fn register_all(&mut self, slice: &[&'static Block]) -> Result<(), ()> {
let count = slice.len();
self.block_to_indices.reserve(count);
self.name_to_blocks.reserve(count);
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.reserve(count);
}
}
for &block in slice {
self.register(block)?;
}
Ok(())
}
/// Get the save ID from the given state.
pub fn get_sid_from(&self, state: &'static BlockState) -> Option<u32> {
let (_, block_offset) = *self.block_to_indices.get(&state.get_block().get_key())?;
Some(block_offset + state.get_index() as u32)
}
/// Get the block state from the given save ID.
pub fn get_state_from(&self, sid: u32) -> Option<&'static BlockState> {
self.ordered_states.get(sid as usize).copied()
}
/// Get the default state from the given block name.
pub fn get_block_from_name(&self, name: &str) -> Option<&'static Block> {
self.name_to_blocks.get(name).cloned()
}
/// Return true if the palette contains the given block.
pub fn has_block(&self, block: &'static Block) -> bool {
self.block_to_indices.contains_key(&block.get_key())
}
/// Return true if the palette contains the given block state.
pub fn has_state(&self, state: &'static BlockState) -> bool {
self.has_block(state.get_block())
}
/// Check if the given state is registered in this palette, `Ok` is returned if true, in
/// the other case `Err` is returned with the error created by the given `err` closure.
pub fn check_state<E>(&self, state: &'static BlockState, err: impl FnOnce() -> E) -> Result<&'static BlockState, E> {
if self.has_state(state) { Ok(state) } else { Err(err()) }
}
/// Register a tag type that will be later possible to set to blocks.
pub fn | (&mut self, tag_type: &'static TagType) {
self.tag_stores.insert(tag_type.get_key(), TagStore::Small(Vec::new()));
}
/// Set or unset a tag to some blocks.
pub fn set_blocks_tag<I>(&mut self, tag_type: &'static TagType, enabled: bool, blocks: I) -> Result<(), ()>
where
I: IntoIterator<Item = &'static Block>
{
const MAX_SMALL_LEN: usize = 8;
let store = self.tag_stores.get_mut(&tag_type.get_key()).ok_or(())?;
for block in blocks {
if let TagStore::Small(vec) = store {
let idx = vec.iter().position(move |&b| b == block);
if enabled {
if idx.is_none() {
if vec.len() >= MAX_SMALL_LEN {
// If the small vector is too big, migrate to a big bit vector.
let mut new_vec = BitVec::from_elem(self.block_to_indices.len(), false);
for old_block in vec {
let (idx, _) = *self.block_to_indices.get(&old_block.get_key()).ok_or(())?;
new_vec.set(idx, true);
}
*store = TagStore::Big(new_vec);
} else {
vec.push(block);
}
}
} else if let Some(idx) = idx {
vec.swap_remove(idx);
}
}
if let TagStore::Big(vec) = store {
let (idx, _) = *self.block_to_indices.get(&block.get_key()).ok_or(())?;
vec.set(idx, enabled);
}
}
Ok(())
}
/// Get the tag state on specific block, returning false if unknown block or tag type.
pub fn has_block_tag(&self, block: &'static Block, tag_type: &'static TagType) -> bool {
match self.tag_stores.get(&tag_type.get_key()) {
None => false,
Some(store) => {
match store {
TagStore::Small(vec) => vec.iter().any(move |&b| b == block),
TagStore::Big(vec) => match self.block_to_indices.get(&block.get_key()) {
None => false,
Some(&(idx, _)) => vec.get(idx).unwrap()
}
}
}
}
}
pub fn blocks_count(&self) -> usize {
self.block_to_indices.len()
}
pub fn states_count(&self) -> usize {
self.ordered_states.len()
}
pub fn tags_count(&self) -> usize {
self.tag_stores.len()
}
}
#[derive(Debug)]
enum TagStore {
Small(Vec<&'static Block>),
Big(BitVec)
}
#[macro_export]
macro_rules! blocks_specs {
($($v:vis $id:ident: [$($prop_const:ident),+];)*) => {
$(
$v static $id: [&'static dyn $crate::block::UntypedProperty; $crate::count!($($prop_const)+)] = [
$(&$prop_const),+
];
)*
};
}
#[macro_export]
macro_rules! blocks {
($global_vis:vis $static_id:ident $namespace:literal [
$($block_id:ident $block_name:literal $($spec_id:ident)?),*
$(,)?
]) => {
$($global_vis static $block_id: $crate::block::Block = $crate::block::Block::new(
concat!($namespace, ':', $block_name),
$crate::_blocks_spec!($($spec_id)?)
);)*
$global_vis static $static_id: [&'static $crate::block::Block; $crate::count!($($block_id)*)] = [
$(&$block_id),*
];
};
}
#[macro_export]
macro_rules! _blocks_spec {
() => { $crate::block::BlockSpec::Single };
($spec_id:ident) => { $crate::block::BlockSpec::Complex(&$spec_id) }
}
| register_tag_type | identifier_name |
mod.rs | use std::collections::HashMap;
use std::ptr::NonNull;
use std::fmt::Debug;
use once_cell::sync::OnceCell;
use bit_vec::BitVec;
use crate::tag::{TagType, TagTypeKey};
use crate::util::OpaquePtr;
mod state;
mod property;
mod util;
pub use state::*;
pub use property::*;
pub use util::*;
/// A basic block defined by a name, its states and properties. This block structure
/// is made especially for static definition, its states are computed lazily and
/// almost all method requires a self reference with static lifetime.
#[derive(Debug)]
pub struct Block {
name: &'static str,
spec: BlockSpec,
states: OnceCell<BlockStorage>,
}
/// The type of hashable value that can represent a block as a map key.
/// See `Block::get_key`, its only usable for statically defined blocks.
pub type BlockKey = OpaquePtr<Block>;
/// Internal enumeration to avoid allocation over-head for single block. This allows
/// blocks with no properties to avoid allocating a `Vec` and a `HashMap`.
#[derive(Debug)]
enum BlockStorage {
/// Storage for a single state.
Single(BlockState),
/// Storage when there is single or multiple properties. This type of storage
/// implies that all owned states must have BlockStateProperties::Some.
/// By using this storage you assert that properties map is not empty.
Complex {
states: Vec<BlockState>,
properties: HashMap<&'static str, SharedProperty>,
default_state_index: usize
}
}
/// Made for static definitions of all properties of a block.
#[derive(Debug)]
pub enum BlockSpec {
/// For blocks with no properties, they have a **single** state.
Single,
/// For blocks with some properties, requires a slice to a static array of properties
/// references. Use the `blocks_specs!` macro to generate such arrays.
Complex(&'static [&'static dyn UntypedProperty]),
// /// Same a `Complex`, but with a callback function used to set the default block state.
// ComplexWithDefault(&'static [&'static dyn UntypedProperty], fn(&BlockState) -> &BlockState)
}
impl Block {
/// Construct a new block, this method should be used to define blocks statically.
/// The preferred way of defining static blocks is to use the `blocks!` macro.
pub const fn new(name: &'static str, spec: BlockSpec) -> Self {
Self {
name,
spec,
states: OnceCell::new()
}
}
#[inline]
pub fn get_name(&self) -> &'static str {
self.name
}
#[inline]
pub fn get_key(&'static self) -> BlockKey {
OpaquePtr::new(self)
}
fn get_storage(&'static self) -> &'static BlockStorage {
self.states.get_or_init(|| self.make_storage())
}
fn make_storage(&'static self) -> BlockStorage {
// Internal function to generate new BlockStorage from properties,
// if there are no properties, BlockStorage::Single is returned.
fn new_storage(properties: &'static [&'static dyn UntypedProperty]) -> BlockStorage {
if properties.is_empty() {
BlockStorage::Single(BlockState::build_singleton())
} else {
let (
properties,
states
) = BlockState::build_complex(properties);
BlockStorage::Complex {
states,
properties,
default_state_index: 0
}
}
}
// let mut default_supplier = None;
let mut storage = match self.spec {
BlockSpec::Single => BlockStorage::Single(BlockState::build_singleton()),
BlockSpec::Complex(properties) => new_storage(properties),
/*BlockSpec::ComplexWithDefault(properties, fun) => {
default_supplier = Some(fun);
new_storage(properties)
}*/
};
let block_ptr = NonNull::from(self);
match &mut storage {
BlockStorage::Single( state) => {
state.set_block(block_ptr);
},
BlockStorage::Complex {
states,
/*default_state_index,*/..
} => {
for state in states {
state.set_block(block_ptr);
}
/*if let Some(default_supplier) = default_supplier {
*default_state_index = default_supplier(&states[0]).get_index() as usize;
}*/
}
}
storage
}
#[inline]
pub fn get_default_state(&'static self) -> &'static BlockState {
self.get_storage().get_default_state()
}
#[inline]
pub fn get_states(&'static self) -> &'static [BlockState] {
self.get_storage().get_states()
}
}
impl PartialEq for &'static Block {
fn eq(&self, other: &Self) -> bool {
std::ptr::eq(*self, *other)
}
}
impl Eq for &'static Block {}
impl BlockStorage {
pub fn get_default_state(&self) -> &BlockState {
match self {
BlockStorage::Single(state) => state,
BlockStorage::Complex {
states,
default_state_index,..
} => &states[*default_state_index]
}
}
pub fn get_states(&self) -> &[BlockState] {
match self {
BlockStorage::Single(state) => std::slice::from_ref(state),
BlockStorage::Complex { states,.. } => &states[..]
}
}
/// Internal method for neighbor and values resolution of `BlockState`.
fn get_shared_prop(&self, name: &str) -> Option<&SharedProperty> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => properties.get(name)
}
}
/// Internal method for Debug implementation of `BlockState` and values iteration.
/// None is returned if there is no properties and the block has a single state.
fn get_shared_props(&self) -> Option<&HashMap<&'static str, SharedProperty>> {
match self {
BlockStorage::Single(_) => None,
BlockStorage::Complex {
properties,..
} => Some(properties)
}
}
/// Internal method for `BlockState` to get a state a specific index.
fn get_state_unchecked(&self, index: usize) -> &BlockState {
match self {
BlockStorage::Single(state) => {
debug_assert!(index == 0, "index!= 0 with BlockStorage::Single");
state
},
BlockStorage::Complex { states,.. } => &states[index]
}
}
}
/// This is a global blocks palette, it is used in chunk storage to store block states.
/// It allows you to register individual blocks in it as well as static blocks arrays
/// defined using the macro `blocks!`.
pub struct GlobalBlocks {
next_sid: u32,
/// Each registered block is mapped to a tuple (index, sid), where index is the index of
/// insertion of the block and sid being the save ID of the first state of this block.
block_to_indices: HashMap<BlockKey, (usize, u32)>,
/// A vector storing references to each block state, the index of each state is called
/// its "save ID".
ordered_states: Vec<&'static BlockState>,
/// A mapping of block's names to them.
name_to_blocks: HashMap<&'static str, &'static Block>,
/// Contains stores of each tag type. For each tag, either small of big stores are used.
tag_stores: HashMap<TagTypeKey, TagStore>
}
impl GlobalBlocks {
pub fn new() -> Self {
Self {
next_sid: 0,
block_to_indices: HashMap::new(),
ordered_states: Vec::new(),
name_to_blocks: HashMap::new(),
tag_stores: HashMap::new()
}
}
/// A simple constructor to directly call `register_all` with given blocks slice.
pub fn with_all(slice: &[&'static Block]) -> Result<Self, ()> {
let mut blocks = Self::new();
blocks.register_all(slice)?;
Ok(blocks)
}
/// Register a single block to this palette, returns `Err` if no more save ID (SID) is
/// available, `Ok` is returned if successful, if a block was already in the palette
/// it also returns `Ok`.
pub fn register(&mut self, block: &'static Block) -> Result<(), ()> {
let states = block.get_states();
let states_count = states.len();
let sid = self.next_sid;
let idx = self.block_to_indices.len();
let next_sid = sid.checked_add(states_count as u32).ok_or(())?;
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.push(false);
}
}
if self.block_to_indices.insert(block.get_key(), (idx, sid)).is_none() {
self.next_sid = next_sid;
self.name_to_blocks.insert(block.name, block);
self.ordered_states.reserve(states_count);
for state in states {
self.ordered_states.push(state);
}
}
Ok(())
}
/// An optimized way to call `register` multiple times for each given block,
/// the returned follow the same rules as `register`, if an error happens, it
/// return without and previous added blocks are kept.
pub fn register_all(&mut self, slice: &[&'static Block]) -> Result<(), ()> {
let count = slice.len();
self.block_to_indices.reserve(count);
self.name_to_blocks.reserve(count);
for store in self.tag_stores.values_mut() {
if let TagStore::Big(store) = store {
store.reserve(count);
}
}
for &block in slice {
self.register(block)?;
}
Ok(())
}
/// Get the save ID from the given state.
pub fn get_sid_from(&self, state: &'static BlockState) -> Option<u32> {
let (_, block_offset) = *self.block_to_indices.get(&state.get_block().get_key())?;
Some(block_offset + state.get_index() as u32)
}
/// Get the block state from the given save ID.
pub fn get_state_from(&self, sid: u32) -> Option<&'static BlockState> {
self.ordered_states.get(sid as usize).copied()
}
/// Get the default state from the given block name.
pub fn get_block_from_name(&self, name: &str) -> Option<&'static Block> {
self.name_to_blocks.get(name).cloned()
}
/// Return true if the palette contains the given block.
pub fn has_block(&self, block: &'static Block) -> bool {
self.block_to_indices.contains_key(&block.get_key())
}
/// Return true if the palette contains the given block state.
pub fn has_state(&self, state: &'static BlockState) -> bool {
self.has_block(state.get_block())
}
/// Check if the given state is registered in this palette, `Ok` is returned if true, in
/// the other case `Err` is returned with the error created by the given `err` closure.
pub fn check_state<E>(&self, state: &'static BlockState, err: impl FnOnce() -> E) -> Result<&'static BlockState, E> {
if self.has_state(state) { Ok(state) } else { Err(err()) }
}
/// Register a tag type that will be later possible to set to blocks.
pub fn register_tag_type(&mut self, tag_type: &'static TagType) {
self.tag_stores.insert(tag_type.get_key(), TagStore::Small(Vec::new()));
}
/// Set or unset a tag to some blocks.
pub fn set_blocks_tag<I>(&mut self, tag_type: &'static TagType, enabled: bool, blocks: I) -> Result<(), ()>
where
I: IntoIterator<Item = &'static Block>
| } else {
vec.push(block);
}
}
} else if let Some(idx) = idx {
vec.swap_remove(idx);
}
}
if let TagStore::Big(vec) = store {
let (idx, _) = *self.block_to_indices.get(&block.get_key()).ok_or(())?;
vec.set(idx, enabled);
}
}
Ok(())
}
/// Get the tag state on specific block, returning false if unknown block or tag type.
pub fn has_block_tag(&self, block: &'static Block, tag_type: &'static TagType) -> bool {
match self.tag_stores.get(&tag_type.get_key()) {
None => false,
Some(store) => {
match store {
TagStore::Small(vec) => vec.iter().any(move |&b| b == block),
TagStore::Big(vec) => match self.block_to_indices.get(&block.get_key()) {
None => false,
Some(&(idx, _)) => vec.get(idx).unwrap()
}
}
}
}
}
pub fn blocks_count(&self) -> usize {
self.block_to_indices.len()
}
pub fn states_count(&self) -> usize {
self.ordered_states.len()
}
pub fn tags_count(&self) -> usize {
self.tag_stores.len()
}
}
#[derive(Debug)]
enum TagStore {
Small(Vec<&'static Block>),
Big(BitVec)
}
#[macro_export]
macro_rules! blocks_specs {
($($v:vis $id:ident: [$($prop_const:ident),+];)*) => {
$(
$v static $id: [&'static dyn $crate::block::UntypedProperty; $crate::count!($($prop_const)+)] = [
$(&$prop_const),+
];
)*
};
}
#[macro_export]
macro_rules! blocks {
($global_vis:vis $static_id:ident $namespace:literal [
$($block_id:ident $block_name:literal $($spec_id:ident)?),*
$(,)?
]) => {
$($global_vis static $block_id: $crate::block::Block = $crate::block::Block::new(
concat!($namespace, ':', $block_name),
$crate::_blocks_spec!($($spec_id)?)
);)*
$global_vis static $static_id: [&'static $crate::block::Block; $crate::count!($($block_id)*)] = [
$(&$block_id),*
];
};
}
#[macro_export]
macro_rules! _blocks_spec {
() => { $crate::block::BlockSpec::Single };
($spec_id:ident) => { $crate::block::BlockSpec::Complex(&$spec_id) }
}
| {
const MAX_SMALL_LEN: usize = 8;
let store = self.tag_stores.get_mut(&tag_type.get_key()).ok_or(())?;
for block in blocks {
if let TagStore::Small(vec) = store {
let idx = vec.iter().position(move |&b| b == block);
if enabled {
if idx.is_none() {
if vec.len() >= MAX_SMALL_LEN {
// If the small vector is too big, migrate to a big bit vector.
let mut new_vec = BitVec::from_elem(self.block_to_indices.len(), false);
for old_block in vec {
let (idx, _) = *self.block_to_indices.get(&old_block.get_key()).ok_or(())?;
new_vec.set(idx, true);
}
*store = TagStore::Big(new_vec); | identifier_body |
lib.rs | extern crate num_bigint;
use num_bigint::BigInt;
use poseidon_rs::Poseidon;
use wasm_bindgen::prelude::*;
///////////////////////////////////////////////////////////////////////////////
// EXPORTED FUNCTIONS FUNCTIONS
///////////////////////////////////////////////////////////////////////////////
#[wasm_bindgen]
pub fn digest_string_claim(claim: &str) -> String {
// Convert into a byte array
let claim_bytes = claim.as_bytes().to_vec();
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
#[wasm_bindgen]
pub fn digest_hex_claim(hex_claim: &str) -> String {
// Decode hex into a byte array
let hex_claim_clean: &str = if hex_claim.starts_with("0x") {
&hex_claim[2..] // skip 0x
} else {
hex_claim
};
let claim_bytes = match hex::decode(hex_claim_clean) {
Ok(v) => v,
Err(err) => {
return format!(
"ERROR: The given claim ({}) is not a valid hex string - {}",
hex_claim, err
);
}
};
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
///////////////////////////////////////////////////////////////////////////////
// HELPERS
///////////////////////////////////////////////////////////////////////////////
fn pad_bigint_le(num: &BigInt) -> Vec<u8> {
let mut claim_bytes = num.to_bytes_le().1;
while claim_bytes.len() < 32 {
claim_bytes.push(0);
}
claim_bytes
}
#[allow(dead_code)]
fn pad_bigint_be(num: &BigInt) -> Vec<u8> |
///////////////////////////////////////////////////////////////////////////////
// TESTS
///////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use num_bigint::{Sign, ToBigInt};
#[test]
fn should_hash_strings() {
let str_claim = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
let b64_hash = digest_string_claim(str_claim);
assert_eq!(b64_hash, "iV5141xlrW8I217IitUHtoDC/gd/LMsgcF0zpDfUaiM=");
}
#[test]
fn should_hash_hex_claims() {
let hex_claim = "0x045a126cbbd3c66b6d542d40d91085e3f2b5db3bbc8cda0d59615deb08784e4f833e0bb082194790143c3d01cedb4a9663cb8c7bdaaad839cb794dd309213fcf30";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "nGOYvS4aqqUVAT9YjWcUzA89DlHPWaooNpBTStOaHRA=");
let hex_claim = "0x049969c7741ade2e9f89f81d12080651038838e8089682158f3d892e57609b64e2137463c816e4d52f6688d490c35a0b8e524ac6d9722eed2616dbcaf676fc2578";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "j7jJlnBN73ORKWbNbVCHG9WkoqSr+IEKDwjcsb6N4xw=");
let hex_claim = "0x049622878da186a8a31f4dc03454dbbc62365060458db174618218b51d5014fa56c8ea772234341ae326ce278091c39e30c02fa1f04792035d79311fe3283f1380";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "6CUGhnmKQchF6Ter05laVgQYcEWm0p2qlLzX24rk3Ck=");
let hex_claim = "0x04e355263aa6cbc99d2fdd0898f5ed8630115ad54e9073c41a8aa0df6d75842d8b8309d0d26a95565996b17da48f8ddff704ebcd1d8a982dc5ba8be7458c677b17";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "k0UwNtWW4UQifisXuoDiO/QGRZNNTY7giWK1Nx/hoSo=");
let hex_claim = "0x04020d62c94296539224b885c6cdf79d0c2dd437471425be26bf62ab522949f83f3eed34528b0b9a7fbe96e50ca85471c894e1aa819bbf12ff78ad07ce8b4117b2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "5EhP0859lic41RIpIrnotv/BCR7v5nVcXsXkTXlbuhI=");
let hex_claim = "0x046bd65449f336b888fc36c64708940da0d1c864a0ac46236f60b455841a4d15c9b815ed725093b3266aaca2f15210d14a1eadf34efeda3bd44a803fbf1590cfba";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "oseI7fM8wWIYslDUOXJne7AOiK+IpFL3q8MTqiZHWw8=");
let hex_claim = "0x0412cf2bd4a9613ad988f7f008a5297b8e8c98df8759a2ef9d3dfae63b3870cfbb78d35789745f82710da61a61a9c06c6f6166bf1d5ce73f9416e6b67713001aa2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9Y3JcjUHZLGmENRQpnML/+TG2EbHWjU46h+LtT9sQi8=");
let hex_claim = "0x04a2e6914db4a81ea9ec72e71b41cf88d4bc19ea54f29ae2beb3db8e4acf6531b5c163e58427831832b10fce899a030d12e82a398d4eeefe451c7e261fba973be4";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "Llx5F6lP/hbU6ZTT10Q5PF+7o1VdylvrolT8vSHJMAA=");
let hex_claim = "0x041508189a6f1737f50dd2c603c1ded8a83f97073d33cbb317e7409c1487b8351aa2b89455cda61ce8ed3ba3c130372870b187239b900da8948a53ca3e02db9aaf";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "MyRpb4ZDTwtJNflc8ZbZdmKOf+fuZjUEZkgZMCmlKxw=");
let hex_claim = "0x04f11597483032666b20ec51b27e1337577f63a5e1d5962575b555bf899380ae15482f031a297094b0c60980f3c4f1f7ad2346de5357ad82a6a3d4eef2bd1956c6";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "ytwkzcBixiBMsblxEEPpiDFV6MCBG/IY+XUc6/+xIQ8=");
let hex_claim = "0x044c01f3d0ef3d60652aa7c6489b2f10edcae1b04a10460ab2d5e4bd752eb0686cac7aa6057fd4c65606e8a4c33c0b519b1764229395cde2c8537ee01136ef0776";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "VS5c2JQT3x++ltSQHqnCFIBHttdjU2Lk2RuCGkUhnQ8=");
}
#[test]
fn should_return_32_byte_hashes() {
let hex_claim = "0x04c94699a259ec27e1cf67fe46653f0dc2f38e6d32abb33b45fc9ffe793171a44b4ff5c9517c1be22f8a47915debcf1e512717fe33986f287e79d2f3099725f179";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "uJM6qiWAIIej9CGonWlR0cU64wqtdlh+csikpC6wSgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0424a71e7c24b38aaeeebbc334113045885bfae154071426e21c021ebc47a5a85a3a691a76d8253ce6e03bf4e8fe154c89b2d967765bb060e61360305d1b8df7c5";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9wxP7eLFnTk5VDsj9rXL63r7QPKTTjCkNhjZri1nEQA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04ff51151c6bd759d723af2d0571df5e794c28b204242f4b540b0d3449eab192cafd44b241c96b39fa7dd7ead2d2265a598a23cba0f54cb79b9829d355d74304a2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "iS7BUPgGpY/WAdWyZb0s1wE21tMz5ZWBc8LJ6jgqSwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x043f10ff1b295bf4d2f24c40c93cce04210ae812dd5ad1a06d5dafd9a2e18fa1247bdf36bef6a9e45e97d246cfb8a0ab25c406cf6fe7569b17e83fd6d33563003a";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "CCxtK0qT7cTxCS7e4uONSHcPQdbQzBqrC3GQvFz4KwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0409d240a33ca9c486c090135f06c5d801aceec6eaed94b8bef1c9763b6c39708819207786fe92b22c6661957e83923e24a5ba754755b181f82fdaed2ed3914453";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "3/AaoqHPrz20tfLmhLz4ay5nrlKN5WiuvlDZkfZyfgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04220da30ddd87fed1b65ef75706507f397138d8cac8917e118157124b7e1cf45b8a38ac8c8b65a6ed662d62b09d100e53abacbc27500bb9d0365f3d6d60a981fa";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "YiEgjvg1VeCMrlWJkAuOQIgDX1fWtkHk9OBJy225UgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04acdbbdba45841ddcc1c3cb2e8b696eae69ba9d57686bff0cd58e4033a08d9dc6c272a3577508cdb18bdb1c6fcc818538664bb6dc4cc32ee668198c7be044800c";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "UPqwKZBMhq21uwgLWJUFMgCBMPzhseiziVaqN4EQvwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
}
#[test]
fn should_match_string_and_hex() {
let str_claim = "Hello";
let hex_claim = "48656c6c6f"; // Hello
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
let str_claim = "Hello UTF8 ©âëíòÚ ✨";
let hex_claim = "48656c6c6f205554463820c2a9c3a2c3abc3adc3b2c39a20e29ca8"; // Hello UTF8 ©âëíòÚ ✨
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_hash_hex_with_0x() {
let b64_hash1 = digest_hex_claim(
"48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
let b64_hash2 = digest_hex_claim(
"0x48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
let b64_hash2 = digest_hex_claim(
"0x12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
let b64_hash2 = digest_hex_claim(
"0x01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("0000000000000000000000000000000000000000000000000000000000000000");
let b64_hash2 =
digest_hex_claim("0x0000000000000000000000000000000000000000000000000000000000000000");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("8888888888888888888888888888888888888888888888888888888888888888");
let b64_hash2 =
digest_hex_claim("0x8888888888888888888888888888888888888888888888888888888888888888");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let b64_hash2 =
digest_hex_claim("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim("1234567890123456789012345678901234567890");
let b64_hash2 = digest_hex_claim("0x1234567890123456789012345678901234567890");
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_pad_bigints_in_le() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_le(), (Sign::Minus, vec![101, 4]));
let num_bytes = pad_bigint_le(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
101, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0
]
);
}
#[test]
fn should_pad_bigints_in_be() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes = pad_bigint_be(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 101
]
);
}
#[test]
fn bigint_padding_should_match() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes_le = pad_bigint_le(&bigint);
let mut num_bytes_be = pad_bigint_be(&bigint);
assert_eq!(num_bytes_le.len(), 32);
assert_eq!(num_bytes_be.len(), 32);
num_bytes_be.reverse();
assert_eq!(num_bytes_be, num_bytes_le);
}
}
| {
let mut claim_bytes = num.to_bytes_be().1;
while claim_bytes.len() < 32 {
claim_bytes = [&[0], &claim_bytes[..]].concat();
}
claim_bytes
} | identifier_body |
lib.rs | extern crate num_bigint;
use num_bigint::BigInt;
use poseidon_rs::Poseidon;
use wasm_bindgen::prelude::*;
///////////////////////////////////////////////////////////////////////////////
// EXPORTED FUNCTIONS FUNCTIONS
///////////////////////////////////////////////////////////////////////////////
#[wasm_bindgen]
pub fn digest_string_claim(claim: &str) -> String {
// Convert into a byte array
let claim_bytes = claim.as_bytes().to_vec();
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
#[wasm_bindgen]
pub fn digest_hex_claim(hex_claim: &str) -> String {
// Decode hex into a byte array
let hex_claim_clean: &str = if hex_claim.starts_with("0x") {
&hex_claim[2..] // skip 0x
} else {
hex_claim
};
let claim_bytes = match hex::decode(hex_claim_clean) {
Ok(v) => v,
Err(err) => {
return format!(
"ERROR: The given claim ({}) is not a valid hex string - {}",
hex_claim, err
);
}
};
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
///////////////////////////////////////////////////////////////////////////////
// HELPERS
///////////////////////////////////////////////////////////////////////////////
fn pad_bigint_le(num: &BigInt) -> Vec<u8> {
let mut claim_bytes = num.to_bytes_le().1;
while claim_bytes.len() < 32 {
claim_bytes.push(0);
}
claim_bytes
}
#[allow(dead_code)]
fn pad_bigint_be(num: &BigInt) -> Vec<u8> {
let mut claim_bytes = num.to_bytes_be().1;
while claim_bytes.len() < 32 {
claim_bytes = [&[0], &claim_bytes[..]].concat();
}
claim_bytes
}
///////////////////////////////////////////////////////////////////////////////
// TESTS
///////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use num_bigint::{Sign, ToBigInt};
#[test]
fn should_hash_strings() {
let str_claim = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
let b64_hash = digest_string_claim(str_claim);
assert_eq!(b64_hash, "iV5141xlrW8I217IitUHtoDC/gd/LMsgcF0zpDfUaiM=");
}
#[test]
fn | () {
let hex_claim = "0x045a126cbbd3c66b6d542d40d91085e3f2b5db3bbc8cda0d59615deb08784e4f833e0bb082194790143c3d01cedb4a9663cb8c7bdaaad839cb794dd309213fcf30";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "nGOYvS4aqqUVAT9YjWcUzA89DlHPWaooNpBTStOaHRA=");
let hex_claim = "0x049969c7741ade2e9f89f81d12080651038838e8089682158f3d892e57609b64e2137463c816e4d52f6688d490c35a0b8e524ac6d9722eed2616dbcaf676fc2578";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "j7jJlnBN73ORKWbNbVCHG9WkoqSr+IEKDwjcsb6N4xw=");
let hex_claim = "0x049622878da186a8a31f4dc03454dbbc62365060458db174618218b51d5014fa56c8ea772234341ae326ce278091c39e30c02fa1f04792035d79311fe3283f1380";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "6CUGhnmKQchF6Ter05laVgQYcEWm0p2qlLzX24rk3Ck=");
let hex_claim = "0x04e355263aa6cbc99d2fdd0898f5ed8630115ad54e9073c41a8aa0df6d75842d8b8309d0d26a95565996b17da48f8ddff704ebcd1d8a982dc5ba8be7458c677b17";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "k0UwNtWW4UQifisXuoDiO/QGRZNNTY7giWK1Nx/hoSo=");
let hex_claim = "0x04020d62c94296539224b885c6cdf79d0c2dd437471425be26bf62ab522949f83f3eed34528b0b9a7fbe96e50ca85471c894e1aa819bbf12ff78ad07ce8b4117b2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "5EhP0859lic41RIpIrnotv/BCR7v5nVcXsXkTXlbuhI=");
let hex_claim = "0x046bd65449f336b888fc36c64708940da0d1c864a0ac46236f60b455841a4d15c9b815ed725093b3266aaca2f15210d14a1eadf34efeda3bd44a803fbf1590cfba";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "oseI7fM8wWIYslDUOXJne7AOiK+IpFL3q8MTqiZHWw8=");
let hex_claim = "0x0412cf2bd4a9613ad988f7f008a5297b8e8c98df8759a2ef9d3dfae63b3870cfbb78d35789745f82710da61a61a9c06c6f6166bf1d5ce73f9416e6b67713001aa2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9Y3JcjUHZLGmENRQpnML/+TG2EbHWjU46h+LtT9sQi8=");
let hex_claim = "0x04a2e6914db4a81ea9ec72e71b41cf88d4bc19ea54f29ae2beb3db8e4acf6531b5c163e58427831832b10fce899a030d12e82a398d4eeefe451c7e261fba973be4";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "Llx5F6lP/hbU6ZTT10Q5PF+7o1VdylvrolT8vSHJMAA=");
let hex_claim = "0x041508189a6f1737f50dd2c603c1ded8a83f97073d33cbb317e7409c1487b8351aa2b89455cda61ce8ed3ba3c130372870b187239b900da8948a53ca3e02db9aaf";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "MyRpb4ZDTwtJNflc8ZbZdmKOf+fuZjUEZkgZMCmlKxw=");
let hex_claim = "0x04f11597483032666b20ec51b27e1337577f63a5e1d5962575b555bf899380ae15482f031a297094b0c60980f3c4f1f7ad2346de5357ad82a6a3d4eef2bd1956c6";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "ytwkzcBixiBMsblxEEPpiDFV6MCBG/IY+XUc6/+xIQ8=");
let hex_claim = "0x044c01f3d0ef3d60652aa7c6489b2f10edcae1b04a10460ab2d5e4bd752eb0686cac7aa6057fd4c65606e8a4c33c0b519b1764229395cde2c8537ee01136ef0776";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "VS5c2JQT3x++ltSQHqnCFIBHttdjU2Lk2RuCGkUhnQ8=");
}
#[test]
fn should_return_32_byte_hashes() {
let hex_claim = "0x04c94699a259ec27e1cf67fe46653f0dc2f38e6d32abb33b45fc9ffe793171a44b4ff5c9517c1be22f8a47915debcf1e512717fe33986f287e79d2f3099725f179";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "uJM6qiWAIIej9CGonWlR0cU64wqtdlh+csikpC6wSgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0424a71e7c24b38aaeeebbc334113045885bfae154071426e21c021ebc47a5a85a3a691a76d8253ce6e03bf4e8fe154c89b2d967765bb060e61360305d1b8df7c5";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9wxP7eLFnTk5VDsj9rXL63r7QPKTTjCkNhjZri1nEQA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04ff51151c6bd759d723af2d0571df5e794c28b204242f4b540b0d3449eab192cafd44b241c96b39fa7dd7ead2d2265a598a23cba0f54cb79b9829d355d74304a2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "iS7BUPgGpY/WAdWyZb0s1wE21tMz5ZWBc8LJ6jgqSwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x043f10ff1b295bf4d2f24c40c93cce04210ae812dd5ad1a06d5dafd9a2e18fa1247bdf36bef6a9e45e97d246cfb8a0ab25c406cf6fe7569b17e83fd6d33563003a";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "CCxtK0qT7cTxCS7e4uONSHcPQdbQzBqrC3GQvFz4KwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0409d240a33ca9c486c090135f06c5d801aceec6eaed94b8bef1c9763b6c39708819207786fe92b22c6661957e83923e24a5ba754755b181f82fdaed2ed3914453";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "3/AaoqHPrz20tfLmhLz4ay5nrlKN5WiuvlDZkfZyfgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04220da30ddd87fed1b65ef75706507f397138d8cac8917e118157124b7e1cf45b8a38ac8c8b65a6ed662d62b09d100e53abacbc27500bb9d0365f3d6d60a981fa";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "YiEgjvg1VeCMrlWJkAuOQIgDX1fWtkHk9OBJy225UgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04acdbbdba45841ddcc1c3cb2e8b696eae69ba9d57686bff0cd58e4033a08d9dc6c272a3577508cdb18bdb1c6fcc818538664bb6dc4cc32ee668198c7be044800c";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "UPqwKZBMhq21uwgLWJUFMgCBMPzhseiziVaqN4EQvwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
}
#[test]
fn should_match_string_and_hex() {
let str_claim = "Hello";
let hex_claim = "48656c6c6f"; // Hello
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
let str_claim = "Hello UTF8 ©âëíòÚ ✨";
let hex_claim = "48656c6c6f205554463820c2a9c3a2c3abc3adc3b2c39a20e29ca8"; // Hello UTF8 ©âëíòÚ ✨
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_hash_hex_with_0x() {
let b64_hash1 = digest_hex_claim(
"48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
let b64_hash2 = digest_hex_claim(
"0x48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
let b64_hash2 = digest_hex_claim(
"0x12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
let b64_hash2 = digest_hex_claim(
"0x01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("0000000000000000000000000000000000000000000000000000000000000000");
let b64_hash2 =
digest_hex_claim("0x0000000000000000000000000000000000000000000000000000000000000000");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("8888888888888888888888888888888888888888888888888888888888888888");
let b64_hash2 =
digest_hex_claim("0x8888888888888888888888888888888888888888888888888888888888888888");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let b64_hash2 =
digest_hex_claim("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim("1234567890123456789012345678901234567890");
let b64_hash2 = digest_hex_claim("0x1234567890123456789012345678901234567890");
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_pad_bigints_in_le() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_le(), (Sign::Minus, vec![101, 4]));
let num_bytes = pad_bigint_le(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
101, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0
]
);
}
#[test]
fn should_pad_bigints_in_be() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes = pad_bigint_be(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 101
]
);
}
#[test]
fn bigint_padding_should_match() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes_le = pad_bigint_le(&bigint);
let mut num_bytes_be = pad_bigint_be(&bigint);
assert_eq!(num_bytes_le.len(), 32);
assert_eq!(num_bytes_be.len(), 32);
num_bytes_be.reverse();
assert_eq!(num_bytes_be, num_bytes_le);
}
}
| should_hash_hex_claims | identifier_name |
lib.rs | extern crate num_bigint;
use num_bigint::BigInt;
use poseidon_rs::Poseidon;
use wasm_bindgen::prelude::*;
///////////////////////////////////////////////////////////////////////////////
// EXPORTED FUNCTIONS FUNCTIONS
///////////////////////////////////////////////////////////////////////////////
#[wasm_bindgen]
pub fn digest_string_claim(claim: &str) -> String {
// Convert into a byte array
let claim_bytes = claim.as_bytes().to_vec();
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
#[wasm_bindgen]
pub fn digest_hex_claim(hex_claim: &str) -> String {
// Decode hex into a byte array
let hex_claim_clean: &str = if hex_claim.starts_with("0x") {
&hex_claim[2..] // skip 0x
} else {
hex_claim
};
let claim_bytes = match hex::decode(hex_claim_clean) {
Ok(v) => v,
Err(err) => {
return format!(
"ERROR: The given claim ({}) is not a valid hex string - {}",
hex_claim, err
);
}
};
// Hash
let poseidon = Poseidon::new();
let hash = match poseidon.hash_bytes(claim_bytes) {
Ok(v) => v,
Err(reason) => {
return format!("ERROR: {}", reason);
}
};
let claim_bytes = pad_bigint_le(&hash);
base64::encode(claim_bytes)
}
///////////////////////////////////////////////////////////////////////////////
// HELPERS
///////////////////////////////////////////////////////////////////////////////
fn pad_bigint_le(num: &BigInt) -> Vec<u8> {
let mut claim_bytes = num.to_bytes_le().1;
while claim_bytes.len() < 32 {
claim_bytes.push(0);
}
claim_bytes
}
#[allow(dead_code)]
fn pad_bigint_be(num: &BigInt) -> Vec<u8> {
let mut claim_bytes = num.to_bytes_be().1;
while claim_bytes.len() < 32 {
claim_bytes = [&[0], &claim_bytes[..]].concat();
}
claim_bytes
}
///////////////////////////////////////////////////////////////////////////////
// TESTS
///////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use num_bigint::{Sign, ToBigInt};
#[test]
fn should_hash_strings() {
let str_claim = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
let b64_hash = digest_string_claim(str_claim);
assert_eq!(b64_hash, "iV5141xlrW8I217IitUHtoDC/gd/LMsgcF0zpDfUaiM=");
}
#[test]
fn should_hash_hex_claims() {
let hex_claim = "0x045a126cbbd3c66b6d542d40d91085e3f2b5db3bbc8cda0d59615deb08784e4f833e0bb082194790143c3d01cedb4a9663cb8c7bdaaad839cb794dd309213fcf30";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "nGOYvS4aqqUVAT9YjWcUzA89DlHPWaooNpBTStOaHRA=");
let hex_claim = "0x049969c7741ade2e9f89f81d12080651038838e8089682158f3d892e57609b64e2137463c816e4d52f6688d490c35a0b8e524ac6d9722eed2616dbcaf676fc2578";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "j7jJlnBN73ORKWbNbVCHG9WkoqSr+IEKDwjcsb6N4xw=");
let hex_claim = "0x049622878da186a8a31f4dc03454dbbc62365060458db174618218b51d5014fa56c8ea772234341ae326ce278091c39e30c02fa1f04792035d79311fe3283f1380";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "6CUGhnmKQchF6Ter05laVgQYcEWm0p2qlLzX24rk3Ck=");
let hex_claim = "0x04e355263aa6cbc99d2fdd0898f5ed8630115ad54e9073c41a8aa0df6d75842d8b8309d0d26a95565996b17da48f8ddff704ebcd1d8a982dc5ba8be7458c677b17";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "k0UwNtWW4UQifisXuoDiO/QGRZNNTY7giWK1Nx/hoSo=");
let hex_claim = "0x04020d62c94296539224b885c6cdf79d0c2dd437471425be26bf62ab522949f83f3eed34528b0b9a7fbe96e50ca85471c894e1aa819bbf12ff78ad07ce8b4117b2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "5EhP0859lic41RIpIrnotv/BCR7v5nVcXsXkTXlbuhI=");
let hex_claim = "0x046bd65449f336b888fc36c64708940da0d1c864a0ac46236f60b455841a4d15c9b815ed725093b3266aaca2f15210d14a1eadf34efeda3bd44a803fbf1590cfba";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "oseI7fM8wWIYslDUOXJne7AOiK+IpFL3q8MTqiZHWw8=");
let hex_claim = "0x0412cf2bd4a9613ad988f7f008a5297b8e8c98df8759a2ef9d3dfae63b3870cfbb78d35789745f82710da61a61a9c06c6f6166bf1d5ce73f9416e6b67713001aa2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9Y3JcjUHZLGmENRQpnML/+TG2EbHWjU46h+LtT9sQi8=");
let hex_claim = "0x04a2e6914db4a81ea9ec72e71b41cf88d4bc19ea54f29ae2beb3db8e4acf6531b5c163e58427831832b10fce899a030d12e82a398d4eeefe451c7e261fba973be4";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "Llx5F6lP/hbU6ZTT10Q5PF+7o1VdylvrolT8vSHJMAA=");
let hex_claim = "0x041508189a6f1737f50dd2c603c1ded8a83f97073d33cbb317e7409c1487b8351aa2b89455cda61ce8ed3ba3c130372870b187239b900da8948a53ca3e02db9aaf";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "MyRpb4ZDTwtJNflc8ZbZdmKOf+fuZjUEZkgZMCmlKxw=");
let hex_claim = "0x04f11597483032666b20ec51b27e1337577f63a5e1d5962575b555bf899380ae15482f031a297094b0c60980f3c4f1f7ad2346de5357ad82a6a3d4eef2bd1956c6";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "ytwkzcBixiBMsblxEEPpiDFV6MCBG/IY+XUc6/+xIQ8=");
let hex_claim = "0x044c01f3d0ef3d60652aa7c6489b2f10edcae1b04a10460ab2d5e4bd752eb0686cac7aa6057fd4c65606e8a4c33c0b519b1764229395cde2c8537ee01136ef0776";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "VS5c2JQT3x++ltSQHqnCFIBHttdjU2Lk2RuCGkUhnQ8=");
}
#[test]
fn should_return_32_byte_hashes() {
let hex_claim = "0x04c94699a259ec27e1cf67fe46653f0dc2f38e6d32abb33b45fc9ffe793171a44b4ff5c9517c1be22f8a47915debcf1e512717fe33986f287e79d2f3099725f179";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "uJM6qiWAIIej9CGonWlR0cU64wqtdlh+csikpC6wSgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0424a71e7c24b38aaeeebbc334113045885bfae154071426e21c021ebc47a5a85a3a691a76d8253ce6e03bf4e8fe154c89b2d967765bb060e61360305d1b8df7c5";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "9wxP7eLFnTk5VDsj9rXL63r7QPKTTjCkNhjZri1nEQA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04ff51151c6bd759d723af2d0571df5e794c28b204242f4b540b0d3449eab192cafd44b241c96b39fa7dd7ead2d2265a598a23cba0f54cb79b9829d355d74304a2";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "iS7BUPgGpY/WAdWyZb0s1wE21tMz5ZWBc8LJ6jgqSwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len(); | let hex_claim = "0x043f10ff1b295bf4d2f24c40c93cce04210ae812dd5ad1a06d5dafd9a2e18fa1247bdf36bef6a9e45e97d246cfb8a0ab25c406cf6fe7569b17e83fd6d33563003a";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "CCxtK0qT7cTxCS7e4uONSHcPQdbQzBqrC3GQvFz4KwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x0409d240a33ca9c486c090135f06c5d801aceec6eaed94b8bef1c9763b6c39708819207786fe92b22c6661957e83923e24a5ba754755b181f82fdaed2ed3914453";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "3/AaoqHPrz20tfLmhLz4ay5nrlKN5WiuvlDZkfZyfgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04220da30ddd87fed1b65ef75706507f397138d8cac8917e118157124b7e1cf45b8a38ac8c8b65a6ed662d62b09d100e53abacbc27500bb9d0365f3d6d60a981fa";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "YiEgjvg1VeCMrlWJkAuOQIgDX1fWtkHk9OBJy225UgA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
let hex_claim = "0x04acdbbdba45841ddcc1c3cb2e8b696eae69ba9d57686bff0cd58e4033a08d9dc6c272a3577508cdb18bdb1c6fcc818538664bb6dc4cc32ee668198c7be044800c";
let b64_hash = digest_hex_claim(hex_claim);
assert_eq!(b64_hash, "UPqwKZBMhq21uwgLWJUFMgCBMPzhseiziVaqN4EQvwA=");
let len = base64::decode(b64_hash)
.expect("The hash is not a valid base64")
.len();
assert_eq!(len, 32);
}
#[test]
fn should_match_string_and_hex() {
let str_claim = "Hello";
let hex_claim = "48656c6c6f"; // Hello
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
let str_claim = "Hello UTF8 ©âëíòÚ ✨";
let hex_claim = "48656c6c6f205554463820c2a9c3a2c3abc3adc3b2c39a20e29ca8"; // Hello UTF8 ©âëíòÚ ✨
let b64_hash1 = digest_string_claim(str_claim);
let b64_hash2 = digest_hex_claim(hex_claim);
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_hash_hex_with_0x() {
let b64_hash1 = digest_hex_claim(
"48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
let b64_hash2 = digest_hex_claim(
"0x48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f48656c6c6f",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
let b64_hash2 = digest_hex_claim(
"0x12345678901234567890123456789012345678901234567890123456789012345678901234567890",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim(
"01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
let b64_hash2 = digest_hex_claim(
"0x01234567890123456789012345678901234567890123456789012345678901234567890123456789",
);
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("0000000000000000000000000000000000000000000000000000000000000000");
let b64_hash2 =
digest_hex_claim("0x0000000000000000000000000000000000000000000000000000000000000000");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("8888888888888888888888888888888888888888888888888888888888888888");
let b64_hash2 =
digest_hex_claim("0x8888888888888888888888888888888888888888888888888888888888888888");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 =
digest_hex_claim("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
let b64_hash2 =
digest_hex_claim("0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
assert_eq!(b64_hash1, b64_hash2);
let b64_hash1 = digest_hex_claim("1234567890123456789012345678901234567890");
let b64_hash2 = digest_hex_claim("0x1234567890123456789012345678901234567890");
assert_eq!(b64_hash1, b64_hash2);
}
#[test]
fn should_pad_bigints_in_le() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_le(), (Sign::Minus, vec![101, 4]));
let num_bytes = pad_bigint_le(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
101, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0
]
);
}
#[test]
fn should_pad_bigints_in_be() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes = pad_bigint_be(&bigint);
assert_eq!(num_bytes.len(), 32);
assert_eq!(
num_bytes,
vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 4, 101
]
);
}
#[test]
fn bigint_padding_should_match() {
let bigint = -1125.to_bigint().unwrap();
assert_eq!(bigint.to_bytes_be(), (Sign::Minus, vec![4, 101]));
let num_bytes_le = pad_bigint_le(&bigint);
let mut num_bytes_be = pad_bigint_be(&bigint);
assert_eq!(num_bytes_le.len(), 32);
assert_eq!(num_bytes_be.len(), 32);
num_bytes_be.reverse();
assert_eq!(num_bytes_be, num_bytes_le);
}
} | assert_eq!(len, 32);
| random_line_split |
reader.rs | //a Imports
use crate::{Char, Error, Result, StreamPosition};
//a Constants
/// [BUFFER_SIZE] is the maximum number of bytes held in the UTF-8
/// character reader from the incoming stream. The larger the value,
/// the larger the data read requests from the stream. This value must be larger than `BUFFER_SLACK`.
/// For testing purposes this value should be small (such as 8), to catch corner cases in the code where UTF-8 encodings
/// run over the end of a buffer; for performance, this value should be larger (e.g. 2048).
const BUFFER_SIZE : usize = 2048;
/// [BUFFER_SLACK] must be at least 4 - the maximum number of bytes in
/// a UTF-8 encoding; when fewer than BUFFER_SLACK bytes are in the
/// buffer a read from the buffer stream is performed - attempting to
/// fill the `BUFFER_SIZE` buffer with current data and new read data.
/// There is no reason why `BUFFER_SLACK` should be larger than 4.
const BUFFER_SLACK : usize = 4;
//a Reader
//tp Reader
/// The [Reader] provides a stream of characters by UTF-8 decoding a byte
/// stream provided by any type that implements the [std::io::Read] stream trait.
///
/// It utilizes an internal buffer of bytes that are filled as
/// required from the read stream; it maintains a position with the
/// stream (line and character) for the next character, and provides
/// the ability to get a stream of characters from the stream with any
/// UTF-8 encoding errors reported by line and character.
///
/// The stream can be reclaimed by completing the use of the
/// [Reader], in which case any unused bytes that have been read from
/// the stream are also returned.
///
/// If simple short files are to be read, using
/// [std::fs::read_to_string] may a better approach than using the
/// `Reader`
///
/// # Example
///
/// ```
/// use utf8_read::Reader;
/// let str = "This is a \u{1f600} string\nWith a newline\n";
/// let mut buf_bytes = str.as_bytes();
/// let mut reader = Reader::new(&mut buf_bytes);
/// for x in reader.into_iter() {
/// // use char x
/// }
/// ```
///
/// This example could just as easily use 'for x in str'
///
/// The [Reader], though, can be used over any object supporting the
/// [Read](std::io::Read) trait such as a a
/// [TcpStrema](std::net::TcpStream).
///
pub struct Reader<R:std::io::Read> {
/// The reader from which data is to be fetched
buf_reader : R,
/// `eof_on_no_data` defaults to true; it can be set to false to indicate that
/// if the stream has no data then the reader should return Char::NoData
/// when its buffer does not contain a complete UTF-8 character
eof_on_no_data : bool,
/// `eof` is set when the stream is complete - any character
/// requested once `eof` is asserted will be `Char::Eof`.
eof : bool,
/// Internal buffer
current : [u8; BUFFER_SIZE],
/// Offset of the first byte within the internal buffer that is valid
start : usize,
/// `Offset of the last byte + 1 within the internal buffer that is valid
end : usize,
/// `valid_end` is the last byte + 1 within the internal buffer
/// used by a valid UTF-8 byte stream that begins with `start` As
/// such `start` <= `valid_end` <= `end` If `start` < `valid_end`
/// then the bytes in the buffer between the two are a valid UTF-8
/// byte stream; this should perhaps be kept in a string inside
/// the structure for performance
valid_end : usize,
/// position in the file
stream_pos : StreamPosition,
}
//ip Reader
impl <R:std::io::Read> Reader<R> {
//fp new
/// Returns a new UTF-8 character [Reader], with a stream position
/// set to the normal start of the file - byte 0, line 1,
/// character 1
///
/// The [Reader] will default to handling zero bytes returned by
/// the stream as an EOF; to modify this default behavior use the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) builder to
/// modify the construction.
pub fn new(buf_reader: R) -> Self {
Self {
buf_reader,
eof_on_no_data : true,
eof : false,
current : [0; BUFFER_SIZE],
start : 0,
end : 0,
valid_end : 0,
stream_pos : StreamPosition::new(),
}
}
//cp set_eof_on_no_data
/// Build pattern function to set the `eof_on_no_data` on the [Reader] to true or false
///
/// This should not need to be set dynamically; an external source
/// can set the eof flag directly if required using the
/// [set_eof](Reader::set_eof) method
pub fn set_eof_on_no_data(mut self, eof_on_no_data:bool) -> Self {
self.eof_on_no_data = eof_on_no_data;
self
}
//mp set_position
/// Set the current stream position
///
/// This may be used if, for example, a stream is being restarted;
/// or if a UTF8 encoded stream occurs in the middle of a byte
/// file.
pub fn set_position(&mut self, stream_pos:StreamPosition) {
self.stream_pos = stream_pos;
}
//mp set_eof
/// Set the eof indicator as required; when `true` this will halt
/// any new data being returned, and the internal buffer points
/// will not change when more data is requested of the [Reader].
///
/// This method may be invoked on behalf of a stream that has
/// completed, but that cannot indicate this by a read operation
/// returning zero bytes. For example, it may be used by an
/// application which uses a TcpStream for data, and which needs
/// to ensure future operations on the [Reader] return no more
/// data after the TcpStream has closed.
pub fn set_eof(&mut self, eof:bool) {
self.eof = eof;
}
//mp eof
/// Get the current eof indicator value.
///
/// The `EOF` indication is normally set for [Reader]s that have a
/// stream that returns no data on a read operation, with that
/// behavior modified by the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) method.
pub fn eof(&self) -> bool {
self.eof
}
//mp complete
/// Finish with the stream, returning the buffer handle, the
/// position of the *next* character in the stream (if there were
/// to be one), and any unused buffer data.
pub fn complete(self) -> (R, StreamPosition, Vec<u8>) {
(self.buf_reader, self.stream_pos, self.current[self.start..self.end].into())
}
//mp drop_buffer
/// Drop the unconsumed data, for example after it has been borrowed and used, and before [complete](Reader::complete) is invoked
pub fn drop_buffer(&mut self) {
self.stream_pos.move_on_bytes(self.end - self.start);
self.start = self.end;
}
//mp buffer_is_empty
/// Returns true if the internal buffer is empty
pub fn buffer_is_empty(&self) -> bool {
self.start == self.end
}
//mp borrow_buffer
/// Borrow the data held in the [Reader]'s buffer.
pub fn borrow_buffer(&self) -> &[u8] {
&self.current[self.start..self.end]
}
//mp borrow_pos
/// Borrow the stream position of the next character to be returned
pub fn borrow_pos(&self) -> &StreamPosition {
&self.stream_pos
}
//mp borrow
/// Borrow the underlying stream
pub fn borrow(&self) -> &R {
&self.buf_reader
}
//mp borrow_mut
/// Borrow the underlying stream as a mutable reference
pub fn borrow_mut(&mut self) -> &mut R {
&mut self.buf_reader
}
//fi fetch_input
/// Fetch input from the underlying stream into the internal buffer,
/// moving valid data to the start of the buffer first if
/// required. This method should only be invoked if more data is
/// required; it is relatively code-heavy.
fn fetch_input(&mut self) -> Result<usize> {
if self.start>BUFFER_SIZE-BUFFER_SLACK {
// Move everything down by self.start
let n = self.end - self.start;
if n>0 {
for i in 0..n {
self.current[i] = self.current[self.start+i];
}
}
self.valid_end -= self.start;
self.start = 0; // == self.start - self.start
self.end = n; // == self.end - self.start
}
let n = self.buf_reader.read( &mut self.current[self.end..BUFFER_SIZE] )?;
self.end += n;
if n==0 && self.eof_on_no_data {
self.eof = true;
}
Ok(n)
}
//mp next_char
/// Return the next character from the stream, if one is available, or [EOF](Char::Eof).
///
/// If there is no data - or not enough data - from the underlying stream, and the [Reader] is operating with the underlying stream *not* indicating EOF with a zero-byte read result, then [NoData](Char::NoData) is returned.
///
/// # Errors
///
/// May return [Error::MalformedUtf8] if the next bytes in the stream do not make a well-formed UTF8 character.
///
/// May return [Error::IoError] if the underlying stream has an IO Error.
pub fn next_char(&mut self) -> Result<Char> {
if self.eof {
Ok(Char::Eof)
} else if self.start == self.end { // no data present, try reading data
if self.fetch_input()? == 0 {
Ok(Char::NoData)
} else {
self.next_char()
}
} else if self.start < self.valid_end { // there is valid UTF-8 data at buffer+self.start
let s = {
// std::str::from_utf8(&self.current[self.start..self.valid_end]).unwrap()
unsafe {
std::str::from_utf8_unchecked(&self.current[self.start..self.valid_end])
}
};
let ch = s.chars().next().unwrap();
let n = ch.len_utf8();
self.start += n;
self.stream_pos.move_by(n, ch);
Ok(Char::Char(ch))
} else { // there is data but it may or may not be valid
match std::str::from_utf8(&self.current[self.start..self.end]) {
Ok(_) => { // the data is valid, mark it and the return from there
self.valid_end = self.end;
self.next_char()
}
Err(e) => { // the data is not all valid
if e.valid_up_to()>0 { // some bytes form valid UTF-8 - mark them and return that data
self.valid_end = self.start+e.valid_up_to();
self.next_char()
} else | r
},
}
}
},
}
}
}
//zz All done
}
//ip Iterator for Reader - iterate over characters
//
// allow missing doc code examples for this as it *has* an example but
// rustdoc does not pick it up.
#[allow(missing_doc_code_examples)]
impl<'a, R: std::io::Read> Iterator for &'a mut Reader<R> {
    type Item = Result<char>;
    //mp next - return next character or None if end of file
    /// Yields `Some(Ok(char))` for each decoded character,
    /// `Some(Err(..))` on a decoding or I/O error, and `None` once
    /// the stream reports EOF or no data.
    fn next(&mut self) -> Option<Self::Item> {
        match self.next_char() {
            Err(e) => Some(Err(e)),
            Ok(Char::Char(ch)) => Some(Ok(ch)),
            Ok(_) => None,
        }
    }
    //zz All done
}
| { // no valid data - check it is just incomplete, or an actual error
match e.error_len() {
None => { // incomplete UTF-8 fetch more
match self.fetch_input()? {
0 => { // ... and eof reached when incomplete UTF8 is present
if self.eof {
Error::malformed_utf8(self.stream_pos, self.end-self.start)
} else {
Ok(Char::NoData)
}
}
_ => { // ... but got more data so try that!
self.next_char()
}
}
}
Some(n) => { // Bad UTF-8 with n bytes used
let r = Error::malformed_utf8(self.stream_pos, n);
self.stream_pos.move_on_bytes(n);
self.start += n; | conditional_block |
reader.rs | //a Imports
use crate::{Char, Error, Result, StreamPosition};
//a Constants
/// [BUFFER_SIZE] is the maximum number of bytes held in the UTF-8
/// character reader from the incoming stream. The larger the value,
/// the larger the data read requests from the stream. This value must
/// be larger than `BUFFER_SLACK`.
///
/// For testing purposes this value should be small (such as 8), to
/// catch corner cases in the code where UTF-8 encodings run over the
/// end of a buffer; for performance, this value should be larger
/// (e.g. 2048).
const BUFFER_SIZE : usize = 2048;
/// [BUFFER_SLACK] must be at least 4 - the maximum number of bytes in
/// a UTF-8 encoding; when fewer than `BUFFER_SLACK` bytes of free space
/// remain a read from the underlying stream is performed - attempting
/// to fill the `BUFFER_SIZE` buffer with current data and new read
/// data. There is no reason why `BUFFER_SLACK` should be larger than 4.
const BUFFER_SLACK : usize = 4;
//a Reader
//tp Reader
/// The [Reader] provides a stream of characters by UTF-8 decoding a byte
/// stream provided by any type that implements the [std::io::Read] stream trait.
///
/// It utilizes an internal buffer of bytes that are filled as
/// required from the read stream; it maintains a position with the
/// stream (line and character) for the next character, and provides
/// the ability to get a stream of characters from the stream with any
/// UTF-8 encoding errors reported by line and character.
///
/// The stream can be reclaimed by completing the use of the
/// [Reader], in which case any unused bytes that have been read from
/// the stream are also returned.
///
/// If simple short files are to be read, using
/// [std::fs::read_to_string] may be a better approach than using the
/// `Reader`.
///
/// # Example
///
/// ```
/// use utf8_read::Reader;
/// let str = "This is a \u{1f600} string\nWith a newline\n";
/// let mut buf_bytes = str.as_bytes();
/// let mut reader = Reader::new(&mut buf_bytes);
/// for x in reader.into_iter() {
///     // use char x
/// }
/// ```
///
/// This example could just as easily use 'for x in str'
///
/// The [Reader], though, can be used over any object supporting the
/// [Read](std::io::Read) trait such as a
/// [TcpStream](std::net::TcpStream).
///
pub struct Reader<R:std::io::Read> {
    /// The reader from which data is to be fetched
    buf_reader : R,
    /// `eof_on_no_data` defaults to true; it can be set to false to indicate that
    /// if the stream has no data then the reader should return Char::NoData
    /// when its buffer does not contain a complete UTF-8 character
    eof_on_no_data : bool,
    /// `eof` is set when the stream is complete - any character
    /// requested once `eof` is asserted will be `Char::Eof`.
    eof : bool,
    /// Internal buffer holding bytes read from the stream but not yet decoded
    current : [u8; BUFFER_SIZE],
    /// Offset of the first byte within the internal buffer that is valid
    start : usize,
    /// Offset of the last byte + 1 within the internal buffer that is valid
    end : usize,
    /// `valid_end` is the last byte + 1 within the internal buffer
    /// used by a valid UTF-8 byte stream that begins with `start`. As
    /// such `start` <= `valid_end` <= `end`. If `start` < `valid_end`
    /// then the bytes in the buffer between the two are a valid UTF-8
    /// byte stream; this should perhaps be kept in a string inside
    /// the structure for performance
    valid_end : usize,
    /// Position in the stream of the next character to be returned
    stream_pos : StreamPosition,
}
//ip Reader
impl <R:std::io::Read> Reader<R> {
//fp new
/// Returns a new UTF-8 character [Reader], with a stream position
/// set to the normal start of the file - byte 0, line 1,
/// character 1
///
/// The [Reader] will default to handling zero bytes returned by
/// the stream as an EOF; to modify this default behavior use the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) builder to
/// modify the construction.
pub fn new(buf_reader: R) -> Self {
Self {
buf_reader,
eof_on_no_data : true,
eof : false,
current : [0; BUFFER_SIZE],
start : 0,
end : 0,
valid_end : 0,
stream_pos : StreamPosition::new(),
}
}
//cp set_eof_on_no_data
/// Build pattern function to set the `eof_on_no_data` on the [Reader] to true or false
///
/// This should not need to be set dynamically; an external source
/// can set the eof flag directly if required using the
/// [set_eof](Reader::set_eof) method
pub fn set_eof_on_no_data(mut self, eof_on_no_data:bool) -> Self {
self.eof_on_no_data = eof_on_no_data;
self
}
    //mp set_position
    /// Set the current stream position.
    ///
    /// This may be used if, for example, a stream is being restarted,
    /// or if a UTF-8 encoded stream occurs in the middle of a byte
    /// file; subsequent characters are reported relative to this
    /// position.
    pub fn set_position(&mut self, stream_pos:StreamPosition) {
        self.stream_pos = stream_pos;
    }
    //mp set_eof
    /// Set the eof indicator as required; when `true` this will halt
    /// any new data being returned, and the internal buffer pointers
    /// will not change when more data is requested of the [Reader].
    ///
    /// This method may be invoked on behalf of a stream that has
    /// completed, but that cannot indicate this by a read operation
    /// returning zero bytes. For example, it may be used by an
    /// application which uses a TcpStream for data, and which needs
    /// to ensure future operations on the [Reader] return no more
    /// data after the TcpStream has closed.
    pub fn set_eof(&mut self, eof:bool) {
        self.eof = eof;
    }
    //mp eof
    /// Get the current eof indicator value.
    ///
    /// The `EOF` indication is normally set for [Reader]s whose
    /// stream returns no data on a read operation, with that
    /// behavior modified by the
    /// [set_eof_on_no_data](Reader::set_eof_on_no_data) method; it
    /// may also have been forced via [set_eof](Reader::set_eof).
    pub fn eof(&self) -> bool {
        self.eof
    }
//mp complete
/// Finish with the stream, returning the buffer handle, the
/// position of the *next* character in the stream (if there were
/// to be one), and any unused buffer data.
pub fn complete(self) -> (R, StreamPosition, Vec<u8>) {
(self.buf_reader, self.stream_pos, self.current[self.start..self.end].into())
}
//mp drop_buffer
/// Drop the unconsumed data, for example after it has been borrowed and used, and before [complete](Reader::complete) is invoked
pub fn drop_buffer(&mut self) {
self.stream_pos.move_on_bytes(self.end - self.start);
self.start = self.end;
}
    //mp buffer_is_empty
    /// Returns true if the internal buffer is empty - i.e. there are
    /// no bytes read from the stream awaiting decode.
    pub fn buffer_is_empty(&self) -> bool {
        self.start == self.end
    }
    //mp borrow_buffer
    /// Borrow the data held in the [Reader]'s buffer - the bytes read
    /// from the stream that have not yet been consumed as characters.
    pub fn borrow_buffer(&self) -> &[u8] {
        &self.current[self.start..self.end]
    }
    //mp borrow_pos
    /// Borrow the stream position of the next character to be returned.
    pub fn borrow_pos(&self) -> &StreamPosition {
        &self.stream_pos
    }
    //mp borrow
    /// Borrow the underlying stream immutably.
    pub fn borrow(&self) -> &R {
        &self.buf_reader
    }
    //mp borrow_mut
    /// Borrow the underlying stream as a mutable reference; note that
    /// reading from it directly will bypass the [Reader]'s buffer and
    /// position tracking.
    pub fn borrow_mut(&mut self) -> &mut R {
        &mut self.buf_reader
    }
//fi fetch_input
/// Fetch input from the underlying stream into the internal buffer,
/// moving valid data to the start of the buffer first if
/// required. This method should only be invoked if more data is
/// required; it is relatively code-heavy.
fn fetch_input(&mut self) -> Result<usize> {
if self.start>BUFFER_SIZE-BUFFER_SLACK {
// Move everything down by self.start
let n = self.end - self.start;
if n>0 {
for i in 0..n {
self.current[i] = self.current[self.start+i];
}
}
self.valid_end -= self.start;
self.start = 0; // == self.start - self.start
self.end = n; // == self.end - self.start
}
let n = self.buf_reader.read( &mut self.current[self.end..BUFFER_SIZE] )?;
self.end += n;
if n==0 && self.eof_on_no_data {
self.eof = true;
}
Ok(n)
}
    //mp next_char
    /// Return the next character from the stream, if one is available, or [EOF](Char::Eof).
    ///
    /// If there is no data - or not enough data - from the underlying stream, and the [Reader] is operating with the underlying stream *not* indicating EOF with a zero-byte read result, then [NoData](Char::NoData) is returned.
    ///
    /// # Errors
    ///
    /// May return [Error::MalformedUtf8] if the next bytes in the stream do not make a well-formed UTF8 character.
    ///
    /// May return [Error::IoError] if the underlying stream has an IO Error.
    pub fn next_char(&mut self) -> Result<Char> {
        if self.eof {
            Ok(Char::Eof)
        } else if self.start == self.end { // no data present, try reading data
            if self.fetch_input()? == 0 {
                Ok(Char::NoData)
            } else {
                // got some bytes; recurse to decode them
                self.next_char()
            }
        } else if self.start < self.valid_end { // there is valid UTF-8 data at buffer+self.start
            let s = {
                // SAFETY: the bytes in self.start..self.valid_end were
                // verified to be valid UTF-8 by std::str::from_utf8 in
                // the branch below before valid_end was advanced, and
                // start never moves backwards past validated bytes.
                unsafe {
                    std::str::from_utf8_unchecked(&self.current[self.start..self.valid_end])
                }
            };
            // s is a non-empty valid str, so there is a first char
            let ch = s.chars().next().unwrap();
            let n = ch.len_utf8();
            self.start += n;
            self.stream_pos.move_by(n, ch);
            Ok(Char::Char(ch))
        } else { // there is data but it may or may not be valid
            match std::str::from_utf8(&self.current[self.start..self.end]) {
                Ok(_) => { // the data is all valid; mark it and decode from there
                    self.valid_end = self.end;
                    self.next_char()
                }
                Err(e) => { // the data is not all valid
                    if e.valid_up_to()>0 { // some bytes form valid UTF-8 - mark them and return that data
                        self.valid_end = self.start+e.valid_up_to();
                        self.next_char()
                    } else { // no valid data - check it is just incomplete, or an actual error
                        match e.error_len() {
                            None => { // incomplete UTF-8 - fetch more bytes
                                match self.fetch_input()? {
                                    0 => { // ... and eof reached when incomplete UTF8 is present
                                        if self.eof {
                                            Error::malformed_utf8(self.stream_pos, self.end-self.start)
                                        } else {
                                            Ok(Char::NoData)
                                        }
                                    }
                                    _ => { // ... but got more data so try that!
                                        self.next_char()
                                    }
                                }
                            }
                            Some(n) => { // Bad UTF-8 with n bytes used
                                // Build the error first (it captures the
                                // current position), then skip the bad
                                // bytes so later calls can resume decoding.
                                let r = Error::malformed_utf8(self.stream_pos, n);
                                self.stream_pos.move_on_bytes(n);
                                self.start += n;
                                r
                            },
                        }
                    }
                },
            }
        }
    }
//zz All done
}
//ip Iterator for Reader - iterate over characters
//
// allow missing doc code examples for this as it *has* an example but
// rustdoc does not pick it up.
#[allow(missing_doc_code_examples)] | type Item = Result<char>;
//mp next - return next character or None if end of file
fn next(&mut self) -> Option<Self::Item> {
match self.next_char() {
Ok(Char::Char(ch)) => Some(Ok(ch)),
Ok(_) => None,
Err(x) => Some(Err(x)),
}
}
//zz All done
} | impl <'a, R:std::io::Read> Iterator for &'a mut Reader<R> {
// we will be counting with usize | random_line_split |
reader.rs | //a Imports
use crate::{Char, Error, Result, StreamPosition};
//a Constants
/// [BUFFER_SIZE] is the maximum number of bytes held in the UTF-8
/// character reader from the incoming stream. The larger the value,
/// the larger the data read requests from the stream. This value must be larger than `BUFFER_SLACK`.
/// For testing purposes this value should be small (such as 8), to catch corner cases in the code where UTF-8 encodings
/// run over the end of a buffer; for performance, this value should be larger (e.g. 2048).
const BUFFER_SIZE : usize = 2048;
/// [BUFFER_SLACK] must be at least 4 - the maximum number of bytes in
/// a UTF-8 encoding; when fewer than BUFFER_SLACK bytes are in the
/// buffer a read from the buffer stream is performed - attempting to
/// fill the `BUFFER_SIZE` buffer with current data and new read data.
/// There is no reason why `BUFFER_SLACK` should be larger than 4.
const BUFFER_SLACK : usize = 4;
//a Reader
//tp Reader
/// The [Reader] provides a stream of characters by UTF-8 decoding a byte
/// stream provided by any type that implements the [std::io::Read] stream trait.
///
/// It utilizes an internal buffer of bytes that are filled as
/// required from the read stream; it maintains a position with the
/// stream (line and character) for the next character, and provides
/// the ability to get a stream of characters from the stream with any
/// UTF-8 encoding errors reported by line and character.
///
/// The stream can be reclaimed by completing the use of the
/// [Reader], in which case any unused bytes that have been read from
/// the stream are also returned.
///
/// If simple short files are to be read, using
/// [std::fs::read_to_string] may a better approach than using the
/// `Reader`
///
/// # Example
///
/// ```
/// use utf8_read::Reader;
/// let str = "This is a \u{1f600} string\nWith a newline\n";
/// let mut buf_bytes = str.as_bytes();
/// let mut reader = Reader::new(&mut buf_bytes);
/// for x in reader.into_iter() {
/// // use char x
/// }
/// ```
///
/// This example could just as easily use 'for x in str'
///
/// The [Reader], though, can be used over any object supporting the
/// [Read](std::io::Read) trait such as a a
/// [TcpStrema](std::net::TcpStream).
///
pub struct Reader<R:std::io::Read> {
/// The reader from which data is to be fetched
buf_reader : R,
/// `eof_on_no_data` defaults to true; it can be set to false to indicate that
/// if the stream has no data then the reader should return Char::NoData
/// when its buffer does not contain a complete UTF-8 character
eof_on_no_data : bool,
/// `eof` is set when the stream is complete - any character
/// requested once `eof` is asserted will be `Char::Eof`.
eof : bool,
/// Internal buffer
current : [u8; BUFFER_SIZE],
/// Offset of the first byte within the internal buffer that is valid
start : usize,
/// `Offset of the last byte + 1 within the internal buffer that is valid
end : usize,
/// `valid_end` is the last byte + 1 within the internal buffer
/// used by a valid UTF-8 byte stream that begins with `start` As
/// such `start` <= `valid_end` <= `end` If `start` < `valid_end`
/// then the bytes in the buffer between the two are a valid UTF-8
/// byte stream; this should perhaps be kept in a string inside
/// the structure for performance
valid_end : usize,
/// position in the file
stream_pos : StreamPosition,
}
//ip Reader
impl <R:std::io::Read> Reader<R> {
//fp new
/// Returns a new UTF-8 character [Reader], with a stream position
/// set to the normal start of the file - byte 0, line 1,
/// character 1
///
/// The [Reader] will default to handling zero bytes returned by
/// the stream as an EOF; to modify this default behavior use the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) builder to
/// modify the construction.
pub fn new(buf_reader: R) -> Self {
Self {
buf_reader,
eof_on_no_data : true,
eof : false,
current : [0; BUFFER_SIZE],
start : 0,
end : 0,
valid_end : 0,
stream_pos : StreamPosition::new(),
}
}
//cp set_eof_on_no_data
/// Build pattern function to set the `eof_on_no_data` on the [Reader] to true or false
///
/// This should not need to be set dynamically; an external source
/// can set the eof flag directly if required using the
/// [set_eof](Reader::set_eof) method
pub fn set_eof_on_no_data(mut self, eof_on_no_data:bool) -> Self {
self.eof_on_no_data = eof_on_no_data;
self
}
//mp set_position
/// Set the current stream position
///
/// This may be used if, for example, a stream is being restarted;
/// or if a UTF8 encoded stream occurs in the middle of a byte
/// file.
pub fn set_position(&mut self, stream_pos:StreamPosition) {
self.stream_pos = stream_pos;
}
//mp set_eof
/// Set the eof indicator as required; when `true` this will halt
/// any new data being returned, and the internal buffer points
/// will not change when more data is requested of the [Reader].
///
/// This method may be invoked on behalf of a stream that has
/// completed, but that cannot indicate this by a read operation
/// returning zero bytes. For example, it may be used by an
/// application which uses a TcpStream for data, and which needs
/// to ensure future operations on the [Reader] return no more
/// data after the TcpStream has closed.
pub fn set_eof(&mut self, eof:bool) {
self.eof = eof;
}
//mp eof
/// Get the current eof indicator value.
///
/// The `EOF` indication is normally set for [Reader]s that have a
/// stream that returns no data on a read operation, with that
/// behavior modified by the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) method.
pub fn eof(&self) -> bool {
self.eof
}
//mp complete
/// Finish with the stream, returning the buffer handle, the
/// position of the *next* character in the stream (if there were
/// to be one), and any unused buffer data.
pub fn complete(self) -> (R, StreamPosition, Vec<u8>) {
(self.buf_reader, self.stream_pos, self.current[self.start..self.end].into())
}
//mp drop_buffer
/// Drop the unconsumed data, for example after it has been borrowed and used, and before [complete](Reader::complete) is invoked
pub fn drop_buffer(&mut self) {
self.stream_pos.move_on_bytes(self.end - self.start);
self.start = self.end;
}
//mp buffer_is_empty
/// Returns true if the internal buffer is empty
pub fn | (&self) -> bool {
self.start == self.end
}
//mp borrow_buffer
/// Borrow the data held in the [Reader]'s buffer.
pub fn borrow_buffer(&self) -> &[u8] {
&self.current[self.start..self.end]
}
//mp borrow_pos
/// Borrow the stream position of the next character to be returned
pub fn borrow_pos(&self) -> &StreamPosition {
&self.stream_pos
}
//mp borrow
/// Borrow the underlying stream
pub fn borrow(&self) -> &R {
&self.buf_reader
}
//mp borrow_mut
/// Borrow the underlying stream as a mutable reference
pub fn borrow_mut(&mut self) -> &mut R {
&mut self.buf_reader
}
//fi fetch_input
/// Fetch input from the underlying stream into the internal buffer,
/// moving valid data to the start of the buffer first if
/// required. This method should only be invoked if more data is
/// required; it is relatively code-heavy.
fn fetch_input(&mut self) -> Result<usize> {
if self.start>BUFFER_SIZE-BUFFER_SLACK {
// Move everything down by self.start
let n = self.end - self.start;
if n>0 {
for i in 0..n {
self.current[i] = self.current[self.start+i];
}
}
self.valid_end -= self.start;
self.start = 0; // == self.start - self.start
self.end = n; // == self.end - self.start
}
let n = self.buf_reader.read( &mut self.current[self.end..BUFFER_SIZE] )?;
self.end += n;
if n==0 && self.eof_on_no_data {
self.eof = true;
}
Ok(n)
}
//mp next_char
/// Return the next character from the stream, if one is available, or [EOF](Char::Eof).
///
/// If there is no data - or not enough data - from the underlying stream, and the [Reader] is operating with the underlying stream *not* indicating EOF with a zero-byte read result, then [NoData](Char::NoData) is returned.
///
/// # Errors
///
/// May return [Error::MalformedUtf8] if the next bytes in the stream do not make a well-formed UTF8 character.
///
/// May return [Error::IoError] if the underlying stream has an IO Error.
pub fn next_char(&mut self) -> Result<Char> {
if self.eof {
Ok(Char::Eof)
} else if self.start == self.end { // no data present, try reading data
if self.fetch_input()? == 0 {
Ok(Char::NoData)
} else {
self.next_char()
}
} else if self.start < self.valid_end { // there is valid UTF-8 data at buffer+self.start
let s = {
// std::str::from_utf8(&self.current[self.start..self.valid_end]).unwrap()
unsafe {
std::str::from_utf8_unchecked(&self.current[self.start..self.valid_end])
}
};
let ch = s.chars().next().unwrap();
let n = ch.len_utf8();
self.start += n;
self.stream_pos.move_by(n, ch);
Ok(Char::Char(ch))
} else { // there is data but it may or may not be valid
match std::str::from_utf8(&self.current[self.start..self.end]) {
Ok(_) => { // the data is valid, mark it and the return from there
self.valid_end = self.end;
self.next_char()
}
Err(e) => { // the data is not all valid
if e.valid_up_to()>0 { // some bytes form valid UTF-8 - mark them and return that data
self.valid_end = self.start+e.valid_up_to();
self.next_char()
} else { // no valid data - check it is just incomplete, or an actual error
match e.error_len() {
None => { // incomplete UTF-8 fetch more
match self.fetch_input()? {
0 => { //... and eof reached when incomplete UTF8 is present
if self.eof {
Error::malformed_utf8(self.stream_pos, self.end-self.start)
} else {
Ok(Char::NoData)
}
}
_ => { //... but got more data so try that!
self.next_char()
}
}
}
Some(n) => { // Bad UTF-8 with n bytes used
let r = Error::malformed_utf8(self.stream_pos, n);
self.stream_pos.move_on_bytes(n);
self.start += n;
r
},
}
}
},
}
}
}
//zz All done
}
//ip Iterator for Reader - iterate over characters
//
// allow missing doc code examples for this as it *has* an example but
// rustdoc does not pick it up.
#[allow(missing_doc_code_examples)]
impl <'a, R:std::io::Read> Iterator for &'a mut Reader<R> {
// we will be counting with usize
type Item = Result<char>;
//mp next - return next character or None if end of file
fn next(&mut self) -> Option<Self::Item> {
match self.next_char() {
Ok(Char::Char(ch)) => Some(Ok(ch)),
Ok(_) => None,
Err(x) => Some(Err(x)),
}
}
//zz All done
}
| buffer_is_empty | identifier_name |
reader.rs | //a Imports
use crate::{Char, Error, Result, StreamPosition};
//a Constants
/// [BUFFER_SIZE] is the maximum number of bytes held in the UTF-8
/// character reader from the incoming stream. The larger the value,
/// the larger the data read requests from the stream. This value must be larger than `BUFFER_SLACK`.
/// For testing purposes this value should be small (such as 8), to catch corner cases in the code where UTF-8 encodings
/// run over the end of a buffer; for performance, this value should be larger (e.g. 2048).
const BUFFER_SIZE : usize = 2048;
/// [BUFFER_SLACK] must be at least 4 - the maximum number of bytes in
/// a UTF-8 encoding; when fewer than BUFFER_SLACK bytes are in the
/// buffer a read from the buffer stream is performed - attempting to
/// fill the `BUFFER_SIZE` buffer with current data and new read data.
/// There is no reason why `BUFFER_SLACK` should be larger than 4.
const BUFFER_SLACK : usize = 4;
//a Reader
//tp Reader
/// The [Reader] provides a stream of characters by UTF-8 decoding a byte
/// stream provided by any type that implements the [std::io::Read] stream trait.
///
/// It utilizes an internal buffer of bytes that are filled as
/// required from the read stream; it maintains a position with the
/// stream (line and character) for the next character, and provides
/// the ability to get a stream of characters from the stream with any
/// UTF-8 encoding errors reported by line and character.
///
/// The stream can be reclaimed by completing the use of the
/// [Reader], in which case any unused bytes that have been read from
/// the stream are also returned.
///
/// If simple short files are to be read, using
/// [std::fs::read_to_string] may a better approach than using the
/// `Reader`
///
/// # Example
///
/// ```
/// use utf8_read::Reader;
/// let str = "This is a \u{1f600} string\nWith a newline\n";
/// let mut buf_bytes = str.as_bytes();
/// let mut reader = Reader::new(&mut buf_bytes);
/// for x in reader.into_iter() {
/// // use char x
/// }
/// ```
///
/// This example could just as easily use 'for x in str'
///
/// The [Reader], though, can be used over any object supporting the
/// [Read](std::io::Read) trait such as a a
/// [TcpStrema](std::net::TcpStream).
///
pub struct Reader<R:std::io::Read> {
/// The reader from which data is to be fetched
buf_reader : R,
/// `eof_on_no_data` defaults to true; it can be set to false to indicate that
/// if the stream has no data then the reader should return Char::NoData
/// when its buffer does not contain a complete UTF-8 character
eof_on_no_data : bool,
/// `eof` is set when the stream is complete - any character
/// requested once `eof` is asserted will be `Char::Eof`.
eof : bool,
/// Internal buffer
current : [u8; BUFFER_SIZE],
/// Offset of the first byte within the internal buffer that is valid
start : usize,
/// `Offset of the last byte + 1 within the internal buffer that is valid
end : usize,
/// `valid_end` is the last byte + 1 within the internal buffer
/// used by a valid UTF-8 byte stream that begins with `start` As
/// such `start` <= `valid_end` <= `end` If `start` < `valid_end`
/// then the bytes in the buffer between the two are a valid UTF-8
/// byte stream; this should perhaps be kept in a string inside
/// the structure for performance
valid_end : usize,
/// position in the file
stream_pos : StreamPosition,
}
//ip Reader
impl <R:std::io::Read> Reader<R> {
//fp new
/// Returns a new UTF-8 character [Reader], with a stream position
/// set to the normal start of the file - byte 0, line 1,
/// character 1
///
/// The [Reader] will default to handling zero bytes returned by
/// the stream as an EOF; to modify this default behavior use the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) builder to
/// modify the construction.
pub fn new(buf_reader: R) -> Self {
Self {
buf_reader,
eof_on_no_data : true,
eof : false,
current : [0; BUFFER_SIZE],
start : 0,
end : 0,
valid_end : 0,
stream_pos : StreamPosition::new(),
}
}
//cp set_eof_on_no_data
/// Build pattern function to set the `eof_on_no_data` on the [Reader] to true or false
///
/// This should not need to be set dynamically; an external source
/// can set the eof flag directly if required using the
/// [set_eof](Reader::set_eof) method
pub fn set_eof_on_no_data(mut self, eof_on_no_data:bool) -> Self {
self.eof_on_no_data = eof_on_no_data;
self
}
//mp set_position
/// Set the current stream position
///
/// This may be used if, for example, a stream is being restarted;
/// or if a UTF8 encoded stream occurs in the middle of a byte
/// file.
pub fn set_position(&mut self, stream_pos:StreamPosition) {
self.stream_pos = stream_pos;
}
//mp set_eof
/// Set the eof indicator as required; when `true` this will halt
/// any new data being returned, and the internal buffer points
/// will not change when more data is requested of the [Reader].
///
/// This method may be invoked on behalf of a stream that has
/// completed, but that cannot indicate this by a read operation
/// returning zero bytes. For example, it may be used by an
/// application which uses a TcpStream for data, and which needs
/// to ensure future operations on the [Reader] return no more
/// data after the TcpStream has closed.
pub fn set_eof(&mut self, eof:bool) {
self.eof = eof;
}
//mp eof
/// Get the current eof indicator value.
///
/// The `EOF` indication is normally set for [Reader]s that have a
/// stream that returns no data on a read operation, with that
/// behavior modified by the
/// [set_eof_on_no_data](Reader::set_eof_on_no_data) method.
pub fn eof(&self) -> bool {
self.eof
}
//mp complete
/// Finish with the stream, returning the buffer handle, the
/// position of the *next* character in the stream (if there were
/// to be one), and any unused buffer data.
pub fn complete(self) -> (R, StreamPosition, Vec<u8>) {
(self.buf_reader, self.stream_pos, self.current[self.start..self.end].into())
}
//mp drop_buffer
/// Drop the unconsumed data, for example after it has been borrowed and used, and before [complete](Reader::complete) is invoked
pub fn drop_buffer(&mut self) {
self.stream_pos.move_on_bytes(self.end - self.start);
self.start = self.end;
}
//mp buffer_is_empty
/// Returns true if the internal buffer is empty
pub fn buffer_is_empty(&self) -> bool {
self.start == self.end
}
//mp borrow_buffer
/// Borrow the data held in the [Reader]'s buffer.
pub fn borrow_buffer(&self) -> &[u8] {
&self.current[self.start..self.end]
}
//mp borrow_pos
/// Borrow the stream position of the next character to be returned
pub fn borrow_pos(&self) -> &StreamPosition {
&self.stream_pos
}
//mp borrow
/// Borrow the underlying stream
pub fn borrow(&self) -> &R {
&self.buf_reader
}
//mp borrow_mut
/// Borrow the underlying stream as a mutable reference
pub fn borrow_mut(&mut self) -> &mut R {
&mut self.buf_reader
}
//fi fetch_input
/// Fetch input from the underlying stream into the internal buffer,
/// moving valid data to the start of the buffer first if
/// required. This method should only be invoked if more data is
/// required; it is relatively code-heavy.
fn fetch_input(&mut self) -> Result<usize> {
if self.start>BUFFER_SIZE-BUFFER_SLACK {
// Move everything down by self.start
let n = self.end - self.start;
if n>0 {
for i in 0..n {
self.current[i] = self.current[self.start+i];
}
}
self.valid_end -= self.start;
self.start = 0; // == self.start - self.start
self.end = n; // == self.end - self.start
}
let n = self.buf_reader.read( &mut self.current[self.end..BUFFER_SIZE] )?;
self.end += n;
if n==0 && self.eof_on_no_data {
self.eof = true;
}
Ok(n)
}
//mp next_char
/// Return the next character from the stream, if one is available, or [EOF](Char::Eof).
///
/// If there is no data - or not enough data - from the underlying stream, and the [Reader] is operating with the underlying stream *not* indicating EOF with a zero-byte read result, then [NoData](Char::NoData) is returned.
///
/// # Errors
///
/// May return [Error::MalformedUtf8] if the next bytes in the stream do not make a well-formed UTF8 character.
///
/// May return [Error::IoError] if the underlying stream has an IO Error.
pub fn next_char(&mut self) -> Result<Char> {
if self.eof {
Ok(Char::Eof)
} else if self.start == self.end { // no data present, try reading data
if self.fetch_input()? == 0 {
Ok(Char::NoData)
} else {
self.next_char()
}
} else if self.start < self.valid_end { // there is valid UTF-8 data at buffer+self.start
let s = {
// std::str::from_utf8(&self.current[self.start..self.valid_end]).unwrap()
unsafe {
std::str::from_utf8_unchecked(&self.current[self.start..self.valid_end])
}
};
let ch = s.chars().next().unwrap();
let n = ch.len_utf8();
self.start += n;
self.stream_pos.move_by(n, ch);
Ok(Char::Char(ch))
} else { // there is data but it may or may not be valid
match std::str::from_utf8(&self.current[self.start..self.end]) {
Ok(_) => { // the data is valid, mark it and the return from there
self.valid_end = self.end;
self.next_char()
}
Err(e) => { // the data is not all valid
if e.valid_up_to()>0 { // some bytes form valid UTF-8 - mark them and return that data
self.valid_end = self.start+e.valid_up_to();
self.next_char()
} else { // no valid data - check it is just incomplete, or an actual error
match e.error_len() {
None => { // incomplete UTF-8 fetch more
match self.fetch_input()? {
0 => { //... and eof reached when incomplete UTF8 is present
if self.eof {
Error::malformed_utf8(self.stream_pos, self.end-self.start)
} else {
Ok(Char::NoData)
}
}
_ => { //... but got more data so try that!
self.next_char()
}
}
}
Some(n) => { // Bad UTF-8 with n bytes used
let r = Error::malformed_utf8(self.stream_pos, n);
self.stream_pos.move_on_bytes(n);
self.start += n;
r
},
}
}
},
}
}
}
//zz All done
}
//ip Iterator for Reader - iterate over characters
//
// allow missing doc code examples for this as it *has* an example but
// rustdoc does not pick it up.
#[allow(missing_doc_code_examples)]
impl <'a, R:std::io::Read> Iterator for &'a mut Reader<R> {
// we will be counting with usize
type Item = Result<char>;
//mp next - return next character or None if end of file
fn next(&mut self) -> Option<Self::Item> |
//zz All done
}
| {
match self.next_char() {
Ok(Char::Char(ch)) => Some(Ok(ch)),
Ok(_) => None,
Err(x) => Some(Err(x)),
}
} | identifier_body |
address.rs | use bech32::{u5, FromBase32, ToBase32};
use extended_primitives::Buffer;
use handshake_encoding::{Decodable, DecodingError, Encodable};
use std::fmt;
use std::str::FromStr;
#[cfg(feature = "json")]
use encodings::ToHex;
#[cfg(feature = "json")]
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
#[cfg(feature = "json")]
use serde::ser::SerializeStruct;
//@todo we need a toHS1 syntax function.
//bech32
#[derive(Debug)]
pub enum AddressError {
InvalidAddressVersion,
InvalidAddressSize,
InvalidNetworkPrefix,
InvalidHash,
Bech32(bech32::Error),
}
impl From<bech32::Error> for AddressError {
fn from(e: bech32::Error) -> Self {
AddressError::Bech32(e)
}
}
impl From<AddressError> for DecodingError {
fn from(e: AddressError) -> DecodingError {
DecodingError::InvalidData(format!("{:?}", e))
}
}
impl fmt::Display for AddressError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
_ => formatter.write_str("todo"),
}
}
}
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub enum Payload {
PubkeyHash(Buffer),
ScriptHash(Buffer),
Unknown(Buffer),
}
impl Payload {
pub fn len(&self) -> usize {
match self {
Payload::PubkeyHash(hash) => hash.len(),
Payload::ScriptHash(hash) => hash.len(),
Payload::Unknown(hash) => hash.len(),
}
}
pub fn is_empty(&self) -> bool {
match self {
Payload::PubkeyHash(hash) => hash.is_empty(),
Payload::ScriptHash(hash) => hash.is_empty(),
Payload::Unknown(hash) => hash.is_empty(),
}
}
pub fn to_hash(self) -> Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn as_hash(&self) -> &Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn from_hash(hash: Buffer) -> Result<Payload, AddressError> {
match hash.len() {
20 => Ok(Payload::PubkeyHash(hash)),
32 => Ok(Payload::ScriptHash(hash)),
_ => Ok(Payload::Unknown(hash)),
}
}
}
//@todo Impl FromHex, ToHex
//@todo ideally implement copy here, but we need to implement it for Buffer, and we really need to
//look into performance degration there.
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub struct Address {
//Can we make this u8? TODO
//And do we even need this?
pub version: u8,
pub hash: Payload,
}
impl Address {
pub fn new(version: u8, hash: Payload) -> Self {
Address { version, hash }
}
//TODO
// pub fn is_null(&self) -> bool {
// self.hash.is_null()
// }
pub fn is_null_data(&self) -> bool {
self.version == 31
}
pub fn is_unspendable(&self) -> bool {
self.is_null_data()
}
pub fn to_bech32(&self) -> String {
//Also todo this should probably just be in toString, and should use writers so that we
//don't allocate.
//@todo this should be network dependant. Need to put work into this.
//Right now this will only support mainnet addresses.
// let mut data = vec![self.version];
let mut data = vec![bech32::u5::try_from_u8(self.version).unwrap()];
data.extend_from_slice(&self.hash.clone().to_hash().to_base32());
bech32::encode("hs", data).unwrap()
}
}
//@todo review if this is a good default. Should be triggered on "null"
impl Default for Address {
fn default() -> Self {
Address {
version: 0,
hash: Payload::PubkeyHash(Buffer::new()),
}
}
}
impl Decodable for Address {
type Err = DecodingError;
fn decode(buffer: &mut Buffer) -> Result<Self, Self::Err> {
let version = buffer.read_u8()?;
if version > 31 {
return Err(DecodingError::InvalidData(
"Invalid Address Version".to_string(),
));
}
let size = buffer.read_u8()?;
if size < 2 || size > 40 {
return Err(DecodingError::InvalidData(
"Invalid Address Size".to_string(),
));
}
let hash = buffer.read_bytes(size as usize)?;
let hash = Payload::from_hash(Buffer::from(hash))?;
Ok(Address { version, hash })
}
}
impl Encodable for Address {
fn size(&self) -> usize {
1 + 1 + self.hash.len()
}
fn encode(&self) -> Buffer {
let mut buffer = Buffer::new();
buffer.write_u8(self.version);
buffer.write_u8(self.hash.len() as u8);
//TODO fix this
buffer.extend(self.hash.as_hash().clone());
buffer
}
}
impl FromStr for Address {
type Err = AddressError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
//@todo should we be checking network here?
let (_hrp, data) = bech32::decode(s)?;
let (version, hash) = version_hash_from_bech32(data);
let hash = Payload::from_hash(hash)?;
Ok(Address { version, hash })
}
}
// //TODO eq, partial eq, ordering.
fn version_hash_from_bech32(data: Vec<u5>) -> (u8, Buffer) {
let (version, d) = data.split_at(1);
let hash_data = Vec::from_base32(d).unwrap();
let mut hash = Buffer::new();
for elem in hash_data.iter() {
hash.write_u8(*elem);
}
(version[0].to_u8(), hash)
}
#[cfg(feature = "json")]
impl serde::Serialize for Address {
fn serialize<S: serde::Serializer>(&self, s: S) -> std::result::Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Address", 2)?;
state.serialize_field("version", &self.version)?;
state.serialize_field("hash", &self.hash.as_hash().to_hex())?;
state.end()
}
}
#[cfg(feature = "json")]
impl<'de> Deserialize<'de> for Address {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
enum Field {
Version,
Hash,
Str,
};
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
| }
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
struct AddressVisitor;
impl<'de> Visitor<'de> for AddressVisitor {
type Value = Address;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Address")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Address, V::Error>
where
V: SeqAccess<'de>,
{
//Skip string
seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let version = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let hash_raw: Buffer = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
fn visit_str<E>(self, value: &str) -> Result<Address, E>
where
E: de::Error,
{
Ok(Address::from_str(value).map_err(de::Error::custom)?)
}
fn visit_map<V>(self, mut map: V) -> Result<Address, V::Error>
where
V: MapAccess<'de>,
{
let mut version = None;
let mut hash = None;
while let Some(key) = map.next_key()? {
match key {
Field::Version => {
if version.is_some() {
return Err(de::Error::duplicate_field("version"));
}
version = Some(map.next_value()?);
}
Field::Hash => {
if hash.is_some() {
return Err(de::Error::duplicate_field("hash"));
}
hash = Some(map.next_value()?);
}
Field::Str => {}
}
}
let version = version.ok_or_else(|| de::Error::missing_field("version"))?;
let hash_raw = hash.ok_or_else(|| de::Error::missing_field("hash"))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
}
const FIELDS: &'static [&'static str] = &["version", "hash"];
deserializer.deserialize_struct("Address", FIELDS, AddressVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_from_bech32() {
let addr = Address::from_str("hs1qd42hrldu5yqee58se4uj6xctm7nk28r70e84vx").unwrap();
dbg!(&addr);
dbg!(addr.to_bech32());
}
#[test]
fn test_from_unknown() {
let addr = Address::from_str("hs1lqqqqhuxwgy");
dbg!(addr);
}
}
| {
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`version` or `hash`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"version" => Ok(Field::Version),
"hash" => Ok(Field::Hash),
"string" => Ok(Field::Str),
_ => Err(de::Error::unknown_field(value, FIELDS)),
} | identifier_body |
address.rs | use bech32::{u5, FromBase32, ToBase32};
use extended_primitives::Buffer;
use handshake_encoding::{Decodable, DecodingError, Encodable};
use std::fmt;
use std::str::FromStr;
#[cfg(feature = "json")]
use encodings::ToHex;
#[cfg(feature = "json")]
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
#[cfg(feature = "json")]
use serde::ser::SerializeStruct;
//@todo we need a toHS1 syntax function.
//bech32
#[derive(Debug)]
pub enum AddressError {
InvalidAddressVersion,
InvalidAddressSize,
InvalidNetworkPrefix,
InvalidHash,
Bech32(bech32::Error),
}
impl From<bech32::Error> for AddressError {
fn from(e: bech32::Error) -> Self {
AddressError::Bech32(e)
}
}
impl From<AddressError> for DecodingError {
fn from(e: AddressError) -> DecodingError {
DecodingError::InvalidData(format!("{:?}", e))
}
}
impl fmt::Display for AddressError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
_ => formatter.write_str("todo"),
}
}
}
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub enum Payload {
PubkeyHash(Buffer),
ScriptHash(Buffer),
Unknown(Buffer),
}
impl Payload {
pub fn len(&self) -> usize {
match self {
Payload::PubkeyHash(hash) => hash.len(),
Payload::ScriptHash(hash) => hash.len(),
Payload::Unknown(hash) => hash.len(),
}
}
pub fn is_empty(&self) -> bool {
match self {
Payload::PubkeyHash(hash) => hash.is_empty(),
Payload::ScriptHash(hash) => hash.is_empty(),
Payload::Unknown(hash) => hash.is_empty(),
}
}
pub fn to_hash(self) -> Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn as_hash(&self) -> &Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn from_hash(hash: Buffer) -> Result<Payload, AddressError> {
match hash.len() {
20 => Ok(Payload::PubkeyHash(hash)),
32 => Ok(Payload::ScriptHash(hash)),
_ => Ok(Payload::Unknown(hash)),
}
}
}
//@todo Impl FromHex, ToHex
//@todo ideally implement copy here, but we need to implement it for Buffer, and we really need to
//look into performance degration there.
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub struct Address {
//Can we make this u8? TODO
//And do we even need this?
pub version: u8,
pub hash: Payload,
}
impl Address {
pub fn new(version: u8, hash: Payload) -> Self {
Address { version, hash }
}
//TODO
// pub fn is_null(&self) -> bool {
// self.hash.is_null()
// }
pub fn is_null_data(&self) -> bool {
self.version == 31
}
pub fn is_unspendable(&self) -> bool {
self.is_null_data()
}
pub fn to_bech32(&self) -> String {
//Also todo this should probably just be in toString, and should use writers so that we
//don't allocate.
//@todo this should be network dependant. Need to put work into this.
//Right now this will only support mainnet addresses.
// let mut data = vec![self.version];
let mut data = vec![bech32::u5::try_from_u8(self.version).unwrap()];
data.extend_from_slice(&self.hash.clone().to_hash().to_base32());
bech32::encode("hs", data).unwrap()
}
}
//@todo review if this is a good default. Should be triggered on "null"
impl Default for Address {
fn default() -> Self {
Address {
version: 0,
hash: Payload::PubkeyHash(Buffer::new()),
}
}
}
impl Decodable for Address {
type Err = DecodingError;
fn decode(buffer: &mut Buffer) -> Result<Self, Self::Err> {
let version = buffer.read_u8()?;
if version > 31 {
return Err(DecodingError::InvalidData(
"Invalid Address Version".to_string(),
));
}
let size = buffer.read_u8()?;
if size < 2 || size > 40 {
return Err(DecodingError::InvalidData(
"Invalid Address Size".to_string(),
));
}
let hash = buffer.read_bytes(size as usize)?;
let hash = Payload::from_hash(Buffer::from(hash))?;
Ok(Address { version, hash })
}
}
impl Encodable for Address {
fn size(&self) -> usize {
1 + 1 + self.hash.len()
}
fn encode(&self) -> Buffer {
let mut buffer = Buffer::new();
buffer.write_u8(self.version);
buffer.write_u8(self.hash.len() as u8);
//TODO fix this
buffer.extend(self.hash.as_hash().clone());
buffer
}
}
impl FromStr for Address {
type Err = AddressError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
//@todo should we be checking network here?
let (_hrp, data) = bech32::decode(s)?;
let (version, hash) = version_hash_from_bech32(data);
let hash = Payload::from_hash(hash)?;
Ok(Address { version, hash })
}
}
// //TODO eq, partial eq, ordering.
fn version_hash_from_bech32(data: Vec<u5>) -> (u8, Buffer) {
let (version, d) = data.split_at(1);
let hash_data = Vec::from_base32(d).unwrap();
let mut hash = Buffer::new();
for elem in hash_data.iter() {
hash.write_u8(*elem); | #[cfg(feature = "json")]
impl serde::Serialize for Address {
fn serialize<S: serde::Serializer>(&self, s: S) -> std::result::Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Address", 2)?;
state.serialize_field("version", &self.version)?;
state.serialize_field("hash", &self.hash.as_hash().to_hex())?;
state.end()
}
}
#[cfg(feature = "json")]
impl<'de> Deserialize<'de> for Address {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
enum Field {
Version,
Hash,
Str,
};
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`version` or `hash`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"version" => Ok(Field::Version),
"hash" => Ok(Field::Hash),
"string" => Ok(Field::Str),
_ => Err(de::Error::unknown_field(value, FIELDS)),
}
}
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
struct AddressVisitor;
impl<'de> Visitor<'de> for AddressVisitor {
type Value = Address;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Address")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Address, V::Error>
where
V: SeqAccess<'de>,
{
//Skip string
seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let version = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let hash_raw: Buffer = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
fn visit_str<E>(self, value: &str) -> Result<Address, E>
where
E: de::Error,
{
Ok(Address::from_str(value).map_err(de::Error::custom)?)
}
fn visit_map<V>(self, mut map: V) -> Result<Address, V::Error>
where
V: MapAccess<'de>,
{
let mut version = None;
let mut hash = None;
while let Some(key) = map.next_key()? {
match key {
Field::Version => {
if version.is_some() {
return Err(de::Error::duplicate_field("version"));
}
version = Some(map.next_value()?);
}
Field::Hash => {
if hash.is_some() {
return Err(de::Error::duplicate_field("hash"));
}
hash = Some(map.next_value()?);
}
Field::Str => {}
}
}
let version = version.ok_or_else(|| de::Error::missing_field("version"))?;
let hash_raw = hash.ok_or_else(|| de::Error::missing_field("hash"))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
}
const FIELDS: &'static [&'static str] = &["version", "hash"];
deserializer.deserialize_struct("Address", FIELDS, AddressVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_from_bech32() {
let addr = Address::from_str("hs1qd42hrldu5yqee58se4uj6xctm7nk28r70e84vx").unwrap();
dbg!(&addr);
dbg!(addr.to_bech32());
}
#[test]
fn test_from_unknown() {
let addr = Address::from_str("hs1lqqqqhuxwgy");
dbg!(addr);
}
} | }
(version[0].to_u8(), hash)
}
| random_line_split |
address.rs | use bech32::{u5, FromBase32, ToBase32};
use extended_primitives::Buffer;
use handshake_encoding::{Decodable, DecodingError, Encodable};
use std::fmt;
use std::str::FromStr;
#[cfg(feature = "json")]
use encodings::ToHex;
#[cfg(feature = "json")]
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
#[cfg(feature = "json")]
use serde::ser::SerializeStruct;
//@todo we need a toHS1 syntax function.
//bech32
#[derive(Debug)]
pub enum AddressError {
InvalidAddressVersion,
InvalidAddressSize,
InvalidNetworkPrefix,
InvalidHash,
Bech32(bech32::Error),
}
impl From<bech32::Error> for AddressError {
fn from(e: bech32::Error) -> Self {
AddressError::Bech32(e)
}
}
impl From<AddressError> for DecodingError {
fn from(e: AddressError) -> DecodingError {
DecodingError::InvalidData(format!("{:?}", e))
}
}
impl fmt::Display for AddressError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
_ => formatter.write_str("todo"),
}
}
}
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub enum Payload {
PubkeyHash(Buffer),
ScriptHash(Buffer),
Unknown(Buffer),
}
impl Payload {
pub fn len(&self) -> usize {
match self {
Payload::PubkeyHash(hash) => hash.len(),
Payload::ScriptHash(hash) => hash.len(),
Payload::Unknown(hash) => hash.len(),
}
}
pub fn is_empty(&self) -> bool {
match self {
Payload::PubkeyHash(hash) => hash.is_empty(),
Payload::ScriptHash(hash) => hash.is_empty(),
Payload::Unknown(hash) => hash.is_empty(),
}
}
pub fn to_hash(self) -> Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn as_hash(&self) -> &Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn from_hash(hash: Buffer) -> Result<Payload, AddressError> {
match hash.len() {
20 => Ok(Payload::PubkeyHash(hash)),
32 => Ok(Payload::ScriptHash(hash)),
_ => Ok(Payload::Unknown(hash)),
}
}
}
//@todo Impl FromHex, ToHex
//@todo ideally implement copy here, but we need to implement it for Buffer, and we really need to
//look into performance degration there.
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub struct Address {
//Can we make this u8? TODO
//And do we even need this?
pub version: u8,
pub hash: Payload,
}
impl Address {
pub fn new(version: u8, hash: Payload) -> Self {
Address { version, hash }
}
//TODO
// pub fn is_null(&self) -> bool {
// self.hash.is_null()
// }
pub fn is_null_data(&self) -> bool {
self.version == 31
}
pub fn is_unspendable(&self) -> bool {
self.is_null_data()
}
pub fn to_bech32(&self) -> String {
//Also todo this should probably just be in toString, and should use writers so that we
//don't allocate.
//@todo this should be network dependant. Need to put work into this.
//Right now this will only support mainnet addresses.
// let mut data = vec![self.version];
let mut data = vec![bech32::u5::try_from_u8(self.version).unwrap()];
data.extend_from_slice(&self.hash.clone().to_hash().to_base32());
bech32::encode("hs", data).unwrap()
}
}
//@todo review if this is a good default. Should be triggered on "null"
impl Default for Address {
fn default() -> Self {
Address {
version: 0,
hash: Payload::PubkeyHash(Buffer::new()),
}
}
}
impl Decodable for Address {
type Err = DecodingError;
fn decode(buffer: &mut Buffer) -> Result<Self, Self::Err> {
let version = buffer.read_u8()?;
if version > 31 {
return Err(DecodingError::InvalidData(
"Invalid Address Version".to_string(),
));
}
let size = buffer.read_u8()?;
if size < 2 || size > 40 {
return Err(DecodingError::InvalidData(
"Invalid Address Size".to_string(),
));
}
let hash = buffer.read_bytes(size as usize)?;
let hash = Payload::from_hash(Buffer::from(hash))?;
Ok(Address { version, hash })
}
}
impl Encodable for Address {
fn size(&self) -> usize {
1 + 1 + self.hash.len()
}
fn encode(&self) -> Buffer {
let mut buffer = Buffer::new();
buffer.write_u8(self.version);
buffer.write_u8(self.hash.len() as u8);
//TODO fix this
buffer.extend(self.hash.as_hash().clone());
buffer
}
}
impl FromStr for Address {
type Err = AddressError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
//@todo should we be checking network here?
let (_hrp, data) = bech32::decode(s)?;
let (version, hash) = version_hash_from_bech32(data);
let hash = Payload::from_hash(hash)?;
Ok(Address { version, hash })
}
}
// //TODO eq, partial eq, ordering.
fn version_hash_from_bech32(data: Vec<u5>) -> (u8, Buffer) {
let (version, d) = data.split_at(1);
let hash_data = Vec::from_base32(d).unwrap();
let mut hash = Buffer::new();
for elem in hash_data.iter() {
hash.write_u8(*elem);
}
(version[0].to_u8(), hash)
}
#[cfg(feature = "json")]
impl serde::Serialize for Address {
fn serialize<S: serde::Serializer>(&self, s: S) -> std::result::Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Address", 2)?;
state.serialize_field("version", &self.version)?;
state.serialize_field("hash", &self.hash.as_hash().to_hex())?;
state.end()
}
}
#[cfg(feature = "json")]
impl<'de> Deserialize<'de> for Address {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
enum Field {
Version,
Hash,
Str,
};
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`version` or `hash`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"version" => Ok(Field::Version),
"hash" => Ok(Field::Hash),
"string" => Ok(Field::Str),
_ => Err(de::Error::unknown_field(value, FIELDS)),
}
}
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
struct AddressVisitor;
impl<'de> Visitor<'de> for AddressVisitor {
type Value = Address;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Address")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Address, V::Error>
where
V: SeqAccess<'de>,
{
//Skip string
seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let version = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let hash_raw: Buffer = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
fn visit_str<E>(self, value: &str) -> Result<Address, E>
where
E: de::Error,
{
Ok(Address::from_str(value).map_err(de::Error::custom)?)
}
fn visit_map<V>(self, mut map: V) -> Result<Address, V::Error>
where
V: MapAccess<'de>,
{
let mut version = None;
let mut hash = None;
while let Some(key) = map.next_key()? {
match key {
Field::Version => {
if version.is_some() |
version = Some(map.next_value()?);
}
Field::Hash => {
if hash.is_some() {
return Err(de::Error::duplicate_field("hash"));
}
hash = Some(map.next_value()?);
}
Field::Str => {}
}
}
let version = version.ok_or_else(|| de::Error::missing_field("version"))?;
let hash_raw = hash.ok_or_else(|| de::Error::missing_field("hash"))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
}
const FIELDS: &'static [&'static str] = &["version", "hash"];
deserializer.deserialize_struct("Address", FIELDS, AddressVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_from_bech32() {
let addr = Address::from_str("hs1qd42hrldu5yqee58se4uj6xctm7nk28r70e84vx").unwrap();
dbg!(&addr);
dbg!(addr.to_bech32());
}
#[test]
fn test_from_unknown() {
let addr = Address::from_str("hs1lqqqqhuxwgy");
dbg!(addr);
}
}
| {
return Err(de::Error::duplicate_field("version"));
} | conditional_block |
address.rs | use bech32::{u5, FromBase32, ToBase32};
use extended_primitives::Buffer;
use handshake_encoding::{Decodable, DecodingError, Encodable};
use std::fmt;
use std::str::FromStr;
#[cfg(feature = "json")]
use encodings::ToHex;
#[cfg(feature = "json")]
use serde::de::{self, Deserialize, Deserializer, MapAccess, SeqAccess, Visitor};
#[cfg(feature = "json")]
use serde::ser::SerializeStruct;
//@todo we need a toHS1 syntax function.
//bech32
#[derive(Debug)]
pub enum AddressError {
InvalidAddressVersion,
InvalidAddressSize,
InvalidNetworkPrefix,
InvalidHash,
Bech32(bech32::Error),
}
impl From<bech32::Error> for AddressError {
fn from(e: bech32::Error) -> Self {
AddressError::Bech32(e)
}
}
impl From<AddressError> for DecodingError {
fn from(e: AddressError) -> DecodingError {
DecodingError::InvalidData(format!("{:?}", e))
}
}
impl fmt::Display for AddressError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
_ => formatter.write_str("todo"),
}
}
}
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub enum Payload {
PubkeyHash(Buffer),
ScriptHash(Buffer),
Unknown(Buffer),
}
impl Payload {
pub fn len(&self) -> usize {
match self {
Payload::PubkeyHash(hash) => hash.len(),
Payload::ScriptHash(hash) => hash.len(),
Payload::Unknown(hash) => hash.len(),
}
}
pub fn is_empty(&self) -> bool {
match self {
Payload::PubkeyHash(hash) => hash.is_empty(),
Payload::ScriptHash(hash) => hash.is_empty(),
Payload::Unknown(hash) => hash.is_empty(),
}
}
pub fn to_hash(self) -> Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn as_hash(&self) -> &Buffer {
match self {
Payload::PubkeyHash(hash) => hash,
Payload::ScriptHash(hash) => hash,
Payload::Unknown(hash) => hash,
}
}
pub fn from_hash(hash: Buffer) -> Result<Payload, AddressError> {
match hash.len() {
20 => Ok(Payload::PubkeyHash(hash)),
32 => Ok(Payload::ScriptHash(hash)),
_ => Ok(Payload::Unknown(hash)),
}
}
}
//@todo Impl FromHex, ToHex
//@todo ideally implement copy here, but we need to implement it for Buffer, and we really need to
//look into performance degration there.
// #[derive(PartialEq, Clone, Debug, Copy)]
#[derive(PartialEq, Clone, Debug)]
pub struct Address {
//Can we make this u8? TODO
//And do we even need this?
pub version: u8,
pub hash: Payload,
}
impl Address {
pub fn new(version: u8, hash: Payload) -> Self {
Address { version, hash }
}
//TODO
// pub fn is_null(&self) -> bool {
// self.hash.is_null()
// }
pub fn | (&self) -> bool {
self.version == 31
}
pub fn is_unspendable(&self) -> bool {
self.is_null_data()
}
pub fn to_bech32(&self) -> String {
//Also todo this should probably just be in toString, and should use writers so that we
//don't allocate.
//@todo this should be network dependant. Need to put work into this.
//Right now this will only support mainnet addresses.
// let mut data = vec![self.version];
let mut data = vec![bech32::u5::try_from_u8(self.version).unwrap()];
data.extend_from_slice(&self.hash.clone().to_hash().to_base32());
bech32::encode("hs", data).unwrap()
}
}
//@todo review if this is a good default. Should be triggered on "null"
impl Default for Address {
fn default() -> Self {
Address {
version: 0,
hash: Payload::PubkeyHash(Buffer::new()),
}
}
}
impl Decodable for Address {
type Err = DecodingError;
fn decode(buffer: &mut Buffer) -> Result<Self, Self::Err> {
let version = buffer.read_u8()?;
if version > 31 {
return Err(DecodingError::InvalidData(
"Invalid Address Version".to_string(),
));
}
let size = buffer.read_u8()?;
if size < 2 || size > 40 {
return Err(DecodingError::InvalidData(
"Invalid Address Size".to_string(),
));
}
let hash = buffer.read_bytes(size as usize)?;
let hash = Payload::from_hash(Buffer::from(hash))?;
Ok(Address { version, hash })
}
}
impl Encodable for Address {
fn size(&self) -> usize {
1 + 1 + self.hash.len()
}
fn encode(&self) -> Buffer {
let mut buffer = Buffer::new();
buffer.write_u8(self.version);
buffer.write_u8(self.hash.len() as u8);
//TODO fix this
buffer.extend(self.hash.as_hash().clone());
buffer
}
}
impl FromStr for Address {
type Err = AddressError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
//@todo should we be checking network here?
let (_hrp, data) = bech32::decode(s)?;
let (version, hash) = version_hash_from_bech32(data);
let hash = Payload::from_hash(hash)?;
Ok(Address { version, hash })
}
}
// //TODO eq, partial eq, ordering.
fn version_hash_from_bech32(data: Vec<u5>) -> (u8, Buffer) {
let (version, d) = data.split_at(1);
let hash_data = Vec::from_base32(d).unwrap();
let mut hash = Buffer::new();
for elem in hash_data.iter() {
hash.write_u8(*elem);
}
(version[0].to_u8(), hash)
}
#[cfg(feature = "json")]
impl serde::Serialize for Address {
fn serialize<S: serde::Serializer>(&self, s: S) -> std::result::Result<S::Ok, S::Error> {
let mut state = s.serialize_struct("Address", 2)?;
state.serialize_field("version", &self.version)?;
state.serialize_field("hash", &self.hash.as_hash().to_hex())?;
state.end()
}
}
#[cfg(feature = "json")]
impl<'de> Deserialize<'de> for Address {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
enum Field {
Version,
Hash,
Str,
};
impl<'de> Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Field, D::Error>
where
D: Deserializer<'de>,
{
struct FieldVisitor;
impl<'de> Visitor<'de> for FieldVisitor {
type Value = Field;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("`version` or `hash`")
}
fn visit_str<E>(self, value: &str) -> Result<Field, E>
where
E: de::Error,
{
match value {
"version" => Ok(Field::Version),
"hash" => Ok(Field::Hash),
"string" => Ok(Field::Str),
_ => Err(de::Error::unknown_field(value, FIELDS)),
}
}
}
deserializer.deserialize_identifier(FieldVisitor)
}
}
struct AddressVisitor;
impl<'de> Visitor<'de> for AddressVisitor {
type Value = Address;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("struct Address")
}
fn visit_seq<V>(self, mut seq: V) -> Result<Address, V::Error>
where
V: SeqAccess<'de>,
{
//Skip string
seq.next_element()?
.ok_or_else(|| de::Error::invalid_length(0, &self))?;
let version = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(1, &self))?;
let hash_raw: Buffer = seq
.next_element()?
.ok_or_else(|| de::Error::invalid_length(2, &self))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
fn visit_str<E>(self, value: &str) -> Result<Address, E>
where
E: de::Error,
{
Ok(Address::from_str(value).map_err(de::Error::custom)?)
}
fn visit_map<V>(self, mut map: V) -> Result<Address, V::Error>
where
V: MapAccess<'de>,
{
let mut version = None;
let mut hash = None;
while let Some(key) = map.next_key()? {
match key {
Field::Version => {
if version.is_some() {
return Err(de::Error::duplicate_field("version"));
}
version = Some(map.next_value()?);
}
Field::Hash => {
if hash.is_some() {
return Err(de::Error::duplicate_field("hash"));
}
hash = Some(map.next_value()?);
}
Field::Str => {}
}
}
let version = version.ok_or_else(|| de::Error::missing_field("version"))?;
let hash_raw = hash.ok_or_else(|| de::Error::missing_field("hash"))?;
let hash = Payload::from_hash(hash_raw).map_err(de::Error::custom)?;
Ok(Address::new(version, hash))
}
}
const FIELDS: &'static [&'static str] = &["version", "hash"];
deserializer.deserialize_struct("Address", FIELDS, AddressVisitor)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_to_from_bech32() {
let addr = Address::from_str("hs1qd42hrldu5yqee58se4uj6xctm7nk28r70e84vx").unwrap();
dbg!(&addr);
dbg!(addr.to_bech32());
}
#[test]
fn test_from_unknown() {
let addr = Address::from_str("hs1lqqqqhuxwgy");
dbg!(addr);
}
}
| is_null_data | identifier_name |
time_driver.rs | use core::cell::Cell;
use core::convert::TryInto;
use core::sync::atomic::{compiler_fence, Ordering};
use core::{mem, ptr};
use atomic_polyfill::{AtomicU32, AtomicU8};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::blocking_mutex::Mutex;
use embassy_time::driver::{AlarmHandle, Driver};
use embassy_time::TICK_HZ;
use stm32_metapac::timer::regs;
use crate::interrupt::{CriticalSection, InterruptExt};
use crate::pac::timer::vals;
use crate::rcc::sealed::RccPeripheral;
use crate::timer::sealed::{Basic16bitInstance as BasicInstance, GeneralPurpose16bitInstance as Instance};
use crate::{interrupt, peripherals};
#[cfg(not(any(time_driver_tim12, time_driver_tim15)))]
const ALARM_COUNT: usize = 3;
#[cfg(any(time_driver_tim12, time_driver_tim15))]
const ALARM_COUNT: usize = 1;
#[cfg(time_driver_tim2)]
type T = peripherals::TIM2;
#[cfg(time_driver_tim3)]
type T = peripherals::TIM3;
#[cfg(time_driver_tim4)]
type T = peripherals::TIM4;
#[cfg(time_driver_tim5)]
type T = peripherals::TIM5;
#[cfg(time_driver_tim12)]
type T = peripherals::TIM12;
#[cfg(time_driver_tim15)]
type T = peripherals::TIM15;
foreach_interrupt! {
(TIM2, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim2)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM3, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim3)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM4, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim4)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM5, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim5)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM12, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim12)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM15, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim15)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
}
// Clock timekeeping works with something we call "periods", which are time intervals
// of 2^15 ticks. The Clock counter value is 16 bits, so one "overflow cycle" is 2 periods.
//
// A `period` count is maintained in parallel to the Timer hardware `counter`, like this:
// - `period` and `counter` start at 0
// - `period` is incremented on overflow (at counter value 0)
// - `period` is incremented "midway" between overflows (at counter value 0x8000)
//
// Therefore, when `period` is even, counter is in 0..0x7FFF. When odd, counter is in 0x8000..0xFFFF
// This allows for now() to return the correct value even if it races an overflow.
//
// To get `now()`, `period` is read first, then `counter` is read. If the counter value matches
// the expected range for the `period` parity, we're done. If it doesn't, this means that
// a new period start has raced us between reading `period` and `counter`, so we assume the `counter` value
// corresponds to the next period.
//
// `period` is a 32bit integer, so It overflows on 2^32 * 2^15 / 32768 seconds of uptime, which is 136 years.
fn calc_now(period: u32, counter: u16) -> u64 {
((period as u64) << 15) + ((counter as u32 ^ ((period & 1) << 15)) as u64)
}
struct AlarmState {
timestamp: Cell<u64>,
// This is really a Option<(fn(*mut ()), *mut ())>
// but fn pointers aren't allowed in const yet
callback: Cell<*const ()>,
ctx: Cell<*mut ()>,
}
unsafe impl Send for AlarmState {}
impl AlarmState {
const fn new() -> Self {
Self {
timestamp: Cell::new(u64::MAX),
callback: Cell::new(ptr::null()),
ctx: Cell::new(ptr::null_mut()),
}
}
}
struct RtcDriver {
/// Number of 2^15 periods elapsed since boot.
period: AtomicU32,
alarm_count: AtomicU8,
/// Timestamp at which to fire alarm. u64::MAX if no alarm is scheduled.
alarms: Mutex<CriticalSectionRawMutex, [AlarmState; ALARM_COUNT]>,
}
const ALARM_STATE_NEW: AlarmState = AlarmState::new();
embassy_time::time_driver_impl!(static DRIVER: RtcDriver = RtcDriver {
period: AtomicU32::new(0),
alarm_count: AtomicU8::new(0),
alarms: Mutex::const_new(CriticalSectionRawMutex::new(), [ALARM_STATE_NEW; ALARM_COUNT]),
});
impl RtcDriver {
fn init(&'static self) {
let r = T::regs_gp16();
<T as RccPeripheral>::enable();
<T as RccPeripheral>::reset();
let timer_freq = T::frequency();
// NOTE(unsafe) Critical section to use the unsafe methods
critical_section::with(|_| unsafe {
r.cr1().modify(|w| w.set_cen(false));
r.cnt().write(|w| w.set_cnt(0));
let psc = timer_freq.0 / TICK_HZ as u32 - 1;
let psc: u16 = match psc.try_into() {
Err(_) => panic!("psc division overflow: {}", psc),
Ok(n) => n,
};
r.psc().write(|w| w.set_psc(psc));
r.arr().write(|w| w.set_arr(u16::MAX));
// Set URS, generate update and clear URS
r.cr1().modify(|w| w.set_urs(vals::Urs::COUNTERONLY));
r.egr().write(|w| w.set_ug(true));
r.cr1().modify(|w| w.set_urs(vals::Urs::ANYEVENT));
// Mid-way point
r.ccr(0).write(|w| w.set_ccr(0x8000));
// Enable overflow and half-overflow interrupts
r.dier().write(|w| {
w.set_uie(true);
w.set_ccie(0, true);
});
let irq: <T as BasicInstance>::Interrupt = core::mem::transmute(());
irq.unpend();
irq.enable();
r.cr1().modify(|w| w.set_cen(true));
})
}
fn on_interrupt(&self) {
let r = T::regs_gp16();
// NOTE(unsafe) Use critical section to access the methods
// XXX: reduce the size of this critical section?
critical_section::with(|cs| unsafe {
let sr = r.sr().read();
let dier = r.dier().read();
// Clear all interrupt flags. Bits in SR are "write 0 to clear", so write the bitwise NOT.
// Other approaches such as writing all zeros, or RMWing won't work, they can
// miss interrupts.
r.sr().write_value(regs::SrGp(!sr.0));
// Overflow
if sr.uif() {
self.next_period();
}
// Half overflow
if sr.ccif(0) {
self.next_period();
}
for n in 0..ALARM_COUNT {
if sr.ccif(n + 1) && dier.ccie(n + 1) {
self.trigger_alarm(n, cs);
}
}
})
}
fn next_period(&self) {
let r = T::regs_gp16();
let period = self.period.fetch_add(1, Ordering::Relaxed) + 1;
let t = (period as u64) << 15;
critical_section::with(move |cs| unsafe {
r.dier().modify(move |w| {
for n in 0..ALARM_COUNT {
let alarm = &self.alarms.borrow(cs)[n];
let at = alarm.timestamp.get();
if at < t + 0xc000 {
// just enable it. `set_alarm` has already set the correct CCR val.
w.set_ccie(n + 1, true);
}
}
})
})
}
fn get_alarm<'a>(&'a self, cs: CriticalSection<'a>, alarm: AlarmHandle) -> &'a AlarmState {
// safety: we're allowed to assume the AlarmState is created by us, and
// we never create one that's out of bounds.
unsafe { self.alarms.borrow(cs).get_unchecked(alarm.id() as usize) }
}
fn trigger_alarm(&self, n: usize, cs: CriticalSection) {
let alarm = &self.alarms.borrow(cs)[n];
alarm.timestamp.set(u64::MAX);
// Call after clearing alarm, so the callback can set another alarm.
// safety:
// - we can ignore the possiblity of `f` being unset (null) because of the safety contract of `allocate_alarm`.
// - other than that we only store valid function pointers into alarm.callback
let f: fn(*mut ()) = unsafe { mem::transmute(alarm.callback.get()) };
f(alarm.ctx.get());
}
}
impl Driver for RtcDriver {
fn now(&self) -> u64 {
let r = T::regs_gp16();
let period = self.period.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
// NOTE(unsafe) Atomic read with no side-effects
let counter = unsafe { r.cnt().read().cnt() };
calc_now(period, counter)
}
unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> {
let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
if x < ALARM_COUNT as u8 {
Some(x + 1)
} else {
None
}
});
match id {
Ok(id) => Some(AlarmHandle::new(id)),
Err(_) => None,
}
}
fn | (&self, alarm: AlarmHandle, callback: fn(*mut ()), ctx: *mut ()) {
critical_section::with(|cs| {
let alarm = self.get_alarm(cs, alarm);
alarm.callback.set(callback as *const ());
alarm.ctx.set(ctx);
})
}
fn set_alarm(&self, alarm: AlarmHandle, timestamp: u64) -> bool {
critical_section::with(|cs| {
let r = T::regs_gp16();
let n = alarm.id() as usize;
let alarm = self.get_alarm(cs, alarm);
alarm.timestamp.set(timestamp);
let t = self.now();
if timestamp <= t {
// If alarm timestamp has passed the alarm will not fire.
// Disarm the alarm and return `false` to indicate that.
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, false)) };
alarm.timestamp.set(u64::MAX);
return false;
}
let safe_timestamp = timestamp.max(t + 3);
// Write the CCR value regardless of whether we're going to enable it now or not.
// This way, when we enable it later, the right value is already set.
unsafe { r.ccr(n + 1).write(|w| w.set_ccr(safe_timestamp as u16)) };
// Enable it if it'll happen soon. Otherwise, `next_period` will enable it.
let diff = timestamp - t;
// NOTE(unsafe) We're in a critical section
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, diff < 0xc000)) };
true
})
}
}
pub(crate) fn init() {
DRIVER.init()
}
| set_alarm_callback | identifier_name |
time_driver.rs | use core::cell::Cell;
use core::convert::TryInto;
use core::sync::atomic::{compiler_fence, Ordering};
use core::{mem, ptr};
use atomic_polyfill::{AtomicU32, AtomicU8};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::blocking_mutex::Mutex;
use embassy_time::driver::{AlarmHandle, Driver};
use embassy_time::TICK_HZ;
use stm32_metapac::timer::regs;
use crate::interrupt::{CriticalSection, InterruptExt};
use crate::pac::timer::vals;
use crate::rcc::sealed::RccPeripheral;
use crate::timer::sealed::{Basic16bitInstance as BasicInstance, GeneralPurpose16bitInstance as Instance};
use crate::{interrupt, peripherals};
#[cfg(not(any(time_driver_tim12, time_driver_tim15)))]
const ALARM_COUNT: usize = 3;
#[cfg(any(time_driver_tim12, time_driver_tim15))]
const ALARM_COUNT: usize = 1;
#[cfg(time_driver_tim2)]
type T = peripherals::TIM2;
#[cfg(time_driver_tim3)]
type T = peripherals::TIM3;
#[cfg(time_driver_tim4)]
type T = peripherals::TIM4;
#[cfg(time_driver_tim5)]
type T = peripherals::TIM5;
#[cfg(time_driver_tim12)]
type T = peripherals::TIM12;
#[cfg(time_driver_tim15)]
type T = peripherals::TIM15;
foreach_interrupt! {
(TIM2, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim2)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM3, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim3)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM4, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim4)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM5, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim5)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM12, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim12)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM15, timer, $block:ident, UP, $irq:ident) => { | #[cfg(time_driver_tim15)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
}
// Clock timekeeping works with something we call "periods", which are time intervals
// of 2^15 ticks. The Clock counter value is 16 bits, so one "overflow cycle" is 2 periods.
//
// A `period` count is maintained in parallel to the Timer hardware `counter`, like this:
// - `period` and `counter` start at 0
// - `period` is incremented on overflow (at counter value 0)
// - `period` is incremented "midway" between overflows (at counter value 0x8000)
//
// Therefore, when `period` is even, counter is in 0..0x7FFF. When odd, counter is in 0x8000..0xFFFF
// This allows for now() to return the correct value even if it races an overflow.
//
// To get `now()`, `period` is read first, then `counter` is read. If the counter value matches
// the expected range for the `period` parity, we're done. If it doesn't, this means that
// a new period start has raced us between reading `period` and `counter`, so we assume the `counter` value
// corresponds to the next period.
//
// `period` is a 32bit integer, so It overflows on 2^32 * 2^15 / 32768 seconds of uptime, which is 136 years.
fn calc_now(period: u32, counter: u16) -> u64 {
((period as u64) << 15) + ((counter as u32 ^ ((period & 1) << 15)) as u64)
}
struct AlarmState {
timestamp: Cell<u64>,
// This is really a Option<(fn(*mut ()), *mut ())>
// but fn pointers aren't allowed in const yet
callback: Cell<*const ()>,
ctx: Cell<*mut ()>,
}
unsafe impl Send for AlarmState {}
impl AlarmState {
const fn new() -> Self {
Self {
timestamp: Cell::new(u64::MAX),
callback: Cell::new(ptr::null()),
ctx: Cell::new(ptr::null_mut()),
}
}
}
struct RtcDriver {
/// Number of 2^15 periods elapsed since boot.
period: AtomicU32,
alarm_count: AtomicU8,
/// Timestamp at which to fire alarm. u64::MAX if no alarm is scheduled.
alarms: Mutex<CriticalSectionRawMutex, [AlarmState; ALARM_COUNT]>,
}
const ALARM_STATE_NEW: AlarmState = AlarmState::new();
embassy_time::time_driver_impl!(static DRIVER: RtcDriver = RtcDriver {
period: AtomicU32::new(0),
alarm_count: AtomicU8::new(0),
alarms: Mutex::const_new(CriticalSectionRawMutex::new(), [ALARM_STATE_NEW; ALARM_COUNT]),
});
impl RtcDriver {
fn init(&'static self) {
let r = T::regs_gp16();
<T as RccPeripheral>::enable();
<T as RccPeripheral>::reset();
let timer_freq = T::frequency();
// NOTE(unsafe) Critical section to use the unsafe methods
critical_section::with(|_| unsafe {
r.cr1().modify(|w| w.set_cen(false));
r.cnt().write(|w| w.set_cnt(0));
let psc = timer_freq.0 / TICK_HZ as u32 - 1;
let psc: u16 = match psc.try_into() {
Err(_) => panic!("psc division overflow: {}", psc),
Ok(n) => n,
};
r.psc().write(|w| w.set_psc(psc));
r.arr().write(|w| w.set_arr(u16::MAX));
// Set URS, generate update and clear URS
r.cr1().modify(|w| w.set_urs(vals::Urs::COUNTERONLY));
r.egr().write(|w| w.set_ug(true));
r.cr1().modify(|w| w.set_urs(vals::Urs::ANYEVENT));
// Mid-way point
r.ccr(0).write(|w| w.set_ccr(0x8000));
// Enable overflow and half-overflow interrupts
r.dier().write(|w| {
w.set_uie(true);
w.set_ccie(0, true);
});
let irq: <T as BasicInstance>::Interrupt = core::mem::transmute(());
irq.unpend();
irq.enable();
r.cr1().modify(|w| w.set_cen(true));
})
}
fn on_interrupt(&self) {
let r = T::regs_gp16();
// NOTE(unsafe) Use critical section to access the methods
// XXX: reduce the size of this critical section?
critical_section::with(|cs| unsafe {
let sr = r.sr().read();
let dier = r.dier().read();
// Clear all interrupt flags. Bits in SR are "write 0 to clear", so write the bitwise NOT.
// Other approaches such as writing all zeros, or RMWing won't work, they can
// miss interrupts.
r.sr().write_value(regs::SrGp(!sr.0));
// Overflow
if sr.uif() {
self.next_period();
}
// Half overflow
if sr.ccif(0) {
self.next_period();
}
for n in 0..ALARM_COUNT {
if sr.ccif(n + 1) && dier.ccie(n + 1) {
self.trigger_alarm(n, cs);
}
}
})
}
fn next_period(&self) {
let r = T::regs_gp16();
let period = self.period.fetch_add(1, Ordering::Relaxed) + 1;
let t = (period as u64) << 15;
critical_section::with(move |cs| unsafe {
r.dier().modify(move |w| {
for n in 0..ALARM_COUNT {
let alarm = &self.alarms.borrow(cs)[n];
let at = alarm.timestamp.get();
if at < t + 0xc000 {
// just enable it. `set_alarm` has already set the correct CCR val.
w.set_ccie(n + 1, true);
}
}
})
})
}
fn get_alarm<'a>(&'a self, cs: CriticalSection<'a>, alarm: AlarmHandle) -> &'a AlarmState {
// safety: we're allowed to assume the AlarmState is created by us, and
// we never create one that's out of bounds.
unsafe { self.alarms.borrow(cs).get_unchecked(alarm.id() as usize) }
}
fn trigger_alarm(&self, n: usize, cs: CriticalSection) {
let alarm = &self.alarms.borrow(cs)[n];
alarm.timestamp.set(u64::MAX);
// Call after clearing alarm, so the callback can set another alarm.
// safety:
// - we can ignore the possiblity of `f` being unset (null) because of the safety contract of `allocate_alarm`.
// - other than that we only store valid function pointers into alarm.callback
let f: fn(*mut ()) = unsafe { mem::transmute(alarm.callback.get()) };
f(alarm.ctx.get());
}
}
impl Driver for RtcDriver {
fn now(&self) -> u64 {
let r = T::regs_gp16();
let period = self.period.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
// NOTE(unsafe) Atomic read with no side-effects
let counter = unsafe { r.cnt().read().cnt() };
calc_now(period, counter)
}
unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> {
let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
if x < ALARM_COUNT as u8 {
Some(x + 1)
} else {
None
}
});
match id {
Ok(id) => Some(AlarmHandle::new(id)),
Err(_) => None,
}
}
fn set_alarm_callback(&self, alarm: AlarmHandle, callback: fn(*mut ()), ctx: *mut ()) {
critical_section::with(|cs| {
let alarm = self.get_alarm(cs, alarm);
alarm.callback.set(callback as *const ());
alarm.ctx.set(ctx);
})
}
fn set_alarm(&self, alarm: AlarmHandle, timestamp: u64) -> bool {
critical_section::with(|cs| {
let r = T::regs_gp16();
let n = alarm.id() as usize;
let alarm = self.get_alarm(cs, alarm);
alarm.timestamp.set(timestamp);
let t = self.now();
if timestamp <= t {
// If alarm timestamp has passed the alarm will not fire.
// Disarm the alarm and return `false` to indicate that.
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, false)) };
alarm.timestamp.set(u64::MAX);
return false;
}
let safe_timestamp = timestamp.max(t + 3);
// Write the CCR value regardless of whether we're going to enable it now or not.
// This way, when we enable it later, the right value is already set.
unsafe { r.ccr(n + 1).write(|w| w.set_ccr(safe_timestamp as u16)) };
// Enable it if it'll happen soon. Otherwise, `next_period` will enable it.
let diff = timestamp - t;
// NOTE(unsafe) We're in a critical section
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, diff < 0xc000)) };
true
})
}
}
pub(crate) fn init() {
DRIVER.init()
} | random_line_split |
|
time_driver.rs | use core::cell::Cell;
use core::convert::TryInto;
use core::sync::atomic::{compiler_fence, Ordering};
use core::{mem, ptr};
use atomic_polyfill::{AtomicU32, AtomicU8};
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::blocking_mutex::Mutex;
use embassy_time::driver::{AlarmHandle, Driver};
use embassy_time::TICK_HZ;
use stm32_metapac::timer::regs;
use crate::interrupt::{CriticalSection, InterruptExt};
use crate::pac::timer::vals;
use crate::rcc::sealed::RccPeripheral;
use crate::timer::sealed::{Basic16bitInstance as BasicInstance, GeneralPurpose16bitInstance as Instance};
use crate::{interrupt, peripherals};
#[cfg(not(any(time_driver_tim12, time_driver_tim15)))]
const ALARM_COUNT: usize = 3;
#[cfg(any(time_driver_tim12, time_driver_tim15))]
const ALARM_COUNT: usize = 1;
#[cfg(time_driver_tim2)]
type T = peripherals::TIM2;
#[cfg(time_driver_tim3)]
type T = peripherals::TIM3;
#[cfg(time_driver_tim4)]
type T = peripherals::TIM4;
#[cfg(time_driver_tim5)]
type T = peripherals::TIM5;
#[cfg(time_driver_tim12)]
type T = peripherals::TIM12;
#[cfg(time_driver_tim15)]
type T = peripherals::TIM15;
foreach_interrupt! {
(TIM2, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim2)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM3, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim3)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM4, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim4)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM5, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim5)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM12, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim12)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
(TIM15, timer, $block:ident, UP, $irq:ident) => {
#[cfg(time_driver_tim15)]
#[interrupt]
fn $irq() {
DRIVER.on_interrupt()
}
};
}
// Clock timekeeping works with something we call "periods", which are time intervals
// of 2^15 ticks. The Clock counter value is 16 bits, so one "overflow cycle" is 2 periods.
//
// A `period` count is maintained in parallel to the Timer hardware `counter`, like this:
// - `period` and `counter` start at 0
// - `period` is incremented on overflow (at counter value 0)
// - `period` is incremented "midway" between overflows (at counter value 0x8000)
//
// Therefore, when `period` is even, counter is in 0..0x7FFF. When odd, counter is in 0x8000..0xFFFF
// This allows for now() to return the correct value even if it races an overflow.
//
// To get `now()`, `period` is read first, then `counter` is read. If the counter value matches
// the expected range for the `period` parity, we're done. If it doesn't, this means that
// a new period start has raced us between reading `period` and `counter`, so we assume the `counter` value
// corresponds to the next period.
//
// `period` is a 32bit integer, so It overflows on 2^32 * 2^15 / 32768 seconds of uptime, which is 136 years.
fn calc_now(period: u32, counter: u16) -> u64 {
((period as u64) << 15) + ((counter as u32 ^ ((period & 1) << 15)) as u64)
}
struct AlarmState {
timestamp: Cell<u64>,
// This is really a Option<(fn(*mut ()), *mut ())>
// but fn pointers aren't allowed in const yet
callback: Cell<*const ()>,
ctx: Cell<*mut ()>,
}
unsafe impl Send for AlarmState {}
impl AlarmState {
const fn new() -> Self {
Self {
timestamp: Cell::new(u64::MAX),
callback: Cell::new(ptr::null()),
ctx: Cell::new(ptr::null_mut()),
}
}
}
struct RtcDriver {
/// Number of 2^15 periods elapsed since boot.
period: AtomicU32,
alarm_count: AtomicU8,
/// Timestamp at which to fire alarm. u64::MAX if no alarm is scheduled.
alarms: Mutex<CriticalSectionRawMutex, [AlarmState; ALARM_COUNT]>,
}
const ALARM_STATE_NEW: AlarmState = AlarmState::new();
embassy_time::time_driver_impl!(static DRIVER: RtcDriver = RtcDriver {
period: AtomicU32::new(0),
alarm_count: AtomicU8::new(0),
alarms: Mutex::const_new(CriticalSectionRawMutex::new(), [ALARM_STATE_NEW; ALARM_COUNT]),
});
impl RtcDriver {
fn init(&'static self) {
let r = T::regs_gp16();
<T as RccPeripheral>::enable();
<T as RccPeripheral>::reset();
let timer_freq = T::frequency();
// NOTE(unsafe) Critical section to use the unsafe methods
critical_section::with(|_| unsafe {
r.cr1().modify(|w| w.set_cen(false));
r.cnt().write(|w| w.set_cnt(0));
let psc = timer_freq.0 / TICK_HZ as u32 - 1;
let psc: u16 = match psc.try_into() {
Err(_) => panic!("psc division overflow: {}", psc),
Ok(n) => n,
};
r.psc().write(|w| w.set_psc(psc));
r.arr().write(|w| w.set_arr(u16::MAX));
// Set URS, generate update and clear URS
r.cr1().modify(|w| w.set_urs(vals::Urs::COUNTERONLY));
r.egr().write(|w| w.set_ug(true));
r.cr1().modify(|w| w.set_urs(vals::Urs::ANYEVENT));
// Mid-way point
r.ccr(0).write(|w| w.set_ccr(0x8000));
// Enable overflow and half-overflow interrupts
r.dier().write(|w| {
w.set_uie(true);
w.set_ccie(0, true);
});
let irq: <T as BasicInstance>::Interrupt = core::mem::transmute(());
irq.unpend();
irq.enable();
r.cr1().modify(|w| w.set_cen(true));
})
}
fn on_interrupt(&self) {
let r = T::regs_gp16();
// NOTE(unsafe) Use critical section to access the methods
// XXX: reduce the size of this critical section?
critical_section::with(|cs| unsafe {
let sr = r.sr().read();
let dier = r.dier().read();
// Clear all interrupt flags. Bits in SR are "write 0 to clear", so write the bitwise NOT.
// Other approaches such as writing all zeros, or RMWing won't work, they can
// miss interrupts.
r.sr().write_value(regs::SrGp(!sr.0));
// Overflow
if sr.uif() {
self.next_period();
}
// Half overflow
if sr.ccif(0) {
self.next_period();
}
for n in 0..ALARM_COUNT {
if sr.ccif(n + 1) && dier.ccie(n + 1) {
self.trigger_alarm(n, cs);
}
}
})
}
fn next_period(&self) {
let r = T::regs_gp16();
let period = self.period.fetch_add(1, Ordering::Relaxed) + 1;
let t = (period as u64) << 15;
critical_section::with(move |cs| unsafe {
r.dier().modify(move |w| {
for n in 0..ALARM_COUNT {
let alarm = &self.alarms.borrow(cs)[n];
let at = alarm.timestamp.get();
if at < t + 0xc000 {
// just enable it. `set_alarm` has already set the correct CCR val.
w.set_ccie(n + 1, true);
}
}
})
})
}
fn get_alarm<'a>(&'a self, cs: CriticalSection<'a>, alarm: AlarmHandle) -> &'a AlarmState {
// safety: we're allowed to assume the AlarmState is created by us, and
// we never create one that's out of bounds.
unsafe { self.alarms.borrow(cs).get_unchecked(alarm.id() as usize) }
}
fn trigger_alarm(&self, n: usize, cs: CriticalSection) {
let alarm = &self.alarms.borrow(cs)[n];
alarm.timestamp.set(u64::MAX);
// Call after clearing alarm, so the callback can set another alarm.
// safety:
// - we can ignore the possiblity of `f` being unset (null) because of the safety contract of `allocate_alarm`.
// - other than that we only store valid function pointers into alarm.callback
let f: fn(*mut ()) = unsafe { mem::transmute(alarm.callback.get()) };
f(alarm.ctx.get());
}
}
impl Driver for RtcDriver {
fn now(&self) -> u64 {
let r = T::regs_gp16();
let period = self.period.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
// NOTE(unsafe) Atomic read with no side-effects
let counter = unsafe { r.cnt().read().cnt() };
calc_now(period, counter)
}
unsafe fn allocate_alarm(&self) -> Option<AlarmHandle> |
fn set_alarm_callback(&self, alarm: AlarmHandle, callback: fn(*mut ()), ctx: *mut ()) {
critical_section::with(|cs| {
let alarm = self.get_alarm(cs, alarm);
alarm.callback.set(callback as *const ());
alarm.ctx.set(ctx);
})
}
fn set_alarm(&self, alarm: AlarmHandle, timestamp: u64) -> bool {
critical_section::with(|cs| {
let r = T::regs_gp16();
let n = alarm.id() as usize;
let alarm = self.get_alarm(cs, alarm);
alarm.timestamp.set(timestamp);
let t = self.now();
if timestamp <= t {
// If alarm timestamp has passed the alarm will not fire.
// Disarm the alarm and return `false` to indicate that.
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, false)) };
alarm.timestamp.set(u64::MAX);
return false;
}
let safe_timestamp = timestamp.max(t + 3);
// Write the CCR value regardless of whether we're going to enable it now or not.
// This way, when we enable it later, the right value is already set.
unsafe { r.ccr(n + 1).write(|w| w.set_ccr(safe_timestamp as u16)) };
// Enable it if it'll happen soon. Otherwise, `next_period` will enable it.
let diff = timestamp - t;
// NOTE(unsafe) We're in a critical section
unsafe { r.dier().modify(|w| w.set_ccie(n + 1, diff < 0xc000)) };
true
})
}
}
pub(crate) fn init() {
DRIVER.init()
}
| {
let id = self.alarm_count.fetch_update(Ordering::AcqRel, Ordering::Acquire, |x| {
if x < ALARM_COUNT as u8 {
Some(x + 1)
} else {
None
}
});
match id {
Ok(id) => Some(AlarmHandle::new(id)),
Err(_) => None,
}
} | identifier_body |
mod.rs | , vendor_info),
Version(major, minor, None, vendor_info) =>
write!(f, "Version({}.{}, {})", major, minor, vendor_info),
}
}
}
/// A unique platform identifier that does not change between releases
#[deriving(Eq, PartialEq, Show)]
pub struct PlatformName {
/// The company responsible for the OpenGL implementation
pub vendor: &'static str,
/// The name of the renderer
pub renderer: &'static str,
}
impl PlatformName {
fn get() -> PlatformName {
PlatformName {
vendor: get_string(gl::VENDOR),
renderer: get_string(gl::RENDERER),
}
}
}
/// OpenGL implementation information
#[deriving(Show)]
pub struct Info {
/// The platform identifier
pub platform_name: PlatformName,
/// The OpenGL API vesion number
pub version: Version,
/// The GLSL vesion number
pub shading_language: Version,
/// The extensions supported by the implementation
pub extensions: HashSet<&'static str>,
}
impl Info {
fn get() -> Info {
let info = {
let platform_name = PlatformName::get();
let version = Version::parse(get_string(gl::VERSION)).unwrap();
let shading_language = Version::parse(get_string(gl::SHADING_LANGUAGE_VERSION)).unwrap();
let extensions = if version >= Version(3, 2, None, "") {
let num_exts = get_uint(gl::NUM_EXTENSIONS) as gl::types::GLuint;
range(0, num_exts).map(|i| {
unsafe {
str::raw::c_str_to_static_slice(
gl::GetStringi(gl::EXTENSIONS, i) as *const i8,
)
}
}).collect()
} else {
// Fallback
get_string(gl::EXTENSIONS).split(' ').collect()
};
Info {
platform_name: platform_name,
version: version,
shading_language: shading_language,
extensions: extensions,
}
};
info!("Vendor: {}", info.platform_name.vendor);
info!("Renderer: {}", info.platform_name.renderer);
info!("Version: {}", info.version);
info!("Shading Language: {}", info.shading_language);
info!("Loaded Extensions:")
for extension in info.extensions.iter() {
info!("- {}", *extension);
}
info
}
/// Returns `true` if the implementation supports the extension
pub fn is_extension_supported(&self, s: &str) -> bool {
self.extensions.contains_equiv(&s)
}
}
#[deriving(Eq, PartialEq, Show)]
pub enum ErrorType {
InvalidEnum,
InvalidValue,
InvalidOperation,
InvalidFramebufferOperation,
OutOfMemory,
UnknownError,
}
/// An OpenGL back-end with GLSL shaders
pub struct GlBackEnd {
caps: super::Capabilities,
info: Info,
make_texture: fn(::tex::TextureInfo) -> Texture,
/// Maps (by the index) from texture name to TextureInfo, so we can look up what texture target
/// to bind this texture to later. Yuck!
// Doesn't use a SmallIntMap to avoid the overhead of Option
samplers: Vec<::tex::SamplerInfo>,
}
impl GlBackEnd {
/// Load OpenGL symbols and detect driver information
pub fn new(provider: &super::GlProvider) -> GlBackEnd {
gl::load_with(|s| provider.get_proc_address(s));
let info = Info::get();
let caps = super::Capabilities {
shader_model: shade::get_model(),
max_draw_buffers: get_uint(gl::MAX_DRAW_BUFFERS),
max_texture_size: get_uint(gl::MAX_TEXTURE_SIZE),
max_vertex_attributes: get_uint(gl::MAX_VERTEX_ATTRIBS),
uniform_block_supported: info.version >= Version(3, 1, None, "")
|| info.is_extension_supported("GL_ARB_uniform_buffer_object"),
array_buffer_supported: info.version >= Version(3, 0, None, "")
|| info.is_extension_supported("GL_ARB_vertex_array_object"),
immutable_storage_supported: info.version >= Version(4, 2, None, "")
|| info.is_extension_supported("GL_ARB_texture_storage"),
sampler_objects_supported: info.version >= Version(3, 3, None, "")
|| info.is_extension_supported("GL_ARB_sampler_objects"),
};
GlBackEnd {
caps: caps,
info: info,
make_texture: if caps.immutable_storage_supported {
tex::make_with_storage
} else {
tex::make_without_storage
},
samplers: Vec::new(),
}
}
#[allow(dead_code)]
fn get_error(&mut self) -> Result<(), ErrorType> {
match gl::GetError() {
gl::NO_ERROR => Ok(()),
gl::INVALID_ENUM => Err(InvalidEnum),
gl::INVALID_VALUE => Err(InvalidValue),
gl::INVALID_OPERATION => Err(InvalidOperation),
gl::INVALID_FRAMEBUFFER_OPERATION => Err(InvalidFramebufferOperation),
gl::OUT_OF_MEMORY => Err(OutOfMemory),
_ => Err(UnknownError),
}
}
/// Fails during a debug build if the implementation's error flag was set.
#[allow(dead_code)]
fn check(&mut self) {
debug_assert_eq!(self.get_error(), Ok(()));
}
/// Get the OpenGL-specific driver information
pub fn get_info<'a>(&'a self) -> &'a Info {
&self.info
}
}
impl super::ApiBackEnd for GlBackEnd {
fn get_capabilities<'a>(&'a self) -> &'a super::Capabilities {
&self.caps
}
fn create_buffer(&mut self) -> Buffer {
let mut name = 0 as Buffer;
unsafe {
gl::GenBuffers(1, &mut name);
}
info!("\tCreated buffer {}", name);
name
}
fn create_array_buffer(&mut self) -> Result<ArrayBuffer, ()> {
if self.caps.array_buffer_supported {
let mut name = 0 as ArrayBuffer;
unsafe {
gl::GenVertexArrays(1, &mut name);
}
info!("\tCreated array buffer {}", name);
Ok(name)
} else {
error!("\tarray buffer creation unsupported, ignored")
Err(())
}
}
fn create_shader(&mut self, stage: super::shade::Stage, code: super::shade::ShaderSource) -> Result<Shader, super::shade::CreateShaderError> {
let (name, info) = shade::create_shader(stage, code, self.get_capabilities().shader_model);
info.map(|info| {
let level = if name.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tShader compile log: {}", info);
});
name
}
fn create_program(&mut self, shaders: &[Shader]) -> Result<super::shade::ProgramMeta, ()> {
let (meta, info) = shade::create_program(&self.caps, shaders);
info.map(|info| {
let level = if meta.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tProgram link log: {}", info);
});
meta
}
fn create_frame_buffer(&mut self) -> FrameBuffer {
let mut name = 0 as FrameBuffer;
unsafe {
gl::GenFramebuffers(1, &mut name);
}
info!("\tCreated frame buffer {}", name);
name
}
fn create_texture(&mut self, info: ::tex::TextureInfo) -> Texture {
(self.make_texture)(info)
}
fn create_sampler(&mut self, info: ::tex::SamplerInfo) -> Sampler {
if self.caps.sampler_objects_supported {
tex::make_sampler(info)
} else {
self.samplers.push(info);
self.samplers.len() as Sampler - 1
}
}
fn update_buffer(&mut self, buffer: Buffer, data: &super::Blob, usage: super::BufferUsage) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let size = data.get_size() as gl::types::GLsizeiptr;
let raw = data.get_address() as *const gl::types::GLvoid;
let usage = match usage {
super::UsageStatic => gl::STATIC_DRAW,
super::UsageDynamic => gl::DYNAMIC_DRAW,
super::UsageStream => gl::STREAM_DRAW,
};
unsafe {
gl::BufferData(gl::ARRAY_BUFFER, size, raw, usage);
}
}
fn process(&mut self, request: super::CastRequest) {
match request {
super::Clear(data) => {
let mut flags = match data.color {
//gl::ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE);
Some(super::target::Color([r,g,b,a])) => {
gl::ClearColor(r, g, b, a);
gl::COLOR_BUFFER_BIT
},
None => 0 as gl::types::GLenum
};
data.depth.map(|value| {
gl::DepthMask(gl::TRUE);
gl::ClearDepth(value as gl::types::GLclampd);
flags |= gl::DEPTH_BUFFER_BIT;
});
data.stencil.map(|value| {
gl::StencilMask(-1);
gl::ClearStencil(value as gl::types::GLint);
flags |= gl::STENCIL_BUFFER_BIT;
});
gl::Clear(flags);
},
super::BindProgram(program) => {
gl::UseProgram(program);
},
super::BindArrayBuffer(array_buffer) => {
if self.caps.array_buffer_supported {
gl::BindVertexArray(array_buffer);
} else {
error!("Ignored unsupported GL Request: {}", request)
}
},
super::BindAttribute(slot, buffer, count, el_type, stride, offset) => {
let gl_type = match el_type {
a::Int(_, a::U8, a::Unsigned) => gl::UNSIGNED_BYTE,
a::Int(_, a::U8, a::Signed) => gl::BYTE,
a::Int(_, a::U16, a::Unsigned) => gl::UNSIGNED_SHORT,
a::Int(_, a::U16, a::Signed) => gl::SHORT,
a::Int(_, a::U32, a::Unsigned) => gl::UNSIGNED_INT,
a::Int(_, a::U32, a::Signed) => gl::INT,
a::Float(_, a::F16) => gl::HALF_FLOAT,
a::Float(_, a::F32) => gl::FLOAT,
a::Float(_, a::F64) => gl::DOUBLE,
_ => {
error!("Unsupported element type: {}", el_type);
return
}
};
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let offset = offset as *const gl::types::GLvoid;
match el_type {
a::Int(a::IntRaw, _, _) => unsafe {
gl::VertexAttribIPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
a::Int(a::IntNormalized, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::TRUE,
stride as gl::types::GLint, offset);
},
a::Int(a::IntAsFloat, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatDefault, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatPrecision, _) => unsafe {
gl::VertexAttribLPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
_ => ()
}
gl::EnableVertexAttribArray(slot as gl::types::GLuint);
},
super::BindIndex(buffer) => {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buffer);
},
super::BindFrameBuffer(frame_buffer) => {
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, frame_buffer);
},
super::BindTarget(target, plane) => {
let attachment = match target {
super::target::TargetColor(index) =>
gl::COLOR_ATTACHMENT0 + (index as gl::types::GLenum),
super::target::TargetDepth => gl::DEPTH_ATTACHMENT,
super::target::TargetStencil => gl::STENCIL_ATTACHMENT,
super::target::TargetDepthStencil => gl::DEPTH_STENCIL_ATTACHMENT,
};
match plane {
super::target::PlaneEmpty => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, 0),
super::target::PlaneSurface(name) => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, name),
super::target::PlaneTexture(tex, level) => gl::FramebufferTexture
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint),
super::target::PlaneTextureLayer(tex, level, layer) => gl::FramebufferTextureLayer
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint, layer as gl::types::GLint),
}
},
super::BindUniformBlock(program, index, loc, buffer) => {
gl::UniformBlockBinding(program, index as gl::types::GLuint, loc as gl::types::GLuint);
gl::BindBufferBase(gl::UNIFORM_BUFFER, loc as gl::types::GLuint, buffer);
},
super::BindUniform(loc, uniform) => {
shade::bind_uniform(loc as gl::types::GLint, uniform);
},
super::BindTexture(loc, tex, sam) => {
tex::bind_texture(loc as gl::types::GLuint, tex, sam, self);
},
super::SetPrimitiveState(prim) => {
rast::bind_primitive(prim);
},
super::SetDepthStencilState(depth, stencil, cull) => {
rast::bind_stencil(stencil, cull);
rast::bind_depth(depth);
},
super::SetBlendState(blend) => {
rast::bind_blend(blend);
},
super::UpdateBuffer(buffer, data) => {
self.update_buffer(buffer, data, super::UsageDynamic);
},
super::UpdateTexture(tex, image_info, data) => {
tex::update_texture(tex, image_info, data);
},
super::Draw(start, count) => {
gl::DrawArrays(gl::TRIANGLES,
start as gl::types::GLsizei,
count as gl::types::GLsizei);
self.check();
},
super::DrawIndexed(start, count) => {
let offset = start * (std::mem::size_of::<u16>() as u16);
unsafe {
gl::DrawElements(gl::TRIANGLES,
count as gl::types::GLsizei,
gl::UNSIGNED_SHORT,
offset as *const gl::types::GLvoid);
}
self.check();
},
}
}
}
#[cfg(test)]
mod tests {
use super::Version;
#[test]
fn test_version_parse() | {
assert_eq!(Version::parse("1"), Err("1"));
assert_eq!(Version::parse("1."), Err("1."));
assert_eq!(Version::parse("1 h3l1o. W0rld"), Err("1 h3l1o. W0rld"));
assert_eq!(Version::parse("1. h3l1o. W0rld"), Err("1. h3l1o. W0rld"));
assert_eq!(Version::parse("1.2.3"), Ok(Version(1, 2, Some(3), "")));
assert_eq!(Version::parse("1.2"), Ok(Version(1, 2, None, "")));
assert_eq!(Version::parse("1.2 h3l1o. W0rld"), Ok(Version(1, 2, None, "h3l1o. W0rld")));
assert_eq!(Version::parse("1.2.h3l1o. W0rld"), Ok(Version(1, 2, None, "W0rld")));
assert_eq!(Version::parse("1.2. h3l1o. W0rld"), Ok(Version(1, 2, None, "h3l1o. W0rld")));
assert_eq!(Version::parse("1.2.3.h3l1o. W0rld"), Ok(Version(1, 2, Some(3), "W0rld")));
assert_eq!(Version::parse("1.2.3 h3l1o. W0rld"), Ok(Version(1, 2, Some(3), "h3l1o. W0rld")));
} | identifier_body |
|
mod.rs | allocated string from the implementation using
/// `glGetString`. Fails if it `GLenum` cannot be handled by the
/// implementation's `gl::GetString` function.
fn get_string(name: gl::types::GLenum) -> &'static str {
let ptr = gl::GetString(name) as *const i8;
if!ptr.is_null() {
// This should be safe to mark as statically allocated because
// GlGetString only returns static strings.
unsafe { str::raw::c_str_to_static_slice(ptr) }
} else {
fail!("Invalid GLenum passed to `get_string`: {:x}", name)
}
}
pub type VersionMajor = uint;
pub type VersionMinor = uint;
pub type Revision = uint;
pub type VendorDetails = &'static str;
/// A version number for a specific component of an OpenGL implementation
#[deriving(Eq, PartialEq, Ord, PartialOrd)]
pub struct Version(VersionMajor, VersionMinor, Option<Revision>, VendorDetails);
impl Version {
/// According to the OpenGL spec, the version information is expected to
/// follow the following syntax:
///
/// ~~~bnf
/// <major> ::= <number>
/// <minor> ::= <number>
/// <revision> ::= <number>
/// <vendor-info> ::= <string>
/// <release> ::= <major> "." <minor> ["." <release>]
/// <version> ::= <release> [" " <vendor-info>]
/// ~~~
///
/// Note that this function is intentionally lenient in regards to parsing,
/// and will try to recover at least the first two version numbers without
/// resulting in an `Err`.
fn parse(src: &'static str) -> Result<Version, &'static str> {
let (version, vendor_info) = match src.find(' ') {
Some(i) => (src.slice_to(i), src.slice_from(i + 1)),
None => (src, ""),
};
// TODO: make this even more lenient so that we can also accept
// `<major> "." <minor> [<???>]`
let mut it = version.split('.');
let major = it.next().and_then(from_str);
let minor = it.next().and_then(from_str);
let revision = it.next().and_then(from_str);
match (major, minor, revision) {
(Some(major), Some(minor), revision) =>
Ok(Version(major, minor, revision, vendor_info)),
(_, _, _) => Err(src),
}
}
}
impl fmt::Show for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version(major, minor, Some(revision), "") =>
write!(f, "Version({}.{}.{})", major, minor, revision),
Version(major, minor, None, "") =>
write!(f, "Version({}.{})", major, minor),
Version(major, minor, Some(revision), vendor_info) =>
write!(f, "Version({}.{}.{}, {})", major, minor, revision, vendor_info),
Version(major, minor, None, vendor_info) =>
write!(f, "Version({}.{}, {})", major, minor, vendor_info),
}
}
}
/// A unique platform identifier that does not change between releases
#[deriving(Eq, PartialEq, Show)]
pub struct PlatformName {
/// The company responsible for the OpenGL implementation
pub vendor: &'static str,
/// The name of the renderer
pub renderer: &'static str,
}
impl PlatformName {
fn get() -> PlatformName {
PlatformName {
vendor: get_string(gl::VENDOR),
renderer: get_string(gl::RENDERER),
}
}
}
/// OpenGL implementation information
#[deriving(Show)]
pub struct Info {
/// The platform identifier
pub platform_name: PlatformName,
/// The OpenGL API vesion number
pub version: Version,
/// The GLSL vesion number
pub shading_language: Version,
/// The extensions supported by the implementation
pub extensions: HashSet<&'static str>,
}
impl Info {
fn get() -> Info {
let info = {
let platform_name = PlatformName::get();
let version = Version::parse(get_string(gl::VERSION)).unwrap();
let shading_language = Version::parse(get_string(gl::SHADING_LANGUAGE_VERSION)).unwrap();
let extensions = if version >= Version(3, 2, None, "") {
let num_exts = get_uint(gl::NUM_EXTENSIONS) as gl::types::GLuint;
range(0, num_exts).map(|i| {
unsafe {
str::raw::c_str_to_static_slice(
gl::GetStringi(gl::EXTENSIONS, i) as *const i8,
)
}
}).collect()
} else {
// Fallback
get_string(gl::EXTENSIONS).split(' ').collect()
};
Info {
platform_name: platform_name,
version: version,
shading_language: shading_language,
extensions: extensions,
}
};
info!("Vendor: {}", info.platform_name.vendor);
info!("Renderer: {}", info.platform_name.renderer);
info!("Version: {}", info.version);
info!("Shading Language: {}", info.shading_language);
info!("Loaded Extensions:")
for extension in info.extensions.iter() {
info!("- {}", *extension);
}
info
}
/// Returns `true` if the implementation supports the extension
pub fn is_extension_supported(&self, s: &str) -> bool {
self.extensions.contains_equiv(&s)
}
}
#[deriving(Eq, PartialEq, Show)]
pub enum ErrorType {
InvalidEnum,
InvalidValue,
InvalidOperation,
InvalidFramebufferOperation,
OutOfMemory,
UnknownError,
}
/// An OpenGL back-end with GLSL shaders
pub struct GlBackEnd {
caps: super::Capabilities,
info: Info,
make_texture: fn(::tex::TextureInfo) -> Texture,
/// Maps (by the index) from texture name to TextureInfo, so we can look up what texture target
/// to bind this texture to later. Yuck!
// Doesn't use a SmallIntMap to avoid the overhead of Option
samplers: Vec<::tex::SamplerInfo>,
}
impl GlBackEnd {
/// Load OpenGL symbols and detect driver information
pub fn new(provider: &super::GlProvider) -> GlBackEnd {
gl::load_with(|s| provider.get_proc_address(s));
let info = Info::get();
let caps = super::Capabilities {
shader_model: shade::get_model(),
max_draw_buffers: get_uint(gl::MAX_DRAW_BUFFERS),
max_texture_size: get_uint(gl::MAX_TEXTURE_SIZE),
max_vertex_attributes: get_uint(gl::MAX_VERTEX_ATTRIBS),
uniform_block_supported: info.version >= Version(3, 1, None, "")
|| info.is_extension_supported("GL_ARB_uniform_buffer_object"),
array_buffer_supported: info.version >= Version(3, 0, None, "")
|| info.is_extension_supported("GL_ARB_vertex_array_object"),
immutable_storage_supported: info.version >= Version(4, 2, None, "")
|| info.is_extension_supported("GL_ARB_texture_storage"),
sampler_objects_supported: info.version >= Version(3, 3, None, "")
|| info.is_extension_supported("GL_ARB_sampler_objects"),
};
GlBackEnd {
caps: caps,
info: info,
make_texture: if caps.immutable_storage_supported {
tex::make_with_storage
} else {
tex::make_without_storage
},
samplers: Vec::new(),
}
}
#[allow(dead_code)]
fn get_error(&mut self) -> Result<(), ErrorType> {
match gl::GetError() {
gl::NO_ERROR => Ok(()),
gl::INVALID_ENUM => Err(InvalidEnum),
gl::INVALID_VALUE => Err(InvalidValue),
gl::INVALID_OPERATION => Err(InvalidOperation),
gl::INVALID_FRAMEBUFFER_OPERATION => Err(InvalidFramebufferOperation),
gl::OUT_OF_MEMORY => Err(OutOfMemory),
_ => Err(UnknownError),
}
}
/// Fails during a debug build if the implementation's error flag was set.
#[allow(dead_code)]
fn check(&mut self) {
debug_assert_eq!(self.get_error(), Ok(()));
}
/// Get the OpenGL-specific driver information
pub fn get_info<'a>(&'a self) -> &'a Info {
&self.info
}
}
impl super::ApiBackEnd for GlBackEnd {
fn get_capabilities<'a>(&'a self) -> &'a super::Capabilities {
&self.caps
}
fn create_buffer(&mut self) -> Buffer {
let mut name = 0 as Buffer;
unsafe {
gl::GenBuffers(1, &mut name);
}
info!("\tCreated buffer {}", name);
name
}
fn create_array_buffer(&mut self) -> Result<ArrayBuffer, ()> {
if self.caps.array_buffer_supported {
let mut name = 0 as ArrayBuffer;
unsafe {
gl::GenVertexArrays(1, &mut name);
}
info!("\tCreated array buffer {}", name);
Ok(name)
} else {
error!("\tarray buffer creation unsupported, ignored")
Err(())
}
}
fn create_shader(&mut self, stage: super::shade::Stage, code: super::shade::ShaderSource) -> Result<Shader, super::shade::CreateShaderError> {
let (name, info) = shade::create_shader(stage, code, self.get_capabilities().shader_model);
info.map(|info| {
let level = if name.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tShader compile log: {}", info);
});
name
}
fn create_program(&mut self, shaders: &[Shader]) -> Result<super::shade::ProgramMeta, ()> {
let (meta, info) = shade::create_program(&self.caps, shaders);
info.map(|info| {
let level = if meta.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tProgram link log: {}", info);
});
meta
}
fn create_frame_buffer(&mut self) -> FrameBuffer {
let mut name = 0 as FrameBuffer;
unsafe {
gl::GenFramebuffers(1, &mut name);
}
info!("\tCreated frame buffer {}", name);
name
}
fn create_texture(&mut self, info: ::tex::TextureInfo) -> Texture {
(self.make_texture)(info)
}
fn create_sampler(&mut self, info: ::tex::SamplerInfo) -> Sampler {
if self.caps.sampler_objects_supported {
tex::make_sampler(info)
} else {
self.samplers.push(info);
self.samplers.len() as Sampler - 1
}
}
fn update_buffer(&mut self, buffer: Buffer, data: &super::Blob, usage: super::BufferUsage) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let size = data.get_size() as gl::types::GLsizeiptr;
let raw = data.get_address() as *const gl::types::GLvoid;
let usage = match usage {
super::UsageStatic => gl::STATIC_DRAW,
super::UsageDynamic => gl::DYNAMIC_DRAW,
super::UsageStream => gl::STREAM_DRAW,
};
unsafe {
gl::BufferData(gl::ARRAY_BUFFER, size, raw, usage);
}
}
fn process(&mut self, request: super::CastRequest) {
match request {
super::Clear(data) => {
let mut flags = match data.color {
//gl::ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE);
Some(super::target::Color([r,g,b,a])) => {
gl::ClearColor(r, g, b, a);
gl::COLOR_BUFFER_BIT
},
None => 0 as gl::types::GLenum
};
data.depth.map(|value| {
gl::DepthMask(gl::TRUE);
gl::ClearDepth(value as gl::types::GLclampd);
flags |= gl::DEPTH_BUFFER_BIT;
});
data.stencil.map(|value| {
gl::StencilMask(-1);
gl::ClearStencil(value as gl::types::GLint);
flags |= gl::STENCIL_BUFFER_BIT;
});
gl::Clear(flags);
},
super::BindProgram(program) => {
gl::UseProgram(program);
},
super::BindArrayBuffer(array_buffer) => {
if self.caps.array_buffer_supported {
gl::BindVertexArray(array_buffer); | error!("Ignored unsupported GL Request: {}", request)
}
},
super::BindAttribute(slot, buffer, count, el_type, stride, offset) => {
let gl_type = match el_type {
a::Int(_, a::U8, a::Unsigned) => gl::UNSIGNED_BYTE,
a::Int(_, a::U8, a::Signed) => gl::BYTE,
a::Int(_, a::U16, a::Unsigned) => gl::UNSIGNED_SHORT,
a::Int(_, a::U16, a::Signed) => gl::SHORT,
a::Int(_, a::U32, a::Unsigned) => gl::UNSIGNED_INT,
a::Int(_, a::U32, a::Signed) => gl::INT,
a::Float(_, a::F16) => gl::HALF_FLOAT,
a::Float(_, a::F32) => gl::FLOAT,
a::Float(_, a::F64) => gl::DOUBLE,
_ => {
error!("Unsupported element type: {}", el_type);
return
}
};
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let offset = offset as *const gl::types::GLvoid;
match el_type {
a::Int(a::IntRaw, _, _) => unsafe {
gl::VertexAttribIPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
a::Int(a::IntNormalized, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::TRUE,
stride as gl::types::GLint, offset);
},
a::Int(a::IntAsFloat, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatDefault, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatPrecision, _) => unsafe {
gl::VertexAttribLPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
_ => ()
}
gl::EnableVertexAttribArray(slot as gl::types::GLuint);
},
super::BindIndex(buffer) => {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buffer);
},
super::BindFrameBuffer(frame_buffer) => {
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, frame_buffer);
},
super::BindTarget(target, plane) => {
let attachment = match target {
super::target::TargetColor(index) =>
gl::COLOR_ATTACHMENT0 + (index as gl::types::GLenum),
super::target::TargetDepth => gl::DEPTH_ATTACHMENT,
super::target::TargetStencil => gl::STENCIL_ATTACHMENT,
super::target::TargetDepthStencil => gl::DEPTH_STENCIL_ATTACHMENT,
};
match plane {
super::target::PlaneEmpty => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, 0),
super::target::PlaneSurface(name) => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, name),
super::target::PlaneTexture(tex, level) => gl::FramebufferTexture
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint),
super::target::PlaneTextureLayer(tex, level, layer) => gl::FramebufferTextureLayer
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint, layer as gl::types::GLint),
}
},
super::BindUniformBlock(program, index, loc, buffer) => {
gl::UniformBlockBinding(program, index as gl::types::GLuint, loc as gl::types::GLuint);
gl::BindBufferBase(gl::UNIFORM_BUFFER, loc as gl::types::GLuint, buffer);
},
super::BindUniform(loc, uniform) => {
shade::bind_uniform(loc as gl::types::GLint, uniform);
},
super::BindTexture(loc, tex, sam) => {
tex::bind_texture(loc as gl::types::GLuint, tex, sam, self);
},
super::SetPrimitiveState(prim) => {
rast::bind_primitive(prim);
},
super::SetDepthStencilState(depth, stencil, cull) => {
rast::bind_stencil(stencil, cull);
rast::bind_depth(depth);
},
super::SetBlendState(blend) => {
rast::bind_blend(blend);
},
| } else { | random_line_split |
mod.rs | string from the implementation using
/// `glGetString`. Fails if it `GLenum` cannot be handled by the
/// implementation's `gl::GetString` function.
fn get_string(name: gl::types::GLenum) -> &'static str {
let ptr = gl::GetString(name) as *const i8;
if!ptr.is_null() {
// This should be safe to mark as statically allocated because
// GlGetString only returns static strings.
unsafe { str::raw::c_str_to_static_slice(ptr) }
} else {
fail!("Invalid GLenum passed to `get_string`: {:x}", name)
}
}
pub type VersionMajor = uint;
pub type VersionMinor = uint;
pub type Revision = uint;
pub type VendorDetails = &'static str;
/// A version number for a specific component of an OpenGL implementation
#[deriving(Eq, PartialEq, Ord, PartialOrd)]
pub struct Version(VersionMajor, VersionMinor, Option<Revision>, VendorDetails);
impl Version {
/// According to the OpenGL spec, the version information is expected to
/// follow the following syntax:
///
/// ~~~bnf
/// <major> ::= <number>
/// <minor> ::= <number>
/// <revision> ::= <number>
/// <vendor-info> ::= <string>
/// <release> ::= <major> "." <minor> ["." <release>]
/// <version> ::= <release> [" " <vendor-info>]
/// ~~~
///
/// Note that this function is intentionally lenient in regards to parsing,
/// and will try to recover at least the first two version numbers without
/// resulting in an `Err`.
fn | (src: &'static str) -> Result<Version, &'static str> {
let (version, vendor_info) = match src.find(' ') {
Some(i) => (src.slice_to(i), src.slice_from(i + 1)),
None => (src, ""),
};
// TODO: make this even more lenient so that we can also accept
// `<major> "." <minor> [<???>]`
let mut it = version.split('.');
let major = it.next().and_then(from_str);
let minor = it.next().and_then(from_str);
let revision = it.next().and_then(from_str);
match (major, minor, revision) {
(Some(major), Some(minor), revision) =>
Ok(Version(major, minor, revision, vendor_info)),
(_, _, _) => Err(src),
}
}
}
impl fmt::Show for Version {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Version(major, minor, Some(revision), "") =>
write!(f, "Version({}.{}.{})", major, minor, revision),
Version(major, minor, None, "") =>
write!(f, "Version({}.{})", major, minor),
Version(major, minor, Some(revision), vendor_info) =>
write!(f, "Version({}.{}.{}, {})", major, minor, revision, vendor_info),
Version(major, minor, None, vendor_info) =>
write!(f, "Version({}.{}, {})", major, minor, vendor_info),
}
}
}
/// A unique platform identifier that does not change between releases
#[deriving(Eq, PartialEq, Show)]
pub struct PlatformName {
/// The company responsible for the OpenGL implementation
pub vendor: &'static str,
/// The name of the renderer
pub renderer: &'static str,
}
impl PlatformName {
fn get() -> PlatformName {
PlatformName {
vendor: get_string(gl::VENDOR),
renderer: get_string(gl::RENDERER),
}
}
}
/// OpenGL implementation information
#[deriving(Show)]
pub struct Info {
/// The platform identifier
pub platform_name: PlatformName,
/// The OpenGL API vesion number
pub version: Version,
/// The GLSL vesion number
pub shading_language: Version,
/// The extensions supported by the implementation
pub extensions: HashSet<&'static str>,
}
impl Info {
fn get() -> Info {
let info = {
let platform_name = PlatformName::get();
let version = Version::parse(get_string(gl::VERSION)).unwrap();
let shading_language = Version::parse(get_string(gl::SHADING_LANGUAGE_VERSION)).unwrap();
let extensions = if version >= Version(3, 2, None, "") {
let num_exts = get_uint(gl::NUM_EXTENSIONS) as gl::types::GLuint;
range(0, num_exts).map(|i| {
unsafe {
str::raw::c_str_to_static_slice(
gl::GetStringi(gl::EXTENSIONS, i) as *const i8,
)
}
}).collect()
} else {
// Fallback
get_string(gl::EXTENSIONS).split(' ').collect()
};
Info {
platform_name: platform_name,
version: version,
shading_language: shading_language,
extensions: extensions,
}
};
info!("Vendor: {}", info.platform_name.vendor);
info!("Renderer: {}", info.platform_name.renderer);
info!("Version: {}", info.version);
info!("Shading Language: {}", info.shading_language);
info!("Loaded Extensions:")
for extension in info.extensions.iter() {
info!("- {}", *extension);
}
info
}
/// Returns `true` if the implementation supports the extension
pub fn is_extension_supported(&self, s: &str) -> bool {
self.extensions.contains_equiv(&s)
}
}
#[deriving(Eq, PartialEq, Show)]
pub enum ErrorType {
InvalidEnum,
InvalidValue,
InvalidOperation,
InvalidFramebufferOperation,
OutOfMemory,
UnknownError,
}
/// An OpenGL back-end with GLSL shaders
pub struct GlBackEnd {
caps: super::Capabilities,
info: Info,
make_texture: fn(::tex::TextureInfo) -> Texture,
/// Maps (by the index) from texture name to TextureInfo, so we can look up what texture target
/// to bind this texture to later. Yuck!
// Doesn't use a SmallIntMap to avoid the overhead of Option
samplers: Vec<::tex::SamplerInfo>,
}
impl GlBackEnd {
/// Load OpenGL symbols and detect driver information
pub fn new(provider: &super::GlProvider) -> GlBackEnd {
gl::load_with(|s| provider.get_proc_address(s));
let info = Info::get();
let caps = super::Capabilities {
shader_model: shade::get_model(),
max_draw_buffers: get_uint(gl::MAX_DRAW_BUFFERS),
max_texture_size: get_uint(gl::MAX_TEXTURE_SIZE),
max_vertex_attributes: get_uint(gl::MAX_VERTEX_ATTRIBS),
uniform_block_supported: info.version >= Version(3, 1, None, "")
|| info.is_extension_supported("GL_ARB_uniform_buffer_object"),
array_buffer_supported: info.version >= Version(3, 0, None, "")
|| info.is_extension_supported("GL_ARB_vertex_array_object"),
immutable_storage_supported: info.version >= Version(4, 2, None, "")
|| info.is_extension_supported("GL_ARB_texture_storage"),
sampler_objects_supported: info.version >= Version(3, 3, None, "")
|| info.is_extension_supported("GL_ARB_sampler_objects"),
};
GlBackEnd {
caps: caps,
info: info,
make_texture: if caps.immutable_storage_supported {
tex::make_with_storage
} else {
tex::make_without_storage
},
samplers: Vec::new(),
}
}
#[allow(dead_code)]
fn get_error(&mut self) -> Result<(), ErrorType> {
match gl::GetError() {
gl::NO_ERROR => Ok(()),
gl::INVALID_ENUM => Err(InvalidEnum),
gl::INVALID_VALUE => Err(InvalidValue),
gl::INVALID_OPERATION => Err(InvalidOperation),
gl::INVALID_FRAMEBUFFER_OPERATION => Err(InvalidFramebufferOperation),
gl::OUT_OF_MEMORY => Err(OutOfMemory),
_ => Err(UnknownError),
}
}
/// Fails during a debug build if the implementation's error flag was set.
#[allow(dead_code)]
fn check(&mut self) {
debug_assert_eq!(self.get_error(), Ok(()));
}
/// Get the OpenGL-specific driver information
pub fn get_info<'a>(&'a self) -> &'a Info {
&self.info
}
}
impl super::ApiBackEnd for GlBackEnd {
fn get_capabilities<'a>(&'a self) -> &'a super::Capabilities {
&self.caps
}
fn create_buffer(&mut self) -> Buffer {
let mut name = 0 as Buffer;
unsafe {
gl::GenBuffers(1, &mut name);
}
info!("\tCreated buffer {}", name);
name
}
fn create_array_buffer(&mut self) -> Result<ArrayBuffer, ()> {
if self.caps.array_buffer_supported {
let mut name = 0 as ArrayBuffer;
unsafe {
gl::GenVertexArrays(1, &mut name);
}
info!("\tCreated array buffer {}", name);
Ok(name)
} else {
error!("\tarray buffer creation unsupported, ignored")
Err(())
}
}
fn create_shader(&mut self, stage: super::shade::Stage, code: super::shade::ShaderSource) -> Result<Shader, super::shade::CreateShaderError> {
let (name, info) = shade::create_shader(stage, code, self.get_capabilities().shader_model);
info.map(|info| {
let level = if name.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tShader compile log: {}", info);
});
name
}
fn create_program(&mut self, shaders: &[Shader]) -> Result<super::shade::ProgramMeta, ()> {
let (meta, info) = shade::create_program(&self.caps, shaders);
info.map(|info| {
let level = if meta.is_err() { log::ERROR } else { log::WARN };
log!(level, "\tProgram link log: {}", info);
});
meta
}
fn create_frame_buffer(&mut self) -> FrameBuffer {
let mut name = 0 as FrameBuffer;
unsafe {
gl::GenFramebuffers(1, &mut name);
}
info!("\tCreated frame buffer {}", name);
name
}
fn create_texture(&mut self, info: ::tex::TextureInfo) -> Texture {
(self.make_texture)(info)
}
fn create_sampler(&mut self, info: ::tex::SamplerInfo) -> Sampler {
if self.caps.sampler_objects_supported {
tex::make_sampler(info)
} else {
self.samplers.push(info);
self.samplers.len() as Sampler - 1
}
}
fn update_buffer(&mut self, buffer: Buffer, data: &super::Blob, usage: super::BufferUsage) {
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let size = data.get_size() as gl::types::GLsizeiptr;
let raw = data.get_address() as *const gl::types::GLvoid;
let usage = match usage {
super::UsageStatic => gl::STATIC_DRAW,
super::UsageDynamic => gl::DYNAMIC_DRAW,
super::UsageStream => gl::STREAM_DRAW,
};
unsafe {
gl::BufferData(gl::ARRAY_BUFFER, size, raw, usage);
}
}
fn process(&mut self, request: super::CastRequest) {
match request {
super::Clear(data) => {
let mut flags = match data.color {
//gl::ColorMask(gl::TRUE, gl::TRUE, gl::TRUE, gl::TRUE);
Some(super::target::Color([r,g,b,a])) => {
gl::ClearColor(r, g, b, a);
gl::COLOR_BUFFER_BIT
},
None => 0 as gl::types::GLenum
};
data.depth.map(|value| {
gl::DepthMask(gl::TRUE);
gl::ClearDepth(value as gl::types::GLclampd);
flags |= gl::DEPTH_BUFFER_BIT;
});
data.stencil.map(|value| {
gl::StencilMask(-1);
gl::ClearStencil(value as gl::types::GLint);
flags |= gl::STENCIL_BUFFER_BIT;
});
gl::Clear(flags);
},
super::BindProgram(program) => {
gl::UseProgram(program);
},
super::BindArrayBuffer(array_buffer) => {
if self.caps.array_buffer_supported {
gl::BindVertexArray(array_buffer);
} else {
error!("Ignored unsupported GL Request: {}", request)
}
},
super::BindAttribute(slot, buffer, count, el_type, stride, offset) => {
let gl_type = match el_type {
a::Int(_, a::U8, a::Unsigned) => gl::UNSIGNED_BYTE,
a::Int(_, a::U8, a::Signed) => gl::BYTE,
a::Int(_, a::U16, a::Unsigned) => gl::UNSIGNED_SHORT,
a::Int(_, a::U16, a::Signed) => gl::SHORT,
a::Int(_, a::U32, a::Unsigned) => gl::UNSIGNED_INT,
a::Int(_, a::U32, a::Signed) => gl::INT,
a::Float(_, a::F16) => gl::HALF_FLOAT,
a::Float(_, a::F32) => gl::FLOAT,
a::Float(_, a::F64) => gl::DOUBLE,
_ => {
error!("Unsupported element type: {}", el_type);
return
}
};
gl::BindBuffer(gl::ARRAY_BUFFER, buffer);
let offset = offset as *const gl::types::GLvoid;
match el_type {
a::Int(a::IntRaw, _, _) => unsafe {
gl::VertexAttribIPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
a::Int(a::IntNormalized, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::TRUE,
stride as gl::types::GLint, offset);
},
a::Int(a::IntAsFloat, _, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatDefault, _) => unsafe {
gl::VertexAttribPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type, gl::FALSE,
stride as gl::types::GLint, offset);
},
a::Float(a::FloatPrecision, _) => unsafe {
gl::VertexAttribLPointer(slot as gl::types::GLuint,
count as gl::types::GLint, gl_type,
stride as gl::types::GLint, offset);
},
_ => ()
}
gl::EnableVertexAttribArray(slot as gl::types::GLuint);
},
super::BindIndex(buffer) => {
gl::BindBuffer(gl::ELEMENT_ARRAY_BUFFER, buffer);
},
super::BindFrameBuffer(frame_buffer) => {
gl::BindFramebuffer(gl::DRAW_FRAMEBUFFER, frame_buffer);
},
super::BindTarget(target, plane) => {
let attachment = match target {
super::target::TargetColor(index) =>
gl::COLOR_ATTACHMENT0 + (index as gl::types::GLenum),
super::target::TargetDepth => gl::DEPTH_ATTACHMENT,
super::target::TargetStencil => gl::STENCIL_ATTACHMENT,
super::target::TargetDepthStencil => gl::DEPTH_STENCIL_ATTACHMENT,
};
match plane {
super::target::PlaneEmpty => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, 0),
super::target::PlaneSurface(name) => gl::FramebufferRenderbuffer
(gl::DRAW_FRAMEBUFFER, attachment, gl::RENDERBUFFER, name),
super::target::PlaneTexture(tex, level) => gl::FramebufferTexture
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint),
super::target::PlaneTextureLayer(tex, level, layer) => gl::FramebufferTextureLayer
(gl::DRAW_FRAMEBUFFER, attachment, tex.name, level as gl::types::GLint, layer as gl::types::GLint),
}
},
super::BindUniformBlock(program, index, loc, buffer) => {
gl::UniformBlockBinding(program, index as gl::types::GLuint, loc as gl::types::GLuint);
gl::BindBufferBase(gl::UNIFORM_BUFFER, loc as gl::types::GLuint, buffer);
},
super::BindUniform(loc, uniform) => {
shade::bind_uniform(loc as gl::types::GLint, uniform);
},
super::BindTexture(loc, tex, sam) => {
tex::bind_texture(loc as gl::types::GLuint, tex, sam, self);
},
super::SetPrimitiveState(prim) => {
rast::bind_primitive(prim);
},
super::SetDepthStencilState(depth, stencil, cull) => {
rast::bind_stencil(stencil, cull);
rast::bind_depth(depth);
},
super::SetBlendState(blend) => {
rast::bind_blend(blend);
},
| parse | identifier_name |
edit_ops.rs | // Copyright 2020 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions for editing ropes.
use std::borrow::Cow;
use std::collections::BTreeSet;
use xi_rope::{Cursor, DeltaBuilder, Interval, LinesMetric, Rope, RopeDelta};
use crate::backspace::offset_for_delete_backwards;
use crate::config::BufferItems;
use crate::line_offset::{LineOffset, LogicalLines};
use crate::linewrap::Lines;
use crate::movement::{region_movement, Movement};
use crate::selection::{SelRegion, Selection};
use crate::word_boundaries::WordCursor;
#[derive(Debug, Copy, Clone)]
pub enum IndentDirection {
In,
Out,
}
/// Replaces the selection with the text `T`.
pub fn insert<T: Into<Rope>>(base: &Rope, regions: &[SelRegion], text: T) -> RopeDelta {
let rope = text.into();
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, rope.clone());
}
builder.build()
}
/// Leaves the current selection untouched, but surrounds it with two insertions.
pub fn | <BT, AT>(
base: &Rope,
regions: &[SelRegion],
before_text: BT,
after_text: AT,
) -> RopeDelta
where
BT: Into<Rope>,
AT: Into<Rope>,
{
let mut builder = DeltaBuilder::new(base.len());
let before_rope = before_text.into();
let after_rope = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if!region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to holds the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char|!c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|®ion| sel_region_to_interval_and_rope(base, region));
for ®ion in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emac's behavior. It swaps last
// two characters of line if at end of line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end!= base.len() because if the editor is entirely empty, we dont' want to pull from empty space
if (end == middle || end == end_line_offset) && end!= base.len() {
middle = start;
start = base.prev_grapheme_offset(middle).unwrap_or(0);
end = middle.wrapping_add(1);
}
let interval = Interval::new(start, end);
let before = base.slice_to_cow(start..middle);
let after = base.slice_to_cow(middle..end);
let swapped: String = [after, before].concat();
builder.replace(interval, Rope::from(swapped));
last = end;
}
} else if let Some(previous_selection) = optional_previous_selection {
let current_interval = sel_region_to_interval_and_rope(base, region);
builder.replace(current_interval.0, previous_selection.1);
optional_previous_selection = Some(current_interval);
}
}
builder.build()
}
pub fn transform_text<F: Fn(&str) -> String>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let selected_text = base.slice_to_cow(region);
let interval = Interval::new(region.min(), region.max());
builder.replace(interval, Rope::from(transform_function(&selected_text)));
}
builder.build()
}
/// Changes the number(s) under the cursor(s) with the `transform_function`.
/// If there is a number next to or on the beginning of the region, then
/// this number will be replaced with the result of `transform_function` and
/// the cursor will be placed at the end of the number.
/// Some Examples with a increment `transform_function`:
///
/// "|1234" -> "1235|"
/// "12|34" -> "1235|"
/// "-|12" -> "-11|"
/// "another number is 123|]" -> "another number is 124"
///
/// This function also works fine with multiple regions.
pub fn change_number<F: Fn(i128) -> Option<i128>>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let mut cursor = WordCursor::new(base, region.end);
let (mut start, end) = cursor.select_word();
// if the word begins with '-', then it is a negative number
if start > 0 && base.byte_at(start - 1) == (b'-') {
start -= 1;
}
let word = base.slice_to_cow(start..end);
if let Some(number) = word.parse::<i128>().ok().and_then(&transform_function) {
let interval = Interval::new(start, end);
builder.replace(interval, Rope::from(number.to_string()));
}
}
builder.build()
}
// capitalization behaviour is similar to behaviour in XCode
pub fn capitalize_text(base: &Rope, regions: &[SelRegion]) -> (RopeDelta, Selection) {
let mut builder = DeltaBuilder::new(base.len());
let mut final_selection = Selection::new();
for ®ion in regions {
final_selection.add_region(SelRegion::new(region.max(), region.max()));
let mut word_cursor = WordCursor::new(base, region.min());
loop {
// capitalize each word in the current selection
let (start, end) = word_cursor.select_word();
if start < end {
let interval = Interval::new(start, end);
let word = base.slice_to_cow(start..end);
// first letter is uppercase, remaining letters are lowercase
let (first_char, rest) = word.split_at(1);
let capitalized_text = [first_char.to_uppercase(), rest.to_lowercase()].concat();
builder.replace(interval, Rope::from(capitalized_text));
}
if word_cursor.next_boundary().is_none() || end > region.max() {
break;
}
}
}
(builder.build(), final_selection)
}
fn sel_region_to_interval_and_rope(base: &Rope, region: SelRegion) -> (Interval, Rope) {
let as_interval = Interval::new(region.min(), region.max());
let interval_rope = base.subseq(as_interval);
(as_interval, interval_rope)
}
fn last_selection_region(regions: &[SelRegion]) -> Option<&SelRegion> {
for region in regions.iter().rev() {
if!region.is_caret() {
return Some(region);
}
}
None
}
fn get_tab_text(config: &BufferItems, tab_size: Option<usize>) -> &'static str {
let tab_size = tab_size.unwrap_or(config.tab_size);
let tab_text = if config.translate_tabs_to_spaces { n_spaces(tab_size) } else { "\t" };
tab_text
}
fn n_spaces(n: usize) -> &'static str {
let spaces = " ";
assert!(n <= spaces.len());
&spaces[..n]
}
| surround | identifier_name |
edit_ops.rs | // Copyright 2020 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions for editing ropes.
use std::borrow::Cow;
use std::collections::BTreeSet;
use xi_rope::{Cursor, DeltaBuilder, Interval, LinesMetric, Rope, RopeDelta};
use crate::backspace::offset_for_delete_backwards;
use crate::config::BufferItems;
use crate::line_offset::{LineOffset, LogicalLines};
use crate::linewrap::Lines;
use crate::movement::{region_movement, Movement};
use crate::selection::{SelRegion, Selection};
use crate::word_boundaries::WordCursor;
#[derive(Debug, Copy, Clone)]
pub enum IndentDirection {
In,
Out,
}
/// Replaces the selection with the text `T`.
pub fn insert<T: Into<Rope>>(base: &Rope, regions: &[SelRegion], text: T) -> RopeDelta {
let rope = text.into();
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, rope.clone());
}
builder.build()
}
/// Leaves the current selection untouched, but surrounds it with two insertions.
pub fn surround<BT, AT>(
base: &Rope,
regions: &[SelRegion],
before_text: BT,
after_text: AT,
) -> RopeDelta
where
BT: Into<Rope>,
AT: Into<Rope>,
{
let mut builder = DeltaBuilder::new(base.len());
let before_rope = before_text.into();
let after_rope = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save | else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if!region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to holds the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char|!c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|®ion| sel_region_to_interval_and_rope(base, region));
for ®ion in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emac's behavior. It swaps last
// two characters of line if at end of line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end!= base.len() because if the editor is entirely empty, we dont' want to pull from empty space
if (end == middle || end == end_line_offset) && end!= base.len() {
middle = start;
start = base.prev_grapheme_offset(middle).unwrap_or(0);
end = middle.wrapping_add(1);
}
let interval = Interval::new(start, end);
let before = base.slice_to_cow(start..middle);
let after = base.slice_to_cow(middle..end);
let swapped: String = [after, before].concat();
builder.replace(interval, Rope::from(swapped));
last = end;
}
} else if let Some(previous_selection) = optional_previous_selection {
let current_interval = sel_region_to_interval_and_rope(base, region);
builder.replace(current_interval.0, previous_selection.1);
optional_previous_selection = Some(current_interval);
}
}
builder.build()
}
pub fn transform_text<F: Fn(&str) -> String>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let selected_text = base.slice_to_cow(region);
let interval = Interval::new(region.min(), region.max());
builder.replace(interval, Rope::from(transform_function(&selected_text)));
}
builder.build()
}
/// Changes the number(s) under the cursor(s) with the `transform_function`.
/// If there is a number next to or on the beginning of the region, then
/// this number will be replaced with the result of `transform_function` and
/// the cursor will be placed at the end of the number.
/// Some Examples with a increment `transform_function`:
///
/// "|1234" -> "1235|"
/// "12|34" -> "1235|"
/// "-|12" -> "-11|"
/// "another number is 123|]" -> "another number is 124"
///
/// This function also works fine with multiple regions.
pub fn change_number<F: Fn(i128) -> Option<i128>>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let mut cursor = WordCursor::new(base, region.end);
let (mut start, end) = cursor.select_word();
// if the word begins with '-', then it is a negative number
if start > 0 && base.byte_at(start - 1) == (b'-') {
start -= 1;
}
let word = base.slice_to_cow(start..end);
if let Some(number) = word.parse::<i128>().ok().and_then(&transform_function) {
let interval = Interval::new(start, end);
builder.replace(interval, Rope::from(number.to_string()));
}
}
builder.build()
}
// capitalization behaviour is similar to behaviour in XCode
pub fn capitalize_text(base: &Rope, regions: &[SelRegion]) -> (RopeDelta, Selection) {
let mut builder = DeltaBuilder::new(base.len());
let mut final_selection = Selection::new();
for ®ion in regions {
final_selection.add_region(SelRegion::new(region.max(), region.max()));
let mut word_cursor = WordCursor::new(base, region.min());
loop {
// capitalize each word in the current selection
let (start, end) = word_cursor.select_word();
if start < end {
let interval = Interval::new(start, end);
let word = base.slice_to_cow(start..end);
// first letter is uppercase, remaining letters are lowercase
let (first_char, rest) = word.split_at(1);
let capitalized_text = [first_char.to_uppercase(), rest.to_lowercase()].concat();
builder.replace(interval, Rope::from(capitalized_text));
}
if word_cursor.next_boundary().is_none() || end > region.max() {
break;
}
}
}
(builder.build(), final_selection)
}
fn sel_region_to_interval_and_rope(base: &Rope, region: SelRegion) -> (Interval, Rope) {
let as_interval = Interval::new(region.min(), region.max());
let interval_rope = base.subseq(as_interval);
(as_interval, interval_rope)
}
fn last_selection_region(regions: &[SelRegion]) -> Option<&SelRegion> {
for region in regions.iter().rev() {
if!region.is_caret() {
return Some(region);
}
}
None
}
fn get_tab_text(config: &BufferItems, tab_size: Option<usize>) -> &'static str {
let tab_size = tab_size.unwrap_or(config.tab_size);
let tab_text = if config.translate_tabs_to_spaces { n_spaces(tab_size) } else { "\t" };
tab_text
}
fn n_spaces(n: usize) -> &'static str {
let spaces = " ";
assert!(n <= spaces.len());
&spaces[..n]
}
| {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} | conditional_block |
edit_ops.rs | // Copyright 2020 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions for editing ropes.
use std::borrow::Cow;
use std::collections::BTreeSet;
use xi_rope::{Cursor, DeltaBuilder, Interval, LinesMetric, Rope, RopeDelta};
use crate::backspace::offset_for_delete_backwards;
use crate::config::BufferItems;
use crate::line_offset::{LineOffset, LogicalLines};
use crate::linewrap::Lines;
use crate::movement::{region_movement, Movement};
use crate::selection::{SelRegion, Selection};
use crate::word_boundaries::WordCursor;
#[derive(Debug, Copy, Clone)]
pub enum IndentDirection {
In,
Out,
}
/// Replaces the selection with the text `T`.
pub fn insert<T: Into<Rope>>(base: &Rope, regions: &[SelRegion], text: T) -> RopeDelta {
let rope = text.into();
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, rope.clone());
}
builder.build()
}
/// Leaves the current selection untouched, but surrounds it with two insertions.
pub fn surround<BT, AT>(
base: &Rope,
regions: &[SelRegion],
before_text: BT,
after_text: AT,
) -> RopeDelta
where
BT: Into<Rope>,
AT: Into<Rope>,
{
let mut builder = DeltaBuilder::new(base.len());
let before_rope = before_text.into();
let after_rope = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) |
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if!region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to holds the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
}
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char|!c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|®ion| sel_region_to_interval_and_rope(base, region));
for ®ion in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emac's behavior. It swaps last
// two characters of line if at end of line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end!= base.len() because if the editor is entirely empty, we dont' want to pull from empty space
if (end == middle || end == end_line_offset) && end!= base.len() {
middle = start;
start = base.prev_grapheme_offset(middle).unwrap_or(0);
end = middle.wrapping_add(1);
}
let interval = Interval::new(start, end);
let before = base.slice_to_cow(start..middle);
let after = base.slice_to_cow(middle..end);
let swapped: String = [after, before].concat();
builder.replace(interval, Rope::from(swapped));
last = end;
}
} else if let Some(previous_selection) = optional_previous_selection {
let current_interval = sel_region_to_interval_and_rope(base, region);
builder.replace(current_interval.0, previous_selection.1);
optional_previous_selection = Some(current_interval);
}
}
builder.build()
}
pub fn transform_text<F: Fn(&str) -> String>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let selected_text = base.slice_to_cow(region);
let interval = Interval::new(region.min(), region.max());
builder.replace(interval, Rope::from(transform_function(&selected_text)));
}
builder.build()
}
/// Changes the number(s) under the cursor(s) with the `transform_function`.
/// If there is a number next to or on the beginning of the region, then
/// this number will be replaced with the result of `transform_function` and
/// the cursor will be placed at the end of the number.
/// Some Examples with a increment `transform_function`:
///
/// "|1234" -> "1235|"
/// "12|34" -> "1235|"
/// "-|12" -> "-11|"
/// "another number is 123|]" -> "another number is 124"
///
/// This function also works fine with multiple regions.
pub fn change_number<F: Fn(i128) -> Option<i128>>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let mut cursor = WordCursor::new(base, region.end);
let (mut start, end) = cursor.select_word();
// if the word begins with '-', then it is a negative number
if start > 0 && base.byte_at(start - 1) == (b'-') {
start -= 1;
}
let word = base.slice_to_cow(start..end);
if let Some(number) = word.parse::<i128>().ok().and_then(&transform_function) {
let interval = Interval::new(start, end);
builder.replace(interval, Rope::from(number.to_string()));
}
}
builder.build()
}
// capitalization behaviour is similar to behaviour in XCode
pub fn capitalize_text(base: &Rope, regions: &[SelRegion]) -> (RopeDelta, Selection) {
let mut builder = DeltaBuilder::new(base.len());
let mut final_selection = Selection::new();
for ®ion in regions {
final_selection.add_region(SelRegion::new(region.max(), region.max()));
let mut word_cursor = WordCursor::new(base, region.min());
loop {
// capitalize each word in the current selection
let (start, end) = word_cursor.select_word();
if start < end {
let interval = Interval::new(start, end);
let word = base.slice_to_cow(start..end);
// first letter is uppercase, remaining letters are lowercase
let (first_char, rest) = word.split_at(1);
let capitalized_text = [first_char.to_uppercase(), rest.to_lowercase()].concat();
builder.replace(interval, Rope::from(capitalized_text));
}
if word_cursor.next_boundary().is_none() || end > region.max() {
break;
}
}
}
(builder.build(), final_selection)
}
fn sel_region_to_interval_and_rope(base: &Rope, region: SelRegion) -> (Interval, Rope) {
let as_interval = Interval::new(region.min(), region.max());
let interval_rope = base.subseq(as_interval);
(as_interval, interval_rope)
}
fn last_selection_region(regions: &[SelRegion]) -> Option<&SelRegion> {
for region in regions.iter().rev() {
if!region.is_caret() {
return Some(region);
}
}
None
}
fn get_tab_text(config: &BufferItems, tab_size: Option<usize>) -> &'static str {
let tab_size = tab_size.unwrap_or(config.tab_size);
let tab_text = if config.translate_tabs_to_spaces { n_spaces(tab_size) } else { "\t" };
tab_text
}
fn n_spaces(n: usize) -> &'static str {
let spaces = " ";
assert!(n <= spaces.len());
&spaces[..n]
}
| {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
}; | identifier_body |
edit_ops.rs | // Copyright 2020 The xi-editor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Functions for editing ropes.
use std::borrow::Cow;
use std::collections::BTreeSet;
use xi_rope::{Cursor, DeltaBuilder, Interval, LinesMetric, Rope, RopeDelta};
use crate::backspace::offset_for_delete_backwards;
use crate::config::BufferItems;
use crate::line_offset::{LineOffset, LogicalLines};
use crate::linewrap::Lines;
use crate::movement::{region_movement, Movement};
use crate::selection::{SelRegion, Selection};
use crate::word_boundaries::WordCursor;
#[derive(Debug, Copy, Clone)]
pub enum IndentDirection {
In,
Out,
}
/// Replaces the selection with the text `T`.
pub fn insert<T: Into<Rope>>(base: &Rope, regions: &[SelRegion], text: T) -> RopeDelta {
let rope = text.into();
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, rope.clone());
}
builder.build()
}
/// Leaves the current selection untouched, but surrounds it with two insertions.
pub fn surround<BT, AT>(
base: &Rope,
regions: &[SelRegion],
before_text: BT,
after_text: AT,
) -> RopeDelta
where
BT: Into<Rope>,
AT: Into<Rope>,
{
let mut builder = DeltaBuilder::new(base.len());
let before_rope = before_text.into();
let after_rope = after_text.into();
for region in regions {
let before_iv = Interval::new(region.min(), region.min());
builder.replace(before_iv, before_rope.clone());
let after_iv = Interval::new(region.max(), region.max());
builder.replace(after_iv, after_rope.clone());
}
builder.build()
}
pub fn duplicate_line(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
// get affected lines or regions
let mut to_duplicate = BTreeSet::new();
for region in regions {
let (first_line, _) = LogicalLines.offset_to_line_col(base, region.min());
let line_start = LogicalLines.offset_of_line(base, first_line);
let mut cursor = match region.is_caret() {
true => Cursor::new(base, line_start),
false => {
// duplicate all lines together that are part of the same selections
let (last_line, _) = LogicalLines.offset_to_line_col(base, region.max());
let line_end = LogicalLines.offset_of_line(base, last_line);
Cursor::new(base, line_end)
}
};
if let Some(line_end) = cursor.next::<LinesMetric>() {
to_duplicate.insert((line_start, line_end));
}
}
for (start, end) in to_duplicate {
// insert duplicates
let iv = Interval::new(start, start);
builder.replace(iv, base.slice(start..end));
// last line does not have new line character so it needs to be manually added
if end == base.len() {
builder.replace(iv, Rope::from(&config.line_ending))
}
}
builder.build()
}
/// Used when the user presses the backspace key. If no delta is returned, then nothing changes.
pub fn delete_backward(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
// TODO: this function is workable but probably overall code complexity
// could be improved by implementing a "backspace" movement instead.
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let start = offset_for_delete_backwards(region, base, config);
let iv = Interval::new(start, region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Common logic for a number of delete methods. For each region in the
/// selection, if the selection is a caret, delete the region between
/// the caret and the movement applied to the caret, otherwise delete
/// the region.
///
/// If `save` is set, the tuple will contain a rope with the deleted text.
///
/// # Arguments
///
/// * `height` - viewport height
pub(crate) fn delete_by_movement(
base: &Rope,
regions: &[SelRegion],
lines: &Lines,
movement: Movement,
height: usize,
save: bool,
) -> (RopeDelta, Option<Rope>) {
// We compute deletions as a selection because the merge logic
// is convenient. Another possibility would be to make the delta
// builder able to handle overlapping deletions (with union semantics).
let mut deletions = Selection::new();
for &r in regions {
if r.is_caret() {
let new_region = region_movement(movement, r, lines, height, base, true);
deletions.add_region(new_region);
} else {
deletions.add_region(r);
}
}
let kill_ring = if save {
let saved = extract_sel_regions(base, &deletions).unwrap_or_default();
Some(Rope::from(saved))
} else {
None
};
(delete_sel_regions(base, &deletions), kill_ring)
}
/// Deletes the given regions.
pub(crate) fn delete_sel_regions(base: &Rope, sel_regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in sel_regions {
let iv = Interval::new(region.min(), region.max());
if!iv.is_empty() {
builder.delete(iv);
}
}
builder.build()
}
/// Extracts non-caret selection regions into a string,
/// joining multiple regions with newlines.
pub(crate) fn extract_sel_regions<'a>(
base: &'a Rope,
sel_regions: &[SelRegion],
) -> Option<Cow<'a, str>> {
let mut saved = None;
for region in sel_regions {
if!region.is_caret() {
let val = base.slice_to_cow(region);
match saved {
None => saved = Some(val),
Some(ref mut s) => {
s.to_mut().push('\n');
s.to_mut().push_str(&val);
}
}
}
}
saved
}
pub fn insert_newline(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
insert(base, regions, &config.line_ending)
}
pub fn insert_tab(base: &Rope, regions: &[SelRegion], config: &BufferItems) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let const_tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
if line_range.len() > 1 {
for line in line_range {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let iv = Interval::new(offset, offset);
builder.replace(iv, Rope::from(const_tab_text));
}
} else {
let (_, col) = LogicalLines.offset_to_line_col(base, region.start);
let mut tab_size = config.tab_size;
tab_size = tab_size - (col % tab_size);
let tab_text = get_tab_text(config, Some(tab_size));
let iv = Interval::new(region.min(), region.max());
builder.replace(iv, Rope::from(tab_text));
}
}
builder.build()
}
/// Indents or outdents lines based on selection and user's tab settings.
/// Uses a BTreeSet to holds the collection of lines to modify.
/// Preserves cursor position and current selection as much as possible.
/// Tries to have behavior consistent with other editors like Atom,
/// Sublime and VSCode, with non-caret selections not being modified.
pub fn modify_indent(
base: &Rope,
regions: &[SelRegion],
config: &BufferItems,
direction: IndentDirection,
) -> RopeDelta {
let mut lines = BTreeSet::new();
let tab_text = get_tab_text(config, None);
for region in regions {
let line_range = LogicalLines.get_line_range(base, region);
for line in line_range {
lines.insert(line);
}
}
match direction {
IndentDirection::In => indent(base, lines, tab_text),
IndentDirection::Out => outdent(base, lines, tab_text),
}
}
fn indent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let interval = Interval::new(offset, offset);
builder.replace(interval, Rope::from(tab_text));
}
builder.build()
} |
fn outdent(base: &Rope, lines: BTreeSet<usize>, tab_text: &str) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for line in lines {
let offset = LogicalLines.line_col_to_offset(base, line, 0);
let tab_offset = LogicalLines.line_col_to_offset(base, line, tab_text.len());
let interval = Interval::new(offset, tab_offset);
let leading_slice = base.slice_to_cow(interval.start()..interval.end());
if leading_slice == tab_text {
builder.delete(interval);
} else if let Some(first_char_col) = leading_slice.find(|c: char|!c.is_whitespace()) {
let first_char_offset = LogicalLines.line_col_to_offset(base, line, first_char_col);
let interval = Interval::new(offset, first_char_offset);
builder.delete(interval);
}
}
builder.build()
}
pub fn transpose(base: &Rope, regions: &[SelRegion]) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
let mut last = 0;
let mut optional_previous_selection: Option<(Interval, Rope)> =
last_selection_region(regions).map(|®ion| sel_region_to_interval_and_rope(base, region));
for ®ion in regions {
if region.is_caret() {
let mut middle = region.end;
let mut start = base.prev_grapheme_offset(middle).unwrap_or(0);
let mut end = base.next_grapheme_offset(middle).unwrap_or(middle);
// Note: this matches Emac's behavior. It swaps last
// two characters of line if at end of line.
if start >= last {
let end_line_offset =
LogicalLines.offset_of_line(base, LogicalLines.line_of_offset(base, end));
// include end!= base.len() because if the editor is entirely empty, we dont' want to pull from empty space
if (end == middle || end == end_line_offset) && end!= base.len() {
middle = start;
start = base.prev_grapheme_offset(middle).unwrap_or(0);
end = middle.wrapping_add(1);
}
let interval = Interval::new(start, end);
let before = base.slice_to_cow(start..middle);
let after = base.slice_to_cow(middle..end);
let swapped: String = [after, before].concat();
builder.replace(interval, Rope::from(swapped));
last = end;
}
} else if let Some(previous_selection) = optional_previous_selection {
let current_interval = sel_region_to_interval_and_rope(base, region);
builder.replace(current_interval.0, previous_selection.1);
optional_previous_selection = Some(current_interval);
}
}
builder.build()
}
pub fn transform_text<F: Fn(&str) -> String>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let selected_text = base.slice_to_cow(region);
let interval = Interval::new(region.min(), region.max());
builder.replace(interval, Rope::from(transform_function(&selected_text)));
}
builder.build()
}
/// Changes the number(s) under the cursor(s) with the `transform_function`.
/// If there is a number next to or on the beginning of the region, then
/// this number will be replaced with the result of `transform_function` and
/// the cursor will be placed at the end of the number.
/// Some Examples with a increment `transform_function`:
///
/// "|1234" -> "1235|"
/// "12|34" -> "1235|"
/// "-|12" -> "-11|"
/// "another number is 123|]" -> "another number is 124"
///
/// This function also works fine with multiple regions.
pub fn change_number<F: Fn(i128) -> Option<i128>>(
base: &Rope,
regions: &[SelRegion],
transform_function: F,
) -> RopeDelta {
let mut builder = DeltaBuilder::new(base.len());
for region in regions {
let mut cursor = WordCursor::new(base, region.end);
let (mut start, end) = cursor.select_word();
// if the word begins with '-', then it is a negative number
if start > 0 && base.byte_at(start - 1) == (b'-') {
start -= 1;
}
let word = base.slice_to_cow(start..end);
if let Some(number) = word.parse::<i128>().ok().and_then(&transform_function) {
let interval = Interval::new(start, end);
builder.replace(interval, Rope::from(number.to_string()));
}
}
builder.build()
}
// capitalization behaviour is similar to behaviour in XCode
pub fn capitalize_text(base: &Rope, regions: &[SelRegion]) -> (RopeDelta, Selection) {
let mut builder = DeltaBuilder::new(base.len());
let mut final_selection = Selection::new();
for ®ion in regions {
final_selection.add_region(SelRegion::new(region.max(), region.max()));
let mut word_cursor = WordCursor::new(base, region.min());
loop {
// capitalize each word in the current selection
let (start, end) = word_cursor.select_word();
if start < end {
let interval = Interval::new(start, end);
let word = base.slice_to_cow(start..end);
// first letter is uppercase, remaining letters are lowercase
let (first_char, rest) = word.split_at(1);
let capitalized_text = [first_char.to_uppercase(), rest.to_lowercase()].concat();
builder.replace(interval, Rope::from(capitalized_text));
}
if word_cursor.next_boundary().is_none() || end > region.max() {
break;
}
}
}
(builder.build(), final_selection)
}
fn sel_region_to_interval_and_rope(base: &Rope, region: SelRegion) -> (Interval, Rope) {
let as_interval = Interval::new(region.min(), region.max());
let interval_rope = base.subseq(as_interval);
(as_interval, interval_rope)
}
fn last_selection_region(regions: &[SelRegion]) -> Option<&SelRegion> {
for region in regions.iter().rev() {
if!region.is_caret() {
return Some(region);
}
}
None
}
fn get_tab_text(config: &BufferItems, tab_size: Option<usize>) -> &'static str {
let tab_size = tab_size.unwrap_or(config.tab_size);
let tab_text = if config.translate_tabs_to_spaces { n_spaces(tab_size) } else { "\t" };
tab_text
}
fn n_spaces(n: usize) -> &'static str {
let spaces = " ";
assert!(n <= spaces.len());
&spaces[..n]
} | random_line_split |
|
generate.rs | /*fn search(width: usize, height: usize, words: &[Word], tries: &[&Trie], acrosses: &mut Vec<Word>) {
if height == acrosses.len() {
let downs: Vec<Word> = (0..width).map(|x| (0..height).map(|y| acrosses[y][x]).collect()).collect();
if!iproduct!(acrosses.iter(), downs.iter()).any(|(a, b)| a == b) {
println!("{:?} {:?}", acrosses, downs);
}
return;
}
let mut tries2: Vec<&Trie> = tries.iter().cloned().collect();
for &word in words {
let mut min_size = usize::max_value();
for x in 0..width {
tries2[x] = tries[x].child(word[x]);
min_size = min_size.min(tries2[x].len());
}
if min_size > 0 {
acrosses.push(word);
search(width, height, words, &tries2, acrosses);
acrosses.pop();
}
}
}*/
/*fn main() {
let dictionary = ScoredWord::default().unwrap();
let width = 3;
let height = 9;
let mut acrosses: Vec<Word> =
dictionary.iter()
.filter_map(|w| if w.word.len() == width { Some(w.word) } else { None })
.collect();
acrosses.sort();
let downs: Trie =
dictionary.iter()
.filter_map(|w| if w.word.len() == height { Some(w.word) } else { None })
.collect();
search(width, height, &acrosses, &vec![&downs; width], &mut vec![]);
}*/
const START: &'static [u8] =
b",,,,,!,,,,,!,,,,,,
,,,,,!,,,,,!,,,,,,
H,O,O,D,W,I,N,K,E,D,!,,,,,,
,,,,,,!,!,!,,,,!,!,,,
!,!,!,,,,,!,,,,,,,!,!,!
,,,,!,S,H,O,R,T,C,H,A,N,G,E,D
,,,!,,,,,,!,!,!,,,,,
,,,!,,,,,!,,,,,!,,,
,,,,,!,!,!,,,,,,!,,,
H,O,R,N,S,W,O,G,G,L,E,D,!,,,,
!,!,!,,,,,,,!,,,,,!,!,!
,,,!,!,,,,!,!,!,,,,,,
,,,,,,!,B,A,M,B,O,O,Z,L,E,D
,,,,,,!,,,,,!,,,,,
,,,,,,!,,,,,!,,,,,";
const GRID: &'static str =
"ALLAH█NASA█BAMBOO
GEODE█ARAB█RIBALD
HOODWINKED█ELATED
ANTLER███USA██SOS
███EDIT█ICETEA███
PIED█SHORTCHANGED
ADD█LEAVE███RALLY
RAG█ASIA█PAIL█EEK
CHEAP███EARNS█ACE
HORNSWOGGLED█ANTS
███TEABAG█SIAM███
DOB██HIS███GRACED
OVERDO█BAMBOOZLED
LATINO█AGAR█MOOLA
LLAMAS█GAGA█ANTSY";
fn write_impl() {
let mut rows = vec![];
for line in GRID.split('\n') {
let mut row = vec![];
for c in line.chars() {
if c == '█' {
row.push(Cell::Black);
} else {
row.push(Cell::White(Letter::from_unicode(c)));
}
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| rows[y][x]);
println!("{:?}", grid);
let windows = WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black));
let mut clues = HashMap::<&str, &str>::new();
clues.insert("PIED", "Like the proverbial piper");
clues.insert("DNA", "Twisted pair?");
clues.insert("PAL", "Alternative to NTSC");
clues.insert("LATINO", "16.7% of the American population");
clues.insert("IDAHO", "The Gem State");
clues.insert("ARES", "Foe of Wonderwoman");
clues.insert("ELECT", "Opt (to)");
clues.insert("IRE", "Choler");
clues.insert("INDIGO", "Infraviolet?");
clues.insert("TEABAG", "Rude post-victory celebration");
clues.insert("MAG", "Toner color: abbr.");
clues.insert("OVERDO", "Cook for 20 minutes, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs"); | clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len()!= window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed!= letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
}).min_by_key(|(index, words)| words.len()) {
None => {
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
return;
}
Some(x) => x,
};
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
for option in options {
let mut grid2 = grid.clone();
for (position, value) in windows[window].positions().zip(option.word.iter()) {
grid2[position] = Cell::White(Some(*value));
}
make_choices(dictionary, &grid2, windows);
}
}*/
fn search_impl() -> io::Result<()> {
let mut reader = ReaderBuilder::new()
.has_headers(false)
.from_reader(START);
let mut rows = vec![];
for line in reader.records() {
let mut row = vec![];
for cell in line?.iter() {
row.push(match cell {
"!" => Cell::Black,
"" => Cell::White(None),
letter => Cell::White(Some(Letter::from_unicode(letter.chars().next().unwrap()).unwrap())),
});
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| {
rows[y][x]
});
println!("{}", AsciiGrid(&grid));
let scored_words = ScoredWord::default().unwrap();
let mut dictionary = scored_words.iter().map(|scored_word| scored_word.word).collect::<Vec<Word>>();
//dictionary=dictionary[dictionary.]
//dictionary.push(Word::from_str("bamboozled").unwrap());
//dictionary.push(Word::from_str("shortchanged").unwrap());
/* let windows = windows(&grid, |cell| {
match cell {
Cell::Black => true,
_ => false,
}
});
make_choices(&dictionary, &grid, &windows);*/
let banned: HashSet<Word> = ["apsis",
"asdic", "jcloth", "galah", "algin",
"jinni", "slaty", "jingo", "saiga", "scuta",
"echt", "concha", "duma", "obeche", "teazel",
"toerag", "yalta", "howdah", "purdah", "agin", "teehee", "faery", "aubade", "nyala",
"taenia", "auden", "diable", "craped", "oscan", "halvah",
"reeve", "dhole", "oca", "balzac", "wahine", "kaons", "medico", "stelae", "asci",
"anorak", "madedo", "aurous", "dhoti", "GAUR", "AUTEUR", "piaf", "BANZAI", "WABASH", "ERUCT",
"THRO", "LINAGE", "LABAN", "CHID", "ADDY", "ANOA", "EDO", "BUHL", "BASTS", "EDDO", "IRISED",
"RAFFIA", "SHARI", "OTIC", "ALTHO", "EFFING", "BROOKE", "BOLL", "BSE", "PEDALO",
"ELUL", "LARCH", "BORZOI", "DAGO", "LAREDO", "GAOLS", "GIGUE", "TSURIS", "DYADS",
"STALAG", "SERINE", "BRANDT", "efface", "GAELS", "CRU", "HONIED", "PARSI", "BENZOL", "AACHEN", "DEEDY",
"CHOREA", "AERY", "CURIO", "RAMEAU", "EFFETE", "EFFUSE", "RIALTO", "ballup", "HAAR",
"ABATOR", "BAOBAB", "SHILOH", "ATTAR", "ETUI", "REEFY", "RATTAN", "AARHUS", "TENUTO", "NOLL",
"TWEE", "FOOZLE", "GIBBER", "OUSE", "agm", "gam", "alb", "APPALS",
"GLOGG", "NEEP", "RIPELY", "ARIL", "ELIJAH", "ADAR", "ALINE", "LIENAL", "EPIZOA", "OGEE"
].iter().map(|str| Word::from_str(*str).unwrap()).collect();
// for &word in banned.iter() {
// println!("{:?}", word);
// /println!("{:?} {:?}", word, scored_words.iter().find(|sw| sw.word == word).unwrap().score);
// }
// for word in scored_words {
// if word.score > 30 {
// println!("{:?}", word.word);
// }
// }
//dictionary.resize((dictionary.len() as f32 * 0.4) as usize, Word::new());
dictionary.push(Word::from_str("HOODWINKED").unwrap());
dictionary.push(Word::from_str("SHORTCHANGED").unwrap());
dictionary.push(Word::from_str("HORNSWOGGLED").unwrap());
dictionary.push(Word::from_str("BAMBOOZLED").unwrap());
dictionary.retain(|word|!banned.contains(word));
{
let mut search = Search::new(WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black)), &dictionary);
search.retain(&grid);
search.refine_all();
let mut result = None;
let _ = search.solve(&mut take_one_result(&mut result));
println!("{:?}", result);
}
/*for i in 0..window_set.len() {
let search = Search {
windows: window_set.retain(&mut |&window| window!= window_set.windows()[i])
};
let mut partial = search.start(&dictionary);
search.retain(&grid, &mut partial);
search.refine_all(&mut partial);
if window_set.windows()[i].length < 5 {
println!("{:?} {:?}", i, window_set.windows()[i]);
println!("{:?} {:?}", i, search.search_words_by_count(&partial));
let mut count = 1;
for p in partial.iter() {
count *= p.size();
}
}
}*/
Ok(())
} | clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr"); | random_line_split |
generate.rs | /*fn search(width: usize, height: usize, words: &[Word], tries: &[&Trie], acrosses: &mut Vec<Word>) {
if height == acrosses.len() {
let downs: Vec<Word> = (0..width).map(|x| (0..height).map(|y| acrosses[y][x]).collect()).collect();
if!iproduct!(acrosses.iter(), downs.iter()).any(|(a, b)| a == b) {
println!("{:?} {:?}", acrosses, downs);
}
return;
}
let mut tries2: Vec<&Trie> = tries.iter().cloned().collect();
for &word in words {
let mut min_size = usize::max_value();
for x in 0..width {
tries2[x] = tries[x].child(word[x]);
min_size = min_size.min(tries2[x].len());
}
if min_size > 0 {
acrosses.push(word);
search(width, height, words, &tries2, acrosses);
acrosses.pop();
}
}
}*/
/*fn main() {
let dictionary = ScoredWord::default().unwrap();
let width = 3;
let height = 9;
let mut acrosses: Vec<Word> =
dictionary.iter()
.filter_map(|w| if w.word.len() == width { Some(w.word) } else { None })
.collect();
acrosses.sort();
let downs: Trie =
dictionary.iter()
.filter_map(|w| if w.word.len() == height { Some(w.word) } else { None })
.collect();
search(width, height, &acrosses, &vec![&downs; width], &mut vec![]);
}*/
const START: &'static [u8] =
b",,,,,!,,,,,!,,,,,,
,,,,,!,,,,,!,,,,,,
H,O,O,D,W,I,N,K,E,D,!,,,,,,
,,,,,,!,!,!,,,,!,!,,,
!,!,!,,,,,!,,,,,,,!,!,!
,,,,!,S,H,O,R,T,C,H,A,N,G,E,D
,,,!,,,,,,!,!,!,,,,,
,,,!,,,,,!,,,,,!,,,
,,,,,!,!,!,,,,,,!,,,
H,O,R,N,S,W,O,G,G,L,E,D,!,,,,
!,!,!,,,,,,,!,,,,,!,!,!
,,,!,!,,,,!,!,!,,,,,,
,,,,,,!,B,A,M,B,O,O,Z,L,E,D
,,,,,,!,,,,,!,,,,,
,,,,,,!,,,,,!,,,,,";
const GRID: &'static str =
"ALLAH█NASA█BAMBOO
GEODE█ARAB█RIBALD
HOODWINKED█ELATED
ANTLER███USA██SOS
███EDIT█ICETEA███
PIED█SHORTCHANGED
ADD█LEAVE███RALLY
RAG█ASIA█PAIL█EEK
CHEAP███EARNS█ACE
HORNSWOGGLED█ANTS
███TEABAG█SIAM███
DOB██HIS███GRACED
OVERDO█BAMBOOZLED
LATINO█AGAR█MOOLA
LLAMAS█GAGA█ANTSY";
fn write_impl() {
let mut rows = vec![];
for line in GRID.split('\n') {
let mut r | ];
for c in line.chars() {
if c == '█' {
row.push(Cell::Black);
} else {
row.push(Cell::White(Letter::from_unicode(c)));
}
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| rows[y][x]);
println!("{:?}", grid);
let windows = WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black));
let mut clues = HashMap::<&str, &str>::new();
clues.insert("PIED", "Like the proverbial piper");
clues.insert("DNA", "Twisted pair?");
clues.insert("PAL", "Alternative to NTSC");
clues.insert("LATINO", "16.7% of the American population");
clues.insert("IDAHO", "The Gem State");
clues.insert("ARES", "Foe of Wonderwoman");
clues.insert("ELECT", "Opt (to)");
clues.insert("IRE", "Choler");
clues.insert("INDIGO", "Infraviolet?");
clues.insert("TEABAG", "Rude post-victory celebration");
clues.insert("MAG", "Toner color: abbr.");
clues.insert("OVERDO", "Cook for 20 minutes, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs");
clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr");
clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len()!= window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed!= letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
}).min_by_key(|(index, words)| words.len()) {
None => {
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
return;
}
Some(x) => x,
};
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
for option in options {
let mut grid2 = grid.clone();
for (position, value) in windows[window].positions().zip(option.word.iter()) {
grid2[position] = Cell::White(Some(*value));
}
make_choices(dictionary, &grid2, windows);
}
}*/
fn search_impl() -> io::Result<()> {
let mut reader = ReaderBuilder::new()
.has_headers(false)
.from_reader(START);
let mut rows = vec![];
for line in reader.records() {
let mut row = vec![];
for cell in line?.iter() {
row.push(match cell {
"!" => Cell::Black,
"" => Cell::White(None),
letter => Cell::White(Some(Letter::from_unicode(letter.chars().next().unwrap()).unwrap())),
});
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| {
rows[y][x]
});
println!("{}", AsciiGrid(&grid));
let scored_words = ScoredWord::default().unwrap();
let mut dictionary = scored_words.iter().map(|scored_word| scored_word.word).collect::<Vec<Word>>();
//dictionary=dictionary[dictionary.]
//dictionary.push(Word::from_str("bamboozled").unwrap());
//dictionary.push(Word::from_str("shortchanged").unwrap());
/* let windows = windows(&grid, |cell| {
match cell {
Cell::Black => true,
_ => false,
}
});
make_choices(&dictionary, &grid, &windows);*/
let banned: HashSet<Word> = ["apsis",
"asdic", "jcloth", "galah", "algin",
"jinni", "slaty", "jingo", "saiga", "scuta",
"echt", "concha", "duma", "obeche", "teazel",
"toerag", "yalta", "howdah", "purdah", "agin", "teehee", "faery", "aubade", "nyala",
"taenia", "auden", "diable", "craped", "oscan", "halvah",
"reeve", "dhole", "oca", "balzac", "wahine", "kaons", "medico", "stelae", "asci",
"anorak", "madedo", "aurous", "dhoti", "GAUR", "AUTEUR", "piaf", "BANZAI", "WABASH", "ERUCT",
"THRO", "LINAGE", "LABAN", "CHID", "ADDY", "ANOA", "EDO", "BUHL", "BASTS", "EDDO", "IRISED",
"RAFFIA", "SHARI", "OTIC", "ALTHO", "EFFING", "BROOKE", "BOLL", "BSE", "PEDALO",
"ELUL", "LARCH", "BORZOI", "DAGO", "LAREDO", "GAOLS", "GIGUE", "TSURIS", "DYADS",
"STALAG", "SERINE", "BRANDT", "efface", "GAELS", "CRU", "HONIED", "PARSI", "BENZOL", "AACHEN", "DEEDY",
"CHOREA", "AERY", "CURIO", "RAMEAU", "EFFETE", "EFFUSE", "RIALTO", "ballup", "HAAR",
"ABATOR", "BAOBAB", "SHILOH", "ATTAR", "ETUI", "REEFY", "RATTAN", "AARHUS", "TENUTO", "NOLL",
"TWEE", "FOOZLE", "GIBBER", "OUSE", "agm", "gam", "alb", "APPALS",
"GLOGG", "NEEP", "RIPELY", "ARIL", "ELIJAH", "ADAR", "ALINE", "LIENAL", "EPIZOA", "OGEE"
].iter().map(|str| Word::from_str(*str).unwrap()).collect();
// for &word in banned.iter() {
// println!("{:?}", word);
// /println!("{:?} {:?}", word, scored_words.iter().find(|sw| sw.word == word).unwrap().score);
// }
// for word in scored_words {
// if word.score > 30 {
// println!("{:?}", word.word);
// }
// }
//dictionary.resize((dictionary.len() as f32 * 0.4) as usize, Word::new());
dictionary.push(Word::from_str("HOODWINKED").unwrap());
dictionary.push(Word::from_str("SHORTCHANGED").unwrap());
dictionary.push(Word::from_str("HORNSWOGGLED").unwrap());
dictionary.push(Word::from_str("BAMBOOZLED").unwrap());
dictionary.retain(|word|!banned.contains(word));
{
let mut search = Search::new(WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black)), &dictionary);
search.retain(&grid);
search.refine_all();
let mut result = None;
let _ = search.solve(&mut take_one_result(&mut result));
println!("{:?}", result);
}
/*for i in 0..window_set.len() {
let search = Search {
windows: window_set.retain(&mut |&window| window!= window_set.windows()[i])
};
let mut partial = search.start(&dictionary);
search.retain(&grid, &mut partial);
search.refine_all(&mut partial);
if window_set.windows()[i].length < 5 {
println!("{:?} {:?}", i, window_set.windows()[i]);
println!("{:?} {:?}", i, search.search_words_by_count(&partial));
let mut count = 1;
for p in partial.iter() {
count *= p.size();
}
}
}*/
Ok(())
} | ow = vec![ | identifier_name |
generate.rs | /*fn search(width: usize, height: usize, words: &[Word], tries: &[&Trie], acrosses: &mut Vec<Word>) {
if height == acrosses.len() {
let downs: Vec<Word> = (0..width).map(|x| (0..height).map(|y| acrosses[y][x]).collect()).collect();
if!iproduct!(acrosses.iter(), downs.iter()).any(|(a, b)| a == b) {
println!("{:?} {:?}", acrosses, downs);
}
return;
}
let mut tries2: Vec<&Trie> = tries.iter().cloned().collect();
for &word in words {
let mut min_size = usize::max_value();
for x in 0..width {
tries2[x] = tries[x].child(word[x]);
min_size = min_size.min(tries2[x].len());
}
if min_size > 0 {
acrosses.push(word);
search(width, height, words, &tries2, acrosses);
acrosses.pop();
}
}
}*/
/*fn main() {
let dictionary = ScoredWord::default().unwrap();
let width = 3;
let height = 9;
let mut acrosses: Vec<Word> =
dictionary.iter()
.filter_map(|w| if w.word.len() == width { Some(w.word) } else { None })
.collect();
acrosses.sort();
let downs: Trie =
dictionary.iter()
.filter_map(|w| if w.word.len() == height { Some(w.word) } else { None })
.collect();
search(width, height, &acrosses, &vec![&downs; width], &mut vec![]);
}*/
const START: &'static [u8] =
b",,,,,!,,,,,!,,,,,,
,,,,,!,,,,,!,,,,,,
H,O,O,D,W,I,N,K,E,D,!,,,,,,
,,,,,,!,!,!,,,,!,!,,,
!,!,!,,,,,!,,,,,,,!,!,!
,,,,!,S,H,O,R,T,C,H,A,N,G,E,D
,,,!,,,,,,!,!,!,,,,,
,,,!,,,,,!,,,,,!,,,
,,,,,!,!,!,,,,,,!,,,
H,O,R,N,S,W,O,G,G,L,E,D,!,,,,
!,!,!,,,,,,,!,,,,,!,!,!
,,,!,!,,,,!,!,!,,,,,,
,,,,,,!,B,A,M,B,O,O,Z,L,E,D
,,,,,,!,,,,,!,,,,,
,,,,,,!,,,,,!,,,,,";
const GRID: &'static str =
"ALLAH█NASA█BAMBOO
GEODE█ARAB█RIBALD
HOODWINKED█ELATED
ANTLER███USA██SOS
███EDIT█ICETEA███
PIED█SHORTCHANGED
ADD█LEAVE███RALLY
RAG█ASIA█PAIL█EEK
CHEAP███EARNS█ACE
HORNSWOGGLED█ANTS
███TEABAG█SIAM███
DOB██HIS███GRACED
OVERDO█BAMBOOZLED
LATINO█AGAR█MOOLA
LLAMAS█GAGA█ANTSY";
fn write_impl() {
let mut rows = vec![];
for line in GRID.split('\n') {
let mut row = vec![];
for c in line.chars() {
if c == '█' {
row.push(Cell::Black);
} else {
row.push(Cell::White(Letter::from_unicode(c)));
}
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| rows[y][x]);
println!("{:?}", grid);
let windows = WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black));
let mut clues = HashMap::<&str, &str>::new();
clues.insert("PIED", "Like the proverbial piper");
clues.insert("DNA", "Twisted pair?");
clues.insert("PAL", "Alternative to NTSC");
clues.insert("LATINO", "16.7% of the American population");
clues.insert("IDAHO", "The Gem State");
clues.insert("ARES", "Foe of Wonderwoman");
clues.insert("ELECT", "Opt (to)");
clues.insert("IRE", "Choler");
clues.insert("INDIGO", "Infraviolet?");
clues.insert("TEABAG", "Rude post-victory celebration");
clues.insert("MAG", "Toner color: abbr.");
clues.insert("OVERDO", "Cook for 20 minutes, as pasta");
clues.insert("ADD", "More: abbr.");
clues.insert("BETA", "Advice, in climbing jargon");
clues.insert("ARK", "Couple's cruise ship?");
clues.insert("AIL", "Bedevil");
clues.insert("EGG", "Urge (on)");
clues.insert("BREATH", "Form of investiture on Nalthis");
clues.insert("GRACED", "Adorned");
clues.insert("OLEO", "Hydrogenated food product");
clues.insert("ODDS", "What were the ____?");
clues.insert("GEODE", "Rock formation that starts as a gas bubble");
clues.insert("HIS", "Label on a towel");
clues.insert("LEON", "A large gato");
clues.insert("ADDLED", "Like a brain in love");
clues.insert("WAHOOS", "Exclamations of joy");
clues.insert("ARAB", "Desert steed");
clues.insert("ABDUCT", "Take, as by a UFO");
clues.insert("MBA", "Degree for CEOs");
clues.insert("ICETEA", "???");
clues.insert("DOB", "Important date: abbr");
clues.insert("CHEAP", "Overpowered, in the 90's");
clues.insert("RAG", "with \"on\", tease");
clues.insert("OVA", "Largest human cells");
clues.insert("RALLY", "Make a comeback, as a military force");
clues.insert("ANTS", "Pants' contents?");
clues.insert("EDIT", "Amend");
clues.insert("AGAR", "Gelatin alternative");
clues.insert("ASIA", "Home of the Indian elephant");
clues.insert("AGA", "Ottoman honorific");
clues.insert("THAI", "Basil variety");
clues.insert("HORNSWOGGLED", "How you feel after solving this puzzle");
clues.insert("SOS", "[Help!]");
clues.insert("EDGER", "Lawnkeeping tool");
clues.insert("OBI", "Kimono part");
clues.insert("RIBALD", "Blue");
clues.insert("ANTLER", "Classic sexual dimorphism feature");
clues.insert("HOODWINKED", "How you feel after solving this puzzle");
clues.insert("ACE", "Skilled pilot");
clues.insert("NASA", "Apollo originator");
clues.insert("EELS", "Fish caught in pots");
clues.insert("NAN", "IEEE-754 reflexivity violator");
clues.insert("DDAY", "Action time");
clues.insert("SIAM", "Name on old Risk boards");
clues.insert("EARLS", "Superiors to viscounts");
clues.insert("USA", "Home of Athens, Berlin, Milan, Palermo, Tripoli, Versailles, and Vienna: abbr");
clues.insert("BAMBOO", "One of the fasting growing plants in the world");
clues.insert("ALLAH", "Being with 99 names");
clues.insert("PAIL", "Bucket");
clues.insert("PARCH", "Scorch");
clues.insert("HEWED", "Sawn");
clues.insert("IRISES", "Organic annuli");
clues.insert("BRA", "Supporter of women?");
clues.insert("AROMA", "Bakery attractant");
clues.insert("LAPSE", "Gap");
clues.insert("GASBAG", "Yapper");
clues.insert("ANA", "Serbian tennis player Ivanovic");
clues.insert("ELATED", "On cloud nine");
clues.insert("AGHA", "Ottoman honorific");
clues.insert("BATS", "Spreaders of White-Nose syndrome");
clues.insert("OVAL", "Egg-like");
clues.insert("SEC", "Short time, for short");
clues.insert("MOOLA", "\"Cheddar\"");
clues.insert("DOLL", "\"It's an action figure, not a ____\"");
clues.insert("GLEAN", "Reap");
clues.insert("EARNS", "Reaps");
clues.insert("ANTSY", "On edge");
clues.insert("ANT", "Inspiration for a size-warping Marvel hero");
clues.insert("RIM", "Lid connector");
clues.insert("BAMBOOZLED", "How you feel after solving this puzzle");
clues.insert("LOOT", "Reward for killing things, in video games");
clues.insert("SHORTCHANGED", "How you feel after solving this puzzle");
clues.insert("EEK", "[A mouse!]");
clues.insert("GAGA", "Player of the Hotel owner in \"American Horror Story: Hotel\"");
clues.insert("LLAMAS", "Halfway between a tibetan priest and major fire?");
clues.insert("DYKES", "Common earthworks");
clues.insert("SAE", "Standards organization for cars");
clues.insert("CLOT", "Response to injury, or cause of illness");
clues.insert("AMAZON", "Origin of Wonderwoman");
clues.insert("LEAVE", "\"Make like a tree and _____\"");
let mut clue_map = HashMap::new();
for (word, clue) in clues {
clue_map.insert(Word::from_str(word).unwrap(), clue);
}
let mut clue_list = vec![];
for window in windows.windows() {
let word: Word = window.positions().map(|(x, y)| match grid[(x, y)] {
Cell::White(Some(x)) => x,
_ => unreachable!(),
}).collect();
clue_list.push((window, clue_map[&word].clone()));
}
clue_list.sort_by_key(|(window, clue)| (window.position().0, window.position().1, window.direction()));
let puzzle = Puzzle {
preamble: vec![],
version: *b"1.4\0",
title: "The First Crossword".to_string(),
author: "Nathan Dobson".to_string(),
copyright: "".to_string(),
grid: Grid::new(grid.size(), |x, y| {
match grid[(x, y)] {
Cell::Black => None,
Cell::White(Some(x)) => Some(PuzzleCell {
solution: [x.to_unicode()].iter().cloned().collect(),
..Default::default()
}),
_ => panic!(),
}
}),
clues: WindowMap::new(clue_list.into_iter().map(|(window, clue)| (window, clue.to_string())), grid.size()),
note: "".to_string(),
};
let mut new_data: Vec<u8> = vec![];
puzzle.write_to(&mut new_data).unwrap();
fs::write("output.puz", &new_data).unwrap();
}
/*fn make_choices(dictionary: &[ScoredWord], grid: &Grid<Cell>, windows: &[Window]) {
let (window, options) = match windows.iter().enumerate().filter(|(index, window)| {
for position in window.positions() {
if grid[position] == Cell::White(None) {
return true;
}
}
false
}).map(|(index, window)| {
let words: Vec<ScoredWord> = dictionary.iter().filter(|word| {
if word.word.len()!= window.length {
return false;
}
for (position, &letter) in window.positions().zip(word.word.iter()) {
match grid[position] {
Cell::White(Some(needed)) => if needed!= letter {
return false;
}
_ => {}
}
}
true
}).cloned().collect();
(index, words)
}).min_by_key(|(index, words)| words.len()) {
None => {
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
return;
}
Some(x) => x,
};
for y in 0..grid.size().1 {
for x in 0..grid.size().0 {
print!("{}", grid[(x, y)]);
}
println!();
}
println!();
for option in options {
let mut grid2 = grid.clone();
for (position, value) in windows[window].positions().zip(option.word.iter()) {
grid2[position] = Cell::White(Some(*value));
}
make_choices(dictionary, &grid2, windows);
}
}*/
fn search_impl() -> io::Result<()> {
let mut reader = ReaderBuilder::new()
.has_headers(false)
.from_reader(STA | //dictionary.push(Word::from_str("bamboozled").unwrap());
//dictionary.push(Word::from_str("shortchanged").unwrap());
/* let windows = windows(&grid, |cell| {
match cell {
Cell::Black => true,
_ => false,
}
});
make_choices(&dictionary, &grid, &windows);*/
let banned: HashSet<Word> = ["apsis",
"asdic", "jcloth", "galah", "algin",
"jinni", "slaty", "jingo", "saiga", "scuta",
"echt", "concha", "duma", "obeche", "teazel",
"toerag", "yalta", "howdah", "purdah", "agin", "teehee", "faery", "aubade", "nyala",
"taenia", "auden", "diable", "craped", "oscan", "halvah",
"reeve", "dhole", "oca", "balzac", "wahine", "kaons", "medico", "stelae", "asci",
"anorak", "madedo", "aurous", "dhoti", "GAUR", "AUTEUR", "piaf", "BANZAI", "WABASH", "ERUCT",
"THRO", "LINAGE", "LABAN", "CHID", "ADDY", "ANOA", "EDO", "BUHL", "BASTS", "EDDO", "IRISED",
"RAFFIA", "SHARI", "OTIC", "ALTHO", "EFFING", "BROOKE", "BOLL", "BSE", "PEDALO",
"ELUL", "LARCH", "BORZOI", "DAGO", "LAREDO", "GAOLS", "GIGUE", "TSURIS", "DYADS",
"STALAG", "SERINE", "BRANDT", "efface", "GAELS", "CRU", "HONIED", "PARSI", "BENZOL", "AACHEN", "DEEDY",
"CHOREA", "AERY", "CURIO", "RAMEAU", "EFFETE", "EFFUSE", "RIALTO", "ballup", "HAAR",
"ABATOR", "BAOBAB", "SHILOH", "ATTAR", "ETUI", "REEFY", "RATTAN", "AARHUS", "TENUTO", "NOLL",
"TWEE", "FOOZLE", "GIBBER", "OUSE", "agm", "gam", "alb", "APPALS",
"GLOGG", "NEEP", "RIPELY", "ARIL", "ELIJAH", "ADAR", "ALINE", "LIENAL", "EPIZOA", "OGEE"
].iter().map(|str| Word::from_str(*str).unwrap()).collect();
// for &word in banned.iter() {
// println!("{:?}", word);
// /println!("{:?} {:?}", word, scored_words.iter().find(|sw| sw.word == word).unwrap().score);
// }
// for word in scored_words {
// if word.score > 30 {
// println!("{:?}", word.word);
// }
// }
//dictionary.resize((dictionary.len() as f32 * 0.4) as usize, Word::new());
dictionary.push(Word::from_str("HOODWINKED").unwrap());
dictionary.push(Word::from_str("SHORTCHANGED").unwrap());
dictionary.push(Word::from_str("HORNSWOGGLED").unwrap());
dictionary.push(Word::from_str("BAMBOOZLED").unwrap());
dictionary.retain(|word|!banned.contains(word));
{
let mut search = Search::new(WindowMap::from_grid(&Grid::new(grid.size(), |x, y| grid[(x, y)]!= Cell::Black)), &dictionary);
search.retain(&grid);
search.refine_all();
let mut result = None;
let _ = search.solve(&mut take_one_result(&mut result));
println!("{:?}", result);
}
/*for i in 0..window_set.len() {
let search = Search {
windows: window_set.retain(&mut |&window| window!= window_set.windows()[i])
};
let mut partial = search.start(&dictionary);
search.retain(&grid, &mut partial);
search.refine_all(&mut partial);
if window_set.windows()[i].length < 5 {
println!("{:?} {:?}", i, window_set.windows()[i]);
println!("{:?} {:?}", i, search.search_words_by_count(&partial));
let mut count = 1;
for p in partial.iter() {
count *= p.size();
}
}
}*/
Ok(())
} | RT);
let mut rows = vec![];
for line in reader.records() {
let mut row = vec![];
for cell in line?.iter() {
row.push(match cell {
"!" => Cell::Black,
"" => Cell::White(None),
letter => Cell::White(Some(Letter::from_unicode(letter.chars().next().unwrap()).unwrap())),
});
}
rows.push(row);
}
let grid = Grid::new((rows[0].len(), rows.len()), |x, y| {
rows[y][x]
});
println!("{}", AsciiGrid(&grid));
let scored_words = ScoredWord::default().unwrap();
let mut dictionary = scored_words.iter().map(|scored_word| scored_word.word).collect::<Vec<Word>>();
//dictionary=dictionary[dictionary.] | identifier_body |
ctl.rs | use std::{path::PathBuf, process::ExitCode};
use crate::daemon::{config::CliArg, tracing::LogLevel, Config, ObservableState};
use tracing_subscriber::util::SubscriberInitExt;
const USAGE_MSG: &str = "\
usage: ntp-ctl validate [-c PATH]
ntp-ctl status [-f FORMAT] [-c PATH] [-o PATH]
ntp-ctl -h | ntp-ctl -v";
const DESCRIPTOR: &str = "ntp-ctl - ntp-daemon monitoring";
const HELP_MSG: &str = "Options:
-f, --format=FORMAT which format to use for printing statistics [plain, prometheus]
-c, --config=CONFIG which configuration file to read the socket paths from";
pub fn long_help_message() -> String {
format!("{DESCRIPTOR}\n\n{USAGE_MSG}\n\n{HELP_MSG}")
}
#[derive(Debug, Default, PartialEq, Eq)]
enum Format {
#[default]
Plain,
Prometheus,
}
#[derive(Debug, Default, PartialEq, Eq)]
pub enum NtpCtlAction {
#[default]
Help,
Version,
Validate,
Status,
}
#[derive(Debug, Default)]
pub(crate) struct NtpDaemonOptions {
config: Option<PathBuf>,
format: Format,
help: bool,
version: bool,
validate: bool,
status: bool,
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn | () -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
let config = config.unwrap_or_default();
let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
Ok(result)
}
#[tokio::test]
async fn test_control_socket_peer() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Plain, "ntp-test-stream-6").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_prometheus() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Prometheus, "ntp-test-stream-8").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_peer_invalid_input() -> std::io::Result<()> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join("ntp-test-stream-10");
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(Format::Plain, path);
let handle = tokio::spawn(fut);
let value = 42u32;
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::FAILURE)
);
Ok(())
}
const BINARY: &str = "/usr/bin/ntp-ctl";
#[test]
fn cli_config() {
let config_str = "/foo/bar/ntp.toml";
let config = Path::new(config_str);
let arguments = &[BINARY, "-c", config_str];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.config.unwrap().as_path(), config);
}
#[test]
fn cli_format() {
let arguments = &[BINARY, "-f", "plain"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Plain);
let arguments = &[BINARY, "-f", "prometheus"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Prometheus);
let arguments = &[BINARY, "-f", "yaml"];
let err = NtpDaemonOptions::try_parse_from(arguments).unwrap_err();
assert_eq!(err, "invalid format option provided: yaml");
}
}
| main | identifier_name |
ctl.rs | use std::{path::PathBuf, process::ExitCode};
use crate::daemon::{config::CliArg, tracing::LogLevel, Config, ObservableState};
use tracing_subscriber::util::SubscriberInitExt;
const USAGE_MSG: &str = "\
usage: ntp-ctl validate [-c PATH]
ntp-ctl status [-f FORMAT] [-c PATH] [-o PATH]
ntp-ctl -h | ntp-ctl -v";
const DESCRIPTOR: &str = "ntp-ctl - ntp-daemon monitoring";
const HELP_MSG: &str = "Options:
-f, --format=FORMAT which format to use for printing statistics [plain, prometheus]
-c, --config=CONFIG which configuration file to read the socket paths from";
pub fn long_help_message() -> String {
format!("{DESCRIPTOR}\n\n{USAGE_MSG}\n\n{HELP_MSG}")
}
#[derive(Debug, Default, PartialEq, Eq)]
enum Format {
#[default]
Plain,
Prometheus,
}
#[derive(Debug, Default, PartialEq, Eq)]
pub enum NtpCtlAction {
#[default]
Help,
Version,
Validate,
Status,
}
#[derive(Debug, Default)]
pub(crate) struct NtpDaemonOptions {
config: Option<PathBuf>,
format: Format,
help: bool,
version: bool,
validate: bool,
status: bool,
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn main() -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
| let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
Ok(result)
}
#[tokio::test]
async fn test_control_socket_peer() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Plain, "ntp-test-stream-6").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_prometheus() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Prometheus, "ntp-test-stream-8").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_peer_invalid_input() -> std::io::Result<()> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join("ntp-test-stream-10");
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(Format::Plain, path);
let handle = tokio::spawn(fut);
let value = 42u32;
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::FAILURE)
);
Ok(())
}
const BINARY: &str = "/usr/bin/ntp-ctl";
#[test]
fn cli_config() {
let config_str = "/foo/bar/ntp.toml";
let config = Path::new(config_str);
let arguments = &[BINARY, "-c", config_str];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.config.unwrap().as_path(), config);
}
#[test]
fn cli_format() {
let arguments = &[BINARY, "-f", "plain"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Plain);
let arguments = &[BINARY, "-f", "prometheus"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Prometheus);
let arguments = &[BINARY, "-f", "yaml"];
let err = NtpDaemonOptions::try_parse_from(arguments).unwrap_err();
assert_eq!(err, "invalid format option provided: yaml");
}
} | let config = config.unwrap_or_default();
| random_line_split |
ctl.rs | use std::{path::PathBuf, process::ExitCode};
use crate::daemon::{config::CliArg, tracing::LogLevel, Config, ObservableState};
use tracing_subscriber::util::SubscriberInitExt;
const USAGE_MSG: &str = "\
usage: ntp-ctl validate [-c PATH]
ntp-ctl status [-f FORMAT] [-c PATH] [-o PATH]
ntp-ctl -h | ntp-ctl -v";
const DESCRIPTOR: &str = "ntp-ctl - ntp-daemon monitoring";
const HELP_MSG: &str = "Options:
-f, --format=FORMAT which format to use for printing statistics [plain, prometheus]
-c, --config=CONFIG which configuration file to read the socket paths from";
pub fn long_help_message() -> String {
format!("{DESCRIPTOR}\n\n{USAGE_MSG}\n\n{HELP_MSG}")
}
#[derive(Debug, Default, PartialEq, Eq)]
enum Format {
#[default]
Plain,
Prometheus,
}
#[derive(Debug, Default, PartialEq, Eq)]
pub enum NtpCtlAction {
#[default]
Help,
Version,
Validate,
Status,
}
#[derive(Debug, Default)]
pub(crate) struct NtpDaemonOptions {
config: Option<PathBuf>,
format: Format,
help: bool,
version: bool,
validate: bool,
status: bool,
action: NtpCtlAction,
}
impl NtpDaemonOptions {
const TAKES_ARGUMENT: &[&'static str] = &["--config", "--format"];
const TAKES_ARGUMENT_SHORT: &[char] = &['c', 'f'];
/// parse an iterator over command line arguments
pub fn try_parse_from<I, T>(iter: I) -> Result<Self, String>
where
I: IntoIterator<Item = T>,
T: AsRef<str> + Clone,
{
let mut options = NtpDaemonOptions::default();
let mut it = iter.into_iter().map(|x| x.as_ref().to_string()).peekable();
match it.peek().map(|x| x.as_str()) {
Some("validate") => {
let _ = it.next();
options.validate = true;
}
Some("status") => {
let _ = it.next();
options.status = true;
}
_ => { /* do nothing */ }
};
let arg_iter =
CliArg::normalize_arguments(Self::TAKES_ARGUMENT, Self::TAKES_ARGUMENT_SHORT, it)?
.into_iter()
.peekable();
for arg in arg_iter {
match arg {
CliArg::Flag(flag) => match flag.as_str() {
"-h" | "--help" => {
options.help = true;
}
"-v" | "--version" => {
options.version = true;
}
option => |
},
CliArg::Argument(option, value) => match option.as_str() {
"-c" | "--config" => {
options.config = Some(PathBuf::from(value));
}
"-f" | "--format" => match value.as_str() {
"plain" => options.format = Format::Plain,
"prometheus" => options.format = Format::Prometheus,
_ => Err(format!("invalid format option provided: {value}"))?,
},
option => {
Err(format!("invalid option provided: {option}"))?;
}
},
CliArg::Rest(_rest) => { /* do nothing, drop remaining arguments */ }
}
}
options.resolve_action();
// nothing to validate at the moment
Ok(options)
}
/// from the arguments resolve which action should be performed
fn resolve_action(&mut self) {
if self.help {
self.action = NtpCtlAction::Help;
} else if self.version {
self.action = NtpCtlAction::Version;
} else if self.validate {
self.action = NtpCtlAction::Validate;
} else if self.status {
self.action = NtpCtlAction::Status;
} else {
self.action = NtpCtlAction::Help;
}
}
}
async fn validate(config: Option<PathBuf>) -> std::io::Result<ExitCode> {
// Late completion not needed, so ignore result.
crate::daemon::tracing::tracing_init(LogLevel::Info).init();
match Config::from_args(config, vec![], vec![]).await {
Ok(config) => {
if config.check() {
eprintln!("Config looks good");
Ok(ExitCode::SUCCESS)
} else {
Ok(ExitCode::FAILURE)
}
}
Err(e) => {
eprintln!("Error: Could not load configuration: {e}");
Ok(ExitCode::FAILURE)
}
}
}
const VERSION: &str = env!("CARGO_PKG_VERSION");
pub async fn main() -> std::io::Result<ExitCode> {
let options = match NtpDaemonOptions::try_parse_from(std::env::args()) {
Ok(options) => options,
Err(msg) => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, msg)),
};
match options.action {
NtpCtlAction::Help => {
println!("{}", long_help_message());
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Version => {
eprintln!("ntp-ctl {VERSION}");
Ok(ExitCode::SUCCESS)
}
NtpCtlAction::Validate => validate(options.config).await,
NtpCtlAction::Status => {
let config = Config::from_args(options.config, vec![], vec![]).await;
if let Err(ref e) = config {
println!("Warning: Unable to load configuration file: {e}");
}
let config = config.unwrap_or_default();
let observation = config
.observability
.observe
.observation_path
.unwrap_or_else(|| PathBuf::from("/run/ntpd-rs/observe"));
match options.format {
Format::Plain => print_state(Format::Plain, observation).await,
Format::Prometheus => print_state(Format::Prometheus, observation).await,
}
}
}
}
async fn print_state(print: Format, observe_socket: PathBuf) -> Result<ExitCode, std::io::Error> {
let mut stream = match tokio::net::UnixStream::connect(&observe_socket).await {
Ok(stream) => stream,
Err(e) => {
eprintln!("Could not open socket at {}: {e}", observe_socket.display(),);
return Ok(ExitCode::FAILURE);
}
};
let mut msg = Vec::with_capacity(16 * 1024);
let output =
match crate::daemon::sockets::read_json::<ObservableState>(&mut stream, &mut msg).await {
Ok(output) => output,
Err(e) => {
eprintln!("Failed to read state from observation socket: {e}");
return Ok(ExitCode::FAILURE);
}
};
match print {
Format::Plain => {
println!("Synchronization status:");
println!(
"Dispersion: {}s, Delay: {}s",
output.system.time_snapshot.root_dispersion.to_seconds(),
output.system.time_snapshot.root_delay.to_seconds()
);
println!(
"Desired poll interval: {}s",
output
.system
.time_snapshot
.poll_interval
.as_duration()
.to_seconds()
);
println!("Stratum: {}", output.system.stratum);
println!();
println!("Peers:");
for peer in &output.peers {
match peer {
crate::daemon::ObservablePeerState::Nothing => {}
crate::daemon::ObservablePeerState::Observable(
crate::daemon::ObservedPeerState {
timedata,
unanswered_polls,
poll_interval,
address,
id,
},
) => {
println!(
"{} ({}): {}±{}(±{})s\n pollinterval: {}s, missing polls: {}",
address,
id,
timedata.offset.to_seconds(),
timedata.uncertainty.to_seconds(),
timedata.delay.to_seconds(),
poll_interval.as_duration().to_seconds(),
unanswered_polls
);
}
}
}
let in_startup = output
.peers
.iter()
.filter(|peer| matches!(peer, crate::daemon::ObservablePeerState::Nothing))
.count();
match in_startup {
0 => {} // no peers in startup, so no line for that
1 => println!("1 peer still in startup"),
_ => println!("{} peers still in startup", in_startup),
}
println!();
println!("Servers:");
for server in &output.servers {
println!(
"{}: received {}, accepted {}, errors {}",
server.address,
server.stats.received_packets.get(),
server.stats.accepted_packets.get(),
server.stats.response_send_errors.get()
);
println!(
" denied {}, rate limited {}, ignored {}",
server.stats.denied_packets.get(),
server.stats.rate_limited_packets.get(),
server.stats.ignored_packets.get()
);
}
}
Format::Prometheus => {
let mut buf = String::new();
if let Err(e) = crate::metrics::format_state(&mut buf, &output) {
eprintln!("Failed to encode prometheus data: {e}");
return Ok(ExitCode::FAILURE);
}
println!("{buf}");
}
}
Ok(ExitCode::SUCCESS)
}
#[cfg(test)]
mod tests {
use std::os::unix::prelude::PermissionsExt;
use std::path::Path;
use crate::daemon::{
config::ObserveConfig,
sockets::{create_unix_socket, write_json},
};
use super::*;
async fn write_socket_helper(
command: Format,
socket_name: &str,
) -> std::io::Result<Result<ExitCode, std::io::Error>> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join(socket_name);
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(command, path);
let handle = tokio::spawn(fut);
let value = ObservableState {
system: Default::default(),
peers: vec![],
servers: vec![],
};
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
Ok(result)
}
#[tokio::test]
async fn test_control_socket_peer() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Plain, "ntp-test-stream-6").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_prometheus() -> std::io::Result<()> {
// be careful with copying: tests run concurrently and should use a unique socket name!
let result = write_socket_helper(Format::Prometheus, "ntp-test-stream-8").await?;
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::SUCCESS)
);
Ok(())
}
#[tokio::test]
async fn test_control_socket_peer_invalid_input() -> std::io::Result<()> {
let config: ObserveConfig = Default::default();
// be careful with copying: tests run concurrently and should use a unique socket name!
let path = std::env::temp_dir().join("ntp-test-stream-10");
if path.exists() {
std::fs::remove_file(&path).unwrap();
}
let peers_listener = create_unix_socket(&path)?;
let permissions: std::fs::Permissions =
PermissionsExt::from_mode(config.observation_permissions);
std::fs::set_permissions(&path, permissions)?;
let fut = super::print_state(Format::Plain, path);
let handle = tokio::spawn(fut);
let value = 42u32;
let (mut stream, _addr) = peers_listener.accept().await?;
write_json(&mut stream, &value).await?;
let result = handle.await.unwrap();
assert_eq!(
format!("{:?}", result.unwrap()),
format!("{:?}", ExitCode::FAILURE)
);
Ok(())
}
const BINARY: &str = "/usr/bin/ntp-ctl";
#[test]
fn cli_config() {
let config_str = "/foo/bar/ntp.toml";
let config = Path::new(config_str);
let arguments = &[BINARY, "-c", config_str];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.config.unwrap().as_path(), config);
}
#[test]
fn cli_format() {
let arguments = &[BINARY, "-f", "plain"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Plain);
let arguments = &[BINARY, "-f", "prometheus"];
let options = NtpDaemonOptions::try_parse_from(arguments).unwrap();
assert_eq!(options.format, Format::Prometheus);
let arguments = &[BINARY, "-f", "yaml"];
let err = NtpDaemonOptions::try_parse_from(arguments).unwrap_err();
assert_eq!(err, "invalid format option provided: yaml");
}
}
| {
Err(format!("invalid option provided: {option}"))?;
} | conditional_block |
util.rs | //! Utility traits and functions.
use std::ffi;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::time;
use anyhow::{Context as ResultExt, Error, Result};
use fs2::{lock_contended_error, FileExt};
use crate::context::{Context, SettingsExt};
/// Returns the underlying error kind for the given error.
pub fn underlying_io_error_kind(error: &Error) -> Option<io::ErrorKind> {
for cause in error.chain() {
if let Some(io_error) = cause.downcast_ref::<io::Error>() {
return Some(io_error.kind());
}
}
None
}
/// Remove a file or directory.
fn nuke_path(path: &Path) -> io::Result<()> {
if path.is_dir() {
fs::remove_dir_all(path)
} else {
fs::remove_file(path)
}
}
/// Download a remote file.
pub fn download(url: &str, mut file: File) -> result::Result<(), curl::Error> {
let mut easy = curl::easy::Easy::new();
easy.fail_on_error(true)?; // -f
easy.follow_location(true)?; // -L
easy.url(url.as_ref())?;
let mut transfer = easy.transfer();
transfer.write_function(move |data| {
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct TempPath {
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path, the temporary
/// filename will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path, if something exists
/// at that temporary path is will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind()!= io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository> {
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
}
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Checkout at repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid)) | fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
/// Get the *remote* HEAD as an object identifier.
pub fn resolve_head(repo: &Repository) -> anyhow::Result<Oid> {
resolve_refname(repo, "refs/remotes/origin/HEAD").context("failed to find remote HEAD")
}
/// Resolve a branch to a object identifier.
pub fn resolve_branch(repo: &Repository, branch: &str) -> anyhow::Result<Oid> {
repo.find_branch(&format!("origin/{}", branch), BranchType::Remote)
.with_context(s!("failed to find branch `{}`", branch))?
.get()
.target()
.with_context(s!("branch `{}` does not have a target", branch))
}
/// Resolve a revision to a object identifier.
pub fn resolve_rev(repo: &Repository, rev: &str) -> anyhow::Result<Oid> {
let obj = repo
.revparse_single(rev)
.with_context(s!("failed to find revision `{}`", rev))?;
Ok(match obj.as_tag() {
Some(tag) => tag.target_id(),
None => obj.id(),
})
}
/// Resolve a tag to a object identifier.
pub fn resolve_tag(repo: &Repository, tag: &str) -> anyhow::Result<Oid> {
fn _resolve_tag(repo: &Repository, tag: &str) -> Result<Oid, Error> {
let id = repo.refname_to_id(&format!("refs/tags/{}", tag))?;
let obj = repo.find_object(id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
_resolve_tag(repo, tag).with_context(s!("failed to find tag `{}`", tag))
}
}
////////////////////////////////////////////////////////////////////////////////
// Unit tests
////////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn path_buf_expand_tilde_with_root() {
assert_eq!(PathBuf::from("/").expand_tilde("/test"), PathBuf::from("/"))
}
#[test]
fn path_buf_expand_tilde_with_folder_in_root() {
assert_eq!(
PathBuf::from("/fol/der").expand_tilde("/test"),
PathBuf::from("/fol/der")
)
}
#[test]
fn path_buf_expand_tilde_with_home() {
assert_eq!(
PathBuf::from("~/").expand_tilde("/test"),
PathBuf::from("/test")
)
}
#[test]
fn path_buf_expand_tilde_with_folder_in_home() {
assert_eq!(
PathBuf::from("~/fol/der").expand_tilde("/test"),
PathBuf::from("/test/fol/der")
)
}
#[test]
fn path_buf_replace_home_with_root() {
assert_eq!(
PathBuf::from("/not/home").replace_home("/test/home"),
PathBuf::from("/not/home")
)
}
#[test]
fn path_buf_replace_home_with_home() {
assert_eq!(
PathBuf::from("/test/home").replace_home("/test/home"),
PathBuf::from("~")
)
}
} | }
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> { | random_line_split |
util.rs | //! Utility traits and functions.
use std::ffi;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::time;
use anyhow::{Context as ResultExt, Error, Result};
use fs2::{lock_contended_error, FileExt};
use crate::context::{Context, SettingsExt};
/// Returns the underlying error kind for the given error.
pub fn underlying_io_error_kind(error: &Error) -> Option<io::ErrorKind> {
for cause in error.chain() {
if let Some(io_error) = cause.downcast_ref::<io::Error>() {
return Some(io_error.kind());
}
}
None
}
/// Remove a file or directory.
fn nuke_path(path: &Path) -> io::Result<()> {
if path.is_dir() {
fs::remove_dir_all(path)
} else {
fs::remove_file(path)
}
}
/// Download a remote file.
pub fn download(url: &str, mut file: File) -> result::Result<(), curl::Error> {
let mut easy = curl::easy::Easy::new();
easy.fail_on_error(true)?; // -f
easy.follow_location(true)?; // -L
easy.url(url.as_ref())?;
let mut transfer = easy.transfer();
transfer.write_function(move |data| {
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct | {
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path, the temporary
/// filename will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path, if something exists
/// at that temporary path is will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind()!= io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository> {
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
}
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Checkout at repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid))
}
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> {
fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
/// Get the *remote* HEAD as an object identifier.
pub fn resolve_head(repo: &Repository) -> anyhow::Result<Oid> {
resolve_refname(repo, "refs/remotes/origin/HEAD").context("failed to find remote HEAD")
}
/// Resolve a branch to a object identifier.
pub fn resolve_branch(repo: &Repository, branch: &str) -> anyhow::Result<Oid> {
repo.find_branch(&format!("origin/{}", branch), BranchType::Remote)
.with_context(s!("failed to find branch `{}`", branch))?
.get()
.target()
.with_context(s!("branch `{}` does not have a target", branch))
}
/// Resolve a revision to a object identifier.
pub fn resolve_rev(repo: &Repository, rev: &str) -> anyhow::Result<Oid> {
let obj = repo
.revparse_single(rev)
.with_context(s!("failed to find revision `{}`", rev))?;
Ok(match obj.as_tag() {
Some(tag) => tag.target_id(),
None => obj.id(),
})
}
/// Resolve a tag to a object identifier.
pub fn resolve_tag(repo: &Repository, tag: &str) -> anyhow::Result<Oid> {
fn _resolve_tag(repo: &Repository, tag: &str) -> Result<Oid, Error> {
let id = repo.refname_to_id(&format!("refs/tags/{}", tag))?;
let obj = repo.find_object(id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
_resolve_tag(repo, tag).with_context(s!("failed to find tag `{}`", tag))
}
}
////////////////////////////////////////////////////////////////////////////////
// Unit tests
////////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn path_buf_expand_tilde_with_root() {
assert_eq!(PathBuf::from("/").expand_tilde("/test"), PathBuf::from("/"))
}
#[test]
fn path_buf_expand_tilde_with_folder_in_root() {
assert_eq!(
PathBuf::from("/fol/der").expand_tilde("/test"),
PathBuf::from("/fol/der")
)
}
#[test]
fn path_buf_expand_tilde_with_home() {
assert_eq!(
PathBuf::from("~/").expand_tilde("/test"),
PathBuf::from("/test")
)
}
#[test]
fn path_buf_expand_tilde_with_folder_in_home() {
assert_eq!(
PathBuf::from("~/fol/der").expand_tilde("/test"),
PathBuf::from("/test/fol/der")
)
}
#[test]
fn path_buf_replace_home_with_root() {
assert_eq!(
PathBuf::from("/not/home").replace_home("/test/home"),
PathBuf::from("/not/home")
)
}
#[test]
fn path_buf_replace_home_with_home() {
assert_eq!(
PathBuf::from("/test/home").replace_home("/test/home"),
PathBuf::from("~")
)
}
}
| TempPath | identifier_name |
util.rs | //! Utility traits and functions.
use std::ffi;
use std::fs::{self, File};
use std::io::{self, Write};
use std::path::{Path, PathBuf};
use std::result;
use std::time;
use anyhow::{Context as ResultExt, Error, Result};
use fs2::{lock_contended_error, FileExt};
use crate::context::{Context, SettingsExt};
/// Returns the underlying error kind for the given error.
pub fn underlying_io_error_kind(error: &Error) -> Option<io::ErrorKind> {
for cause in error.chain() {
if let Some(io_error) = cause.downcast_ref::<io::Error>() {
return Some(io_error.kind());
}
}
None
}
/// Remove a file or directory.
fn nuke_path(path: &Path) -> io::Result<()> {
if path.is_dir() {
fs::remove_dir_all(path)
} else {
fs::remove_file(path)
}
}
/// Download a remote file.
pub fn download(url: &str, mut file: File) -> result::Result<(), curl::Error> {
let mut easy = curl::easy::Easy::new();
easy.fail_on_error(true)?; // -f
easy.follow_location(true)?; // -L
easy.url(url.as_ref())?;
let mut transfer = easy.transfer();
transfer.write_function(move |data| {
match file.write_all(data) {
Ok(()) => Ok(data.len()),
Err(_) => Ok(0), // signals to cURL that the writing failed
}
})?;
transfer.perform()?;
Ok(())
}
////////////////////////////////////////////////////////////////////////////////
// PathExt trait
////////////////////////////////////////////////////////////////////////////////
/// An extension trait for [`Path`] types.
///
/// [`Path`]: https://doc.rust-lang.org/std/path/struct.Path.html
pub trait PathExt {
fn metadata_modified(&self) -> Option<time::SystemTime>;
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>;
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>;
}
impl PathExt for Path {
/// Returns the modified time of the file if available.
fn metadata_modified(&self) -> Option<time::SystemTime> {
fs::metadata(&self).and_then(|m| m.modified()).ok()
}
/// Returns whether the file at this path is newer than the file at the
/// given one. If either file does not exist, this method returns `false`.
fn newer_than<P>(&self, other: P) -> bool
where
P: AsRef<Path>,
{
match (self.metadata_modified(), other.as_ref().metadata_modified()) {
(Some(self_time), Some(other_time)) => self_time > other_time,
_ => false,
}
}
/// Expands the tilde in the path with the given home directory.
fn expand_tilde<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix("~") {
home.as_ref().join(path)
} else {
self.to_path_buf()
}
}
/// Replaces the home directory in the path with a tilde.
fn replace_home<P>(&self, home: P) -> PathBuf
where
P: AsRef<Path>,
{
if let Ok(path) = self.strip_prefix(home) {
Self::new("~").join(path)
} else {
self.to_path_buf()
}
}
}
////////////////////////////////////////////////////////////////////////////////
// TempPath type
////////////////////////////////////////////////////////////////////////////////
/// Holds a temporary directory or file path that is removed when dropped.
pub struct TempPath {
/// The temporary directory or file path.
path: Option<PathBuf>,
}
impl TempPath {
/// Create a new `TempPath` based on an original path, the temporary
/// filename will be placed in the same directory with a deterministic name.
///
/// # Errors
///
/// If the temporary path already exists.
pub fn new(original_path: &Path) -> result::Result<Self, Self> {
let mut path = original_path.parent().unwrap().to_path_buf();
let mut file_name = ffi::OsString::from("~");
file_name.push(original_path.file_name().unwrap());
path.push(file_name);
let temp = Self { path: Some(path) };
if temp.path().exists() {
Err(temp)
} else {
Ok(temp)
}
}
/// Create a new `TempPath` based on an original path, if something exists
/// at that temporary path is will be deleted.
pub fn new_force(original_path: &Path) -> Result<Self> {
match Self::new(original_path) {
Ok(temp) => Ok(temp),
Err(temp) => {
nuke_path(temp.path())?;
Ok(temp)
}
}
}
/// Access the underlying `Path`.
pub fn path(&self) -> &Path {
self.path.as_ref().unwrap()
}
/// Move the temporary path to a new location.
pub fn rename(mut self, new_path: &Path) -> io::Result<()> {
if let Err(err) = nuke_path(new_path) {
if err.kind()!= io::ErrorKind::NotFound {
return Err(err);
}
};
if let Some(path) = &self.path {
fs::rename(path, new_path)?;
// This is so that the Drop impl doesn't try delete a non-existent file.
self.path = None;
}
Ok(())
}
}
impl Drop for TempPath {
fn drop(&mut self) {
if let Some(path) = &self.path {
nuke_path(&path).ok();
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Mutex type
////////////////////////////////////////////////////////////////////////////////
#[derive(Debug)]
pub struct Mutex(File);
impl Mutex {
/// Create a new `Mutex` at the given path and attempt to acquire it.
pub fn acquire(ctx: &Context, path: &Path) -> Result<Self> {
let file = fs::OpenOptions::new()
.read(true)
.open(path)
.with_context(s!("failed to open `{}`", path.display()))?;
if let Err(e) = file.try_lock_exclusive() {
let msg = s!("failed to acquire file lock `{}`", path.display());
if e.raw_os_error() == lock_contended_error().raw_os_error() {
warning!(
ctx,
"Blocking",
&format!(
"waiting for file lock on {}",
ctx.replace_home(path).display()
)
);
file.lock_exclusive().with_context(msg)?;
} else {
return Err(e).with_context(msg);
}
}
Ok(Self(file))
}
}
impl Drop for Mutex {
fn drop(&mut self) {
self.0.unlock().ok();
}
}
////////////////////////////////////////////////////////////////////////////////
// Git module
////////////////////////////////////////////////////////////////////////////////
pub mod git {
use std::path::Path;
use git2::{
BranchType, Cred, CredentialType, Error, FetchOptions, Oid, RemoteCallbacks, Repository,
ResetType,
};
use once_cell::sync::Lazy;
use url::Url;
use anyhow::Context as ResultExt;
/// Call a function with generated fetch options.
fn with_fetch_options<T, F>(f: F) -> anyhow::Result<T>
where
F: FnOnce(FetchOptions<'_>) -> anyhow::Result<T>,
{
let mut rcb = RemoteCallbacks::new();
rcb.credentials(|_, username, allowed| {
if allowed.contains(CredentialType::SSH_KEY) {
if let Some(username) = username {
return Cred::ssh_key_from_agent(username);
}
}
if allowed.contains(CredentialType::DEFAULT) {
return Cred::default();
}
Err(Error::from_str(
"remote authentication required but none available",
))
});
let mut opts = FetchOptions::new();
opts.remote_callbacks(rcb);
f(opts)
}
/// Open a Git repository.
pub fn open(dir: &Path) -> anyhow::Result<Repository> {
let repo = Repository::open(dir)
.with_context(s!("failed to open repository at `{}`", dir.display()))?;
Ok(repo)
}
static DEFAULT_REFSPECS: Lazy<Vec<String>> = Lazy::new(|| {
vec_into![
"refs/heads/*:refs/remotes/origin/*",
"HEAD:refs/remotes/origin/HEAD"
]
});
/// Clone a Git repository.
pub fn clone(url: &Url, dir: &Path) -> anyhow::Result<Repository> |
/// Fetch a Git repository.
pub fn fetch(repo: &Repository) -> anyhow::Result<()> {
with_fetch_options(|mut opts| {
repo.find_remote("origin")
.context("failed to find remote `origin`")?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(())
})
.context("failed to git fetch")
}
/// Checkout at repository at a particular revision.
pub fn checkout(repo: &Repository, oid: Oid) -> anyhow::Result<()> {
let obj = repo
.find_object(oid, None)
.with_context(s!("failed to find `{}`", oid))?;
repo.reset(&obj, ResetType::Hard, None)
.with_context(s!("failed to set HEAD to `{}`", oid))?;
repo.checkout_tree(&obj, None)
.with_context(s!("failed to checkout `{}`", oid))
}
/// Recursively update Git submodules.
pub fn submodule_update(repo: &Repository) -> Result<(), Error> {
fn _submodule_update(repo: &Repository, todo: &mut Vec<Repository>) -> Result<(), Error> {
for mut submodule in repo.submodules()? {
submodule.update(true, None)?;
todo.push(submodule.open()?);
}
Ok(())
}
let mut repos = Vec::new();
_submodule_update(&repo, &mut repos)?;
while let Some(repo) = repos.pop() {
_submodule_update(&repo, &mut repos)?;
}
Ok(())
}
fn resolve_refname(repo: &Repository, refname: &str) -> Result<Oid, Error> {
let ref_id = repo.refname_to_id(refname)?;
let obj = repo.find_object(ref_id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
/// Get the *remote* HEAD as an object identifier.
pub fn resolve_head(repo: &Repository) -> anyhow::Result<Oid> {
resolve_refname(repo, "refs/remotes/origin/HEAD").context("failed to find remote HEAD")
}
/// Resolve a branch to a object identifier.
pub fn resolve_branch(repo: &Repository, branch: &str) -> anyhow::Result<Oid> {
repo.find_branch(&format!("origin/{}", branch), BranchType::Remote)
.with_context(s!("failed to find branch `{}`", branch))?
.get()
.target()
.with_context(s!("branch `{}` does not have a target", branch))
}
/// Resolve a revision to a object identifier.
pub fn resolve_rev(repo: &Repository, rev: &str) -> anyhow::Result<Oid> {
let obj = repo
.revparse_single(rev)
.with_context(s!("failed to find revision `{}`", rev))?;
Ok(match obj.as_tag() {
Some(tag) => tag.target_id(),
None => obj.id(),
})
}
/// Resolve a tag to a object identifier.
pub fn resolve_tag(repo: &Repository, tag: &str) -> anyhow::Result<Oid> {
fn _resolve_tag(repo: &Repository, tag: &str) -> Result<Oid, Error> {
let id = repo.refname_to_id(&format!("refs/tags/{}", tag))?;
let obj = repo.find_object(id, None)?;
let obj = obj.peel(git2::ObjectType::Commit)?;
Ok(obj.id())
}
_resolve_tag(repo, tag).with_context(s!("failed to find tag `{}`", tag))
}
}
////////////////////////////////////////////////////////////////////////////////
// Unit tests
////////////////////////////////////////////////////////////////////////////////
#[cfg(test)]
mod tests {
use super::*;
use pretty_assertions::assert_eq;
#[test]
fn path_buf_expand_tilde_with_root() {
assert_eq!(PathBuf::from("/").expand_tilde("/test"), PathBuf::from("/"))
}
#[test]
fn path_buf_expand_tilde_with_folder_in_root() {
assert_eq!(
PathBuf::from("/fol/der").expand_tilde("/test"),
PathBuf::from("/fol/der")
)
}
#[test]
fn path_buf_expand_tilde_with_home() {
assert_eq!(
PathBuf::from("~/").expand_tilde("/test"),
PathBuf::from("/test")
)
}
#[test]
fn path_buf_expand_tilde_with_folder_in_home() {
assert_eq!(
PathBuf::from("~/fol/der").expand_tilde("/test"),
PathBuf::from("/test/fol/der")
)
}
#[test]
fn path_buf_replace_home_with_root() {
assert_eq!(
PathBuf::from("/not/home").replace_home("/test/home"),
PathBuf::from("/not/home")
)
}
#[test]
fn path_buf_replace_home_with_home() {
assert_eq!(
PathBuf::from("/test/home").replace_home("/test/home"),
PathBuf::from("~")
)
}
}
| {
with_fetch_options(|mut opts| {
let repo = Repository::init(dir)?;
repo.remote("origin", url.as_str())?
.fetch(&DEFAULT_REFSPECS, Some(&mut opts), None)?;
Ok(repo)
})
.with_context(s!("failed to git clone `{}`", url))
} | identifier_body |
bignum.rs | /* Copyright (c) Fortanix, Inc.
*
* Licensed under the GNU General Public License, version 2 <LICENSE-GPL or
* https://www.gnu.org/licenses/gpl-2.0.html> or the Apache License, Version
* 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>, at your
* option. This file may not be copied, modified, or distributed except
* according to those terms. */
extern crate mbedtls;
use mbedtls::bignum::Mpi;
#[cfg(feature = "std")]
#[test]
fn bignum_from_str() {
use std::str::FromStr;
let p256_16 =
Mpi::from_str("0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff")
.unwrap();
let p256_10 = Mpi::from_str(
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
)
.unwrap();
assert!(p256_16.eq(&p256_10));
assert_eq!(
format!("{}", p256_10),
"115792089210356248762697446949407573530086143415290314195533631308867097853951"
);
assert_eq!(
format!("{:X}", p256_10),
"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF"
);
assert_eq!(
format!("{:o}", p256_10),
"17777777777400000000010000000000000000000000000000000077777777777777777777777777777777"
);
assert_eq!(format!("{:b}", p256_10), "1111111111111111111111111111111100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111");
}
#[test]
fn bignum() {
let six = Mpi::new(6).unwrap();
assert_eq!(six.byte_length().unwrap(), 1);
assert_eq!(six.bit_length().unwrap(), 3);
let six_bytes = six.to_binary().unwrap();
assert_eq!(six_bytes.len(), 1);
assert_eq!(six_bytes[0], 6);
let five = Mpi::new(5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
}
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn | () {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed!= &expected {
computed = (&n - &computed).unwrap();
}
assert_eq!(computed, expected);
}
// Tests generated by Sagemath
mod_sqrt_test("2", "7", "4");
mod_sqrt_test("5", "469289024411159", "234325000312516");
mod_sqrt_test(
"458050473005020050313790240477",
"905858848829014223214249213947",
"126474086260479574845714194337",
);
mod_sqrt_test("4", "13", "2");
mod_sqrt_test("2", "113", "62");
mod_sqrt_test(
"14432894130216089699367965001582109139186342668614313620824414613061488655",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"64346440714386899555372506097606752274599811989145306413544609746921648646",
);
mod_sqrt_test(
"2",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"29863506841820532608636271306847583140720915984413766535227954746838873278",
);
mod_sqrt_test(
"2",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD",
);
mod_sqrt_test(
"0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B",
);
// Primes where 2^s divides p-1 for s >= 3 which caught a bug
mod_sqrt_test("2", "17", "6");
mod_sqrt_test("2", "97", "14");
mod_sqrt_test("2", "193", "52");
mod_sqrt_test("2", "257", "60");
mod_sqrt_test("2", "65537", "4080");
mod_sqrt_test("2", "0x1200000001", "17207801277");
mod_sqrt_test(
"2",
"0x660000000000000000000000000000000000000000000000000000000000000001",
"0xce495874f10d32d28105400c73f73aafc7cbbae7cd1dfa1525f2701b3573d78c0",
);
}
#[test]
fn bignum_cmp() {
let big = Mpi::new(2147483647).unwrap();
let small = Mpi::new(2).unwrap();
assert!(big > small);
assert!(small < big);
assert!(big >= small);
assert!(small <= big);
assert!(small >= small);
assert!(big <= big);
assert!(small == small);
assert!(small!= big);
}
#[test]
fn bigint_ops() {
let x = Mpi::new(100).unwrap();
let y = Mpi::new(20900).unwrap();
assert_eq!(x.as_u32().unwrap(), 100);
let z = (&x + &y).unwrap();
assert_eq!(z.as_u32().unwrap(), 21000);
let z = (&z * &y).unwrap();
assert_eq!(z, Mpi::new(438900000).unwrap());
let z = (&z - &x).unwrap();
assert_eq!(z, Mpi::new(0x1A2914BC).unwrap());
let r = (&z % 127).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let r = (&z % &Mpi::new(127).unwrap()).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let q = (&z / 53).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let q = (&z / &Mpi::new(53).unwrap()).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let nan = &z / 0;
assert!(nan.is_err());
}
const BASE58_ALPHABET: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
fn base58_encode(bits: &[u8]) -> mbedtls::Result<String> {
let zero = Mpi::new(0)?;
let mut n = Mpi::from_binary(bits)?;
let radix: i64 = 58;
let mut s = Vec::new();
while n > zero {
let (q, r) = n.divrem_int(radix)?;
n = q;
s.push(BASE58_ALPHABET[r.as_u32()? as usize]);
}
s.reverse();
Ok(String::from_utf8(s).unwrap())
}
fn base58_decode(b58: &str) -> mbedtls::Result<Vec<u8>> {
let radix: i64 = 58;
let mut n = Mpi::new(0)?;
fn base58_val(b: u8) -> mbedtls::Result<usize> {
for (i, c) in BASE58_ALPHABET.iter().enumerate() {
if *c == b {
return Ok(i);
}
}
Err(mbedtls::Error::Base64InvalidCharacter)
}
for c in b58.bytes() {
let v = base58_val(c)? as i64;
n = (&n * radix)?;
n = (&n + v)?;
}
n.to_binary()
}
#[test]
fn test_base58_encode() {
fn test_base58_rt(input: &[u8], expected: &str) {
assert_eq!(base58_encode(input).unwrap(), expected);
assert_eq!(base58_decode(expected).unwrap(), input);
}
test_base58_rt(b"", "");
test_base58_rt(&[32], "Z");
test_base58_rt(&[45], "n");
test_base58_rt(&[48], "q");
test_base58_rt(&[49], "r");
test_base58_rt(&[57], "z");
test_base58_rt(&[45, 49], "4SU");
test_base58_rt(&[49, 49], "4k8");
test_base58_rt(b"abc", "ZiCa");
test_base58_rt(b"1234598760", "3mJr7AoUXx2Wqd");
test_base58_rt(
b"abcdefghijklmnopqrstuvwxyz",
"3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f",
);
}
| test_mod_sqrt_fn | identifier_name |
bignum.rs | /* Copyright (c) Fortanix, Inc.
*
* Licensed under the GNU General Public License, version 2 <LICENSE-GPL or
* https://www.gnu.org/licenses/gpl-2.0.html> or the Apache License, Version
* 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>, at your
* option. This file may not be copied, modified, or distributed except
* according to those terms. */
extern crate mbedtls;
use mbedtls::bignum::Mpi;
#[cfg(feature = "std")]
#[test]
fn bignum_from_str() {
use std::str::FromStr;
let p256_16 =
Mpi::from_str("0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff")
.unwrap();
let p256_10 = Mpi::from_str(
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
)
.unwrap();
assert!(p256_16.eq(&p256_10));
assert_eq!(
format!("{}", p256_10),
"115792089210356248762697446949407573530086143415290314195533631308867097853951"
);
assert_eq!(
format!("{:X}", p256_10),
"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF"
);
assert_eq!(
format!("{:o}", p256_10),
"17777777777400000000010000000000000000000000000000000077777777777777777777777777777777"
);
assert_eq!(format!("{:b}", p256_10), "1111111111111111111111111111111100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111");
}
#[test]
fn bignum() {
let six = Mpi::new(6).unwrap();
assert_eq!(six.byte_length().unwrap(), 1);
assert_eq!(six.bit_length().unwrap(), 3);
let six_bytes = six.to_binary().unwrap();
assert_eq!(six_bytes.len(), 1);
assert_eq!(six_bytes[0], 6);
let five = Mpi::new(5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
}
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn test_mod_sqrt_fn() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed!= &expected {
computed = (&n - &computed).unwrap();
}
assert_eq!(computed, expected);
}
// Tests generated by Sagemath
mod_sqrt_test("2", "7", "4");
mod_sqrt_test("5", "469289024411159", "234325000312516");
mod_sqrt_test(
"458050473005020050313790240477",
"905858848829014223214249213947",
"126474086260479574845714194337",
);
mod_sqrt_test("4", "13", "2");
mod_sqrt_test("2", "113", "62");
mod_sqrt_test(
"14432894130216089699367965001582109139186342668614313620824414613061488655",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"64346440714386899555372506097606752274599811989145306413544609746921648646",
);
mod_sqrt_test(
"2",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"29863506841820532608636271306847583140720915984413766535227954746838873278",
);
mod_sqrt_test(
"2",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD",
);
mod_sqrt_test(
"0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B",
);
// Primes where 2^s divides p-1 for s >= 3 which caught a bug
mod_sqrt_test("2", "17", "6");
mod_sqrt_test("2", "97", "14");
mod_sqrt_test("2", "193", "52");
mod_sqrt_test("2", "257", "60");
mod_sqrt_test("2", "65537", "4080");
mod_sqrt_test("2", "0x1200000001", "17207801277");
mod_sqrt_test(
"2",
"0x660000000000000000000000000000000000000000000000000000000000000001",
"0xce495874f10d32d28105400c73f73aafc7cbbae7cd1dfa1525f2701b3573d78c0",
);
}
#[test]
fn bignum_cmp() {
let big = Mpi::new(2147483647).unwrap();
let small = Mpi::new(2).unwrap();
assert!(big > small);
assert!(small < big);
assert!(big >= small);
assert!(small <= big);
assert!(small >= small);
assert!(big <= big);
assert!(small == small);
assert!(small!= big);
}
#[test]
fn bigint_ops() {
let x = Mpi::new(100).unwrap();
let y = Mpi::new(20900).unwrap();
assert_eq!(x.as_u32().unwrap(), 100);
let z = (&x + &y).unwrap();
assert_eq!(z.as_u32().unwrap(), 21000);
let z = (&z * &y).unwrap();
assert_eq!(z, Mpi::new(438900000).unwrap());
let z = (&z - &x).unwrap();
assert_eq!(z, Mpi::new(0x1A2914BC).unwrap());
let r = (&z % 127).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let r = (&z % &Mpi::new(127).unwrap()).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let q = (&z / 53).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let q = (&z / &Mpi::new(53).unwrap()).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let nan = &z / 0;
assert!(nan.is_err());
}
const BASE58_ALPHABET: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
fn base58_encode(bits: &[u8]) -> mbedtls::Result<String> {
let zero = Mpi::new(0)?;
let mut n = Mpi::from_binary(bits)?;
let radix: i64 = 58;
let mut s = Vec::new();
while n > zero {
let (q, r) = n.divrem_int(radix)?;
n = q;
s.push(BASE58_ALPHABET[r.as_u32()? as usize]);
}
s.reverse();
Ok(String::from_utf8(s).unwrap())
}
fn base58_decode(b58: &str) -> mbedtls::Result<Vec<u8>> | n.to_binary()
}
#[test]
fn test_base58_encode() {
fn test_base58_rt(input: &[u8], expected: &str) {
assert_eq!(base58_encode(input).unwrap(), expected);
assert_eq!(base58_decode(expected).unwrap(), input);
}
test_base58_rt(b"", "");
test_base58_rt(&[32], "Z");
test_base58_rt(&[45], "n");
test_base58_rt(&[48], "q");
test_base58_rt(&[49], "r");
test_base58_rt(&[57], "z");
test_base58_rt(&[45, 49], "4SU");
test_base58_rt(&[49, 49], "4k8");
test_base58_rt(b"abc", "ZiCa");
test_base58_rt(b"1234598760", "3mJr7AoUXx2Wqd");
test_base58_rt(
b"abcdefghijklmnopqrstuvwxyz",
"3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f",
);
}
| {
let radix: i64 = 58;
let mut n = Mpi::new(0)?;
fn base58_val(b: u8) -> mbedtls::Result<usize> {
for (i, c) in BASE58_ALPHABET.iter().enumerate() {
if *c == b {
return Ok(i);
}
}
Err(mbedtls::Error::Base64InvalidCharacter)
}
for c in b58.bytes() {
let v = base58_val(c)? as i64;
n = (&n * radix)?;
n = (&n + v)?;
}
| identifier_body |
bignum.rs | /* Copyright (c) Fortanix, Inc.
*
* Licensed under the GNU General Public License, version 2 <LICENSE-GPL or
* https://www.gnu.org/licenses/gpl-2.0.html> or the Apache License, Version
* 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>, at your
* option. This file may not be copied, modified, or distributed except
* according to those terms. */
extern crate mbedtls;
use mbedtls::bignum::Mpi;
#[cfg(feature = "std")]
#[test]
fn bignum_from_str() {
use std::str::FromStr;
let p256_16 =
Mpi::from_str("0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff")
.unwrap();
let p256_10 = Mpi::from_str(
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
)
.unwrap();
assert!(p256_16.eq(&p256_10));
assert_eq!(
format!("{}", p256_10),
"115792089210356248762697446949407573530086143415290314195533631308867097853951"
);
assert_eq!(
format!("{:X}", p256_10),
"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF"
);
assert_eq!(
format!("{:o}", p256_10),
"17777777777400000000010000000000000000000000000000000077777777777777777777777777777777"
);
assert_eq!(format!("{:b}", p256_10), "1111111111111111111111111111111100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111");
}
#[test]
fn bignum() {
let six = Mpi::new(6).unwrap();
assert_eq!(six.byte_length().unwrap(), 1);
assert_eq!(six.bit_length().unwrap(), 3);
let six_bytes = six.to_binary().unwrap();
assert_eq!(six_bytes.len(), 1);
assert_eq!(six_bytes[0], 6);
let five = Mpi::new(5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved | #[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn test_mod_sqrt_fn() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed!= &expected {
computed = (&n - &computed).unwrap();
}
assert_eq!(computed, expected);
}
// Tests generated by Sagemath
mod_sqrt_test("2", "7", "4");
mod_sqrt_test("5", "469289024411159", "234325000312516");
mod_sqrt_test(
"458050473005020050313790240477",
"905858848829014223214249213947",
"126474086260479574845714194337",
);
mod_sqrt_test("4", "13", "2");
mod_sqrt_test("2", "113", "62");
mod_sqrt_test(
"14432894130216089699367965001582109139186342668614313620824414613061488655",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"64346440714386899555372506097606752274599811989145306413544609746921648646",
);
mod_sqrt_test(
"2",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"29863506841820532608636271306847583140720915984413766535227954746838873278",
);
mod_sqrt_test(
"2",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD",
);
mod_sqrt_test(
"0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B",
);
// Primes where 2^s divides p-1 for s >= 3 which caught a bug
mod_sqrt_test("2", "17", "6");
mod_sqrt_test("2", "97", "14");
mod_sqrt_test("2", "193", "52");
mod_sqrt_test("2", "257", "60");
mod_sqrt_test("2", "65537", "4080");
mod_sqrt_test("2", "0x1200000001", "17207801277");
mod_sqrt_test(
"2",
"0x660000000000000000000000000000000000000000000000000000000000000001",
"0xce495874f10d32d28105400c73f73aafc7cbbae7cd1dfa1525f2701b3573d78c0",
);
}
#[test]
fn bignum_cmp() {
let big = Mpi::new(2147483647).unwrap();
let small = Mpi::new(2).unwrap();
assert!(big > small);
assert!(small < big);
assert!(big >= small);
assert!(small <= big);
assert!(small >= small);
assert!(big <= big);
assert!(small == small);
assert!(small!= big);
}
#[test]
fn bigint_ops() {
let x = Mpi::new(100).unwrap();
let y = Mpi::new(20900).unwrap();
assert_eq!(x.as_u32().unwrap(), 100);
let z = (&x + &y).unwrap();
assert_eq!(z.as_u32().unwrap(), 21000);
let z = (&z * &y).unwrap();
assert_eq!(z, Mpi::new(438900000).unwrap());
let z = (&z - &x).unwrap();
assert_eq!(z, Mpi::new(0x1A2914BC).unwrap());
let r = (&z % 127).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let r = (&z % &Mpi::new(127).unwrap()).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let q = (&z / 53).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let q = (&z / &Mpi::new(53).unwrap()).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let nan = &z / 0;
assert!(nan.is_err());
}
const BASE58_ALPHABET: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
fn base58_encode(bits: &[u8]) -> mbedtls::Result<String> {
let zero = Mpi::new(0)?;
let mut n = Mpi::from_binary(bits)?;
let radix: i64 = 58;
let mut s = Vec::new();
while n > zero {
let (q, r) = n.divrem_int(radix)?;
n = q;
s.push(BASE58_ALPHABET[r.as_u32()? as usize]);
}
s.reverse();
Ok(String::from_utf8(s).unwrap())
}
fn base58_decode(b58: &str) -> mbedtls::Result<Vec<u8>> {
let radix: i64 = 58;
let mut n = Mpi::new(0)?;
fn base58_val(b: u8) -> mbedtls::Result<usize> {
for (i, c) in BASE58_ALPHABET.iter().enumerate() {
if *c == b {
return Ok(i);
}
}
Err(mbedtls::Error::Base64InvalidCharacter)
}
for c in b58.bytes() {
let v = base58_val(c)? as i64;
n = (&n * radix)?;
n = (&n + v)?;
}
n.to_binary()
}
#[test]
fn test_base58_encode() {
fn test_base58_rt(input: &[u8], expected: &str) {
assert_eq!(base58_encode(input).unwrap(), expected);
assert_eq!(base58_decode(expected).unwrap(), input);
}
test_base58_rt(b"", "");
test_base58_rt(&[32], "Z");
test_base58_rt(&[45], "n");
test_base58_rt(&[48], "q");
test_base58_rt(&[49], "r");
test_base58_rt(&[57], "z");
test_base58_rt(&[45, 49], "4SU");
test_base58_rt(&[49, 49], "4k8");
test_base58_rt(b"abc", "ZiCa");
test_base58_rt(b"1234598760", "3mJr7AoUXx2Wqd");
test_base58_rt(
b"abcdefghijklmnopqrstuvwxyz",
"3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f",
);
} | }
| random_line_split |
bignum.rs | /* Copyright (c) Fortanix, Inc.
*
* Licensed under the GNU General Public License, version 2 <LICENSE-GPL or
* https://www.gnu.org/licenses/gpl-2.0.html> or the Apache License, Version
* 2.0 <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0>, at your
* option. This file may not be copied, modified, or distributed except
* according to those terms. */
extern crate mbedtls;
use mbedtls::bignum::Mpi;
#[cfg(feature = "std")]
#[test]
fn bignum_from_str() {
use std::str::FromStr;
let p256_16 =
Mpi::from_str("0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff")
.unwrap();
let p256_10 = Mpi::from_str(
"115792089210356248762697446949407573530086143415290314195533631308867097853951",
)
.unwrap();
assert!(p256_16.eq(&p256_10));
assert_eq!(
format!("{}", p256_10),
"115792089210356248762697446949407573530086143415290314195533631308867097853951"
);
assert_eq!(
format!("{:X}", p256_10),
"FFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF"
);
assert_eq!(
format!("{:o}", p256_10),
"17777777777400000000010000000000000000000000000000000077777777777777777777777777777777"
);
assert_eq!(format!("{:b}", p256_10), "1111111111111111111111111111111100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111");
}
#[test]
fn bignum() {
let six = Mpi::new(6).unwrap();
assert_eq!(six.byte_length().unwrap(), 1);
assert_eq!(six.bit_length().unwrap(), 3);
let six_bytes = six.to_binary().unwrap();
assert_eq!(six_bytes.len(), 1);
assert_eq!(six_bytes[0], 6);
let five = Mpi::new(5).unwrap();
assert_eq!(six.cmp(&five), ::std::cmp::Ordering::Greater);
assert_eq!(five.cmp(&five), ::std::cmp::Ordering::Equal);
assert_eq!(five.cmp(&six), ::std::cmp::Ordering::Less);
let bigger = Mpi::new(0x2a2f5dce).unwrap();
assert_eq!(bigger.byte_length().unwrap(), 4);
assert_eq!(bigger.bit_length().unwrap(), 30);
let b_bytes = bigger.to_binary().unwrap();
assert_eq!(b_bytes.len(), 4);
assert_eq!(b_bytes[0], 0x2a);
assert_eq!(b_bytes[1], 0x2f);
assert_eq!(b_bytes[2], 0x5d);
assert_eq!(b_bytes[3], 0xce);
assert!(bigger.eq(&Mpi::from_binary(&b_bytes).unwrap()));
}
#[test]
fn bignum_shifts() {
let x = Mpi::new(3).unwrap();
let y = (&x << 30).unwrap();
assert_eq!(format!("{}", y), "3221225472");
let y = (&y >> 30).unwrap();
assert_eq!(format!("{}", y), "3");
let y = (&y >> 2).unwrap();
assert_eq!(format!("{}", y), "0");
let mut z = Mpi::new(1).unwrap();
z <<= 5;
assert_eq!(format!("{}", z), "32");
z <<= 15;
assert_eq!(format!("{}", z), "1048576");
z >>= 10;
assert_eq!(format!("{}", z), "1024");
}
#[test]
fn bignum_op_assign() {
let mut x = Mpi::new(4).unwrap();
x += 9;
assert_eq!(format!("{}", x), "13");
x += Mpi::new(13).unwrap();
assert_eq!(format!("{}", x), "26");
let y = Mpi::new(10).unwrap();
x += &y;
assert_eq!(format!("{}", x), "36");
x -= 3;
assert_eq!(format!("{}", x), "33");
x -= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "28");
x -= &y;
assert_eq!(format!("{}", x), "18");
x *= &y;
assert_eq!(format!("{}", x), "180");
x *= 2;
assert_eq!(format!("{}", x), "360");
x *= Mpi::new(-2).unwrap();
assert_eq!(format!("{}", x), "-720");
x /= Mpi::new(-3).unwrap();
assert_eq!(format!("{}", x), "240");
x /= 2;
assert_eq!(format!("{}", x), "120");
x /= &y;
assert_eq!(format!("{}", x), "12");
x %= 100;
assert_eq!(format!("{}", x), "12");
x %= Mpi::new(5).unwrap();
assert_eq!(format!("{}", x), "2");
assert_eq!(format!("{}", y), "10"); // verify y not moved
}
#[cfg(feature = "std")]
#[test]
fn test_jacobi_fn() {
use std::str::FromStr;
fn jacobi_symbol_test(a: &str, n: &str, expected: i32) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let j = a.jacobi(&n).unwrap();
//println!("a={} n={} J={}", a, n, j);
assert_eq!(j, expected);
}
// Tests generated by Sagemath
jacobi_symbol_test("5", "9", 1);
jacobi_symbol_test(
"80530568503105393620776136885268819039",
"136759011081214619901277936869624817013",
-1,
);
jacobi_symbol_test("541641436", "50733077", -1);
jacobi_symbol_test("541641437", "50733077", 1);
jacobi_symbol_test("50733077", "50733077", 0);
jacobi_symbol_test("126192963", "2869415899", 1);
jacobi_symbol_test("126192964", "2869415899", -1);
jacobi_symbol_test(
"290122183148875935619099270547",
"392382503032982745991600930111",
-1,
);
jacobi_symbol_test(
"652189681324592774835681787902",
"851019412553174450003757422011",
1,
);
jacobi_symbol_test(
"68607521964935451958858272376",
"89491088927603607083107403767",
1,
);
jacobi_symbol_test(
"218068701715357900365812660263",
"238095134266847041021320150827",
-1,
);
jacobi_symbol_test(
"9847597030024907406584779047",
"20414312383664964481261270711",
1,
);
jacobi_symbol_test(
"38938513347318987388516082474",
"49516772312071161029219932219",
1,
);
jacobi_symbol_test(
"300820947915083731970108494721",
"657305681340895250386089542863",
-1,
);
jacobi_symbol_test(
"12565726709694140412667952162",
"31771076028760826448147679003",
-1,
);
jacobi_symbol_test(
"344945231515347227453035588988",
"828252022515408040124517036011",
1,
);
jacobi_symbol_test(
"93331799786934264132380785163",
"313205417670262818093976413871",
-1,
);
}
#[cfg(feature = "std")]
#[test]
fn test_mod_sqrt_fn() {
use std::str::FromStr;
fn mod_sqrt_test(a: &str, n: &str, expected: &str) {
let a = Mpi::from_str(a).unwrap();
let n = Mpi::from_str(n).unwrap();
let expected = Mpi::from_str(expected).unwrap();
let mut computed = a.mod_sqrt(&n).unwrap();
/*
If x = (a*a) mod p then also x = (-a*-a) mod p, ie
if a square root exists then there are two square roots related by
x and p-x. The mod sqrt might return either of these options
*/
if &computed!= &expected |
assert_eq!(computed, expected);
}
// Tests generated by Sagemath
mod_sqrt_test("2", "7", "4");
mod_sqrt_test("5", "469289024411159", "234325000312516");
mod_sqrt_test(
"458050473005020050313790240477",
"905858848829014223214249213947",
"126474086260479574845714194337",
);
mod_sqrt_test("4", "13", "2");
mod_sqrt_test("2", "113", "62");
mod_sqrt_test(
"14432894130216089699367965001582109139186342668614313620824414613061488655",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"64346440714386899555372506097606752274599811989145306413544609746921648646",
);
mod_sqrt_test(
"2",
"145226202540352375281647974706811878790868025723961296389762379073201613561",
"29863506841820532608636271306847583140720915984413766535227954746838873278",
);
mod_sqrt_test(
"2",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x507442007322AA895340CBA4ABC2D730BFD0B16C2C79A46815F8780D2C55A2DD",
);
mod_sqrt_test(
"0x5AC635D8AA3A93E7B3EBBD55769886BC651D06B0CC53B0F63BCE3C3E27D2604B",
"0xFFFFFFFF00000001000000000000000000000000FFFFFFFFFFFFFFFFFFFFFFFF",
"0x99B7A386F1D07C29DBCC42A27B5F9449ABE3D50DE25178E8D7407A95E8B06C0B",
);
// Primes where 2^s divides p-1 for s >= 3 which caught a bug
mod_sqrt_test("2", "17", "6");
mod_sqrt_test("2", "97", "14");
mod_sqrt_test("2", "193", "52");
mod_sqrt_test("2", "257", "60");
mod_sqrt_test("2", "65537", "4080");
mod_sqrt_test("2", "0x1200000001", "17207801277");
mod_sqrt_test(
"2",
"0x660000000000000000000000000000000000000000000000000000000000000001",
"0xce495874f10d32d28105400c73f73aafc7cbbae7cd1dfa1525f2701b3573d78c0",
);
}
#[test]
fn bignum_cmp() {
let big = Mpi::new(2147483647).unwrap();
let small = Mpi::new(2).unwrap();
assert!(big > small);
assert!(small < big);
assert!(big >= small);
assert!(small <= big);
assert!(small >= small);
assert!(big <= big);
assert!(small == small);
assert!(small!= big);
}
#[test]
fn bigint_ops() {
let x = Mpi::new(100).unwrap();
let y = Mpi::new(20900).unwrap();
assert_eq!(x.as_u32().unwrap(), 100);
let z = (&x + &y).unwrap();
assert_eq!(z.as_u32().unwrap(), 21000);
let z = (&z * &y).unwrap();
assert_eq!(z, Mpi::new(438900000).unwrap());
let z = (&z - &x).unwrap();
assert_eq!(z, Mpi::new(0x1A2914BC).unwrap());
let r = (&z % 127).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let r = (&z % &Mpi::new(127).unwrap()).unwrap();
assert_eq!(r.as_u32().unwrap(), 92);
let q = (&z / 53).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let q = (&z / &Mpi::new(53).unwrap()).unwrap();
assert_eq!(q.as_u32().unwrap(), 8281130);
let nan = &z / 0;
assert!(nan.is_err());
}
const BASE58_ALPHABET: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
fn base58_encode(bits: &[u8]) -> mbedtls::Result<String> {
let zero = Mpi::new(0)?;
let mut n = Mpi::from_binary(bits)?;
let radix: i64 = 58;
let mut s = Vec::new();
while n > zero {
let (q, r) = n.divrem_int(radix)?;
n = q;
s.push(BASE58_ALPHABET[r.as_u32()? as usize]);
}
s.reverse();
Ok(String::from_utf8(s).unwrap())
}
fn base58_decode(b58: &str) -> mbedtls::Result<Vec<u8>> {
let radix: i64 = 58;
let mut n = Mpi::new(0)?;
fn base58_val(b: u8) -> mbedtls::Result<usize> {
for (i, c) in BASE58_ALPHABET.iter().enumerate() {
if *c == b {
return Ok(i);
}
}
Err(mbedtls::Error::Base64InvalidCharacter)
}
for c in b58.bytes() {
let v = base58_val(c)? as i64;
n = (&n * radix)?;
n = (&n + v)?;
}
n.to_binary()
}
#[test]
fn test_base58_encode() {
fn test_base58_rt(input: &[u8], expected: &str) {
assert_eq!(base58_encode(input).unwrap(), expected);
assert_eq!(base58_decode(expected).unwrap(), input);
}
test_base58_rt(b"", "");
test_base58_rt(&[32], "Z");
test_base58_rt(&[45], "n");
test_base58_rt(&[48], "q");
test_base58_rt(&[49], "r");
test_base58_rt(&[57], "z");
test_base58_rt(&[45, 49], "4SU");
test_base58_rt(&[49, 49], "4k8");
test_base58_rt(b"abc", "ZiCa");
test_base58_rt(b"1234598760", "3mJr7AoUXx2Wqd");
test_base58_rt(
b"abcdefghijklmnopqrstuvwxyz",
"3yxU3u1igY8WkgtjK92fbJQCd4BZiiT1v25f",
);
}
| {
computed = (&n - &computed).unwrap();
} | conditional_block |
lib.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResult,
ensure,
traits::{Currency, ExistenceRequirement, Get, OnTimestampSet},
weights::{DispatchClass, Pays},
};
use frame_system::{self as system, ensure_signed};
use sp_core::H256;
use sp_runtime::traits::SaturatedConversion;
use sp_std::prelude::*;
use sp_std::str;
#[cfg(not(feature = "skip-ias-check"))]
use ias_verify::{verify_ias_report, SgxReport};
pub use crate::weights::WeightInfo;
use ias_verify::SgxBuildMode;
pub trait Config: system::Config + timestamp::Config {
type Event: From<Event<Self>> + Into<<Self as system::Config>::Event>;
type Currency: Currency<<Self as system::Config>::AccountId>;
type MomentsPerDay: Get<Self::Moment>;
type WeightInfo: WeightInfo;
type MaxSilenceTime: Get<Self::Moment>;
}
const MAX_RA_REPORT_LEN: usize = 4096;
const MAX_URL_LEN: usize = 256;
#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, sp_core::RuntimeDebug)]
pub struct Enclave<PubKey, Url> {
pub pubkey: PubKey, // FIXME: this is redundant information
pub mr_enclave: [u8; 32],
// Todo: make timestamp: Moment
pub timestamp: u64, // unix epoch in milliseconds
pub url: Url, // utf8 encoded url
pub sgx_mode: SgxBuildMode,
}
impl<PubKey, Url> Enclave<PubKey, Url> {
pub fn new(
pubkey: PubKey,
mr_enclave: [u8; 32],
timestamp: u64,
url: Url,
sgx_build_mode: SgxBuildMode,
) -> Self {
Enclave {
pubkey,
mr_enclave,
timestamp,
url,
sgx_mode: sgx_build_mode,
}
}
}
pub type ShardIdentifier = H256;
// Disambiguate associated types
pub type AccountId<T> = <T as frame_system::Config>::AccountId;
pub type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountId<T>>>::Balance;
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub struct Request {
pub shard: ShardIdentifier,
pub cyphertext: Vec<u8>,
}
decl_event!(
pub enum Event<T>
where
<T as system::Config>::AccountId,
{
AddedEnclave(AccountId, Vec<u8>),
RemovedEnclave(AccountId),
UpdatedIpfsHash(ShardIdentifier, u64, Vec<u8>),
Forwarded(ShardIdentifier),
ShieldFunds(Vec<u8>),
UnshieldedFunds(AccountId),
CallConfirmed(AccountId, H256),
BlockConfirmed(AccountId, H256),
}
);
decl_storage! {
trait Store for Module<T: Config> as Teerex {
// Simple lists are not supported in runtime modules as theoretically O(n)
// operations can be executed while only being charged O(1), see substrate
// Kitties tutorial Chapter 2, Tracking all Kitties.
// watch out: we start indexing with 1 instead of zero in order to
// avoid ambiguity between Null and 0
pub EnclaveRegistry get(fn enclave): map hasher(blake2_128_concat) u64 => Enclave<T::AccountId, Vec<u8>>;
pub EnclaveCount get(fn enclave_count): u64;
pub EnclaveIndex get(fn enclave_index): map hasher(blake2_128_concat) T::AccountId => u64;
pub LatestIpfsHash get(fn latest_ipfs_hash) : map hasher(blake2_128_concat) ShardIdentifier => Vec<u8>;
// enclave index of the worker that recently committed an update
pub WorkerForShard get(fn worker_for_shard) : map hasher(blake2_128_concat) ShardIdentifier => u64;
pub ConfirmedCalls get(fn confirmed_calls): map hasher(blake2_128_concat) H256 => u64;
//pub ConfirmedBlocks get(fn confirmed_blocks): map hasher(blake2_128_concat) H256 => u64;
pub AllowSGXDebugMode get(fn allow_sgx_debug_mode) config(allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register his enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if!<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from withing the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if!<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else | ;
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
/// Our list implementation would introduce holes in out list if if we try to remove elements from the middle.
/// As the order of the enclave entries is not important, we use the swap an pop method to remove elements from
/// the registry.
fn swap_and_pop(index_to_remove: u64, new_enclaves_count: u64) -> DispatchResult {
if index_to_remove!= new_enclaves_count {
let last_enclave = <EnclaveRegistry<T>>::get(&new_enclaves_count);
<EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
<EnclaveIndex<T>>::insert(last_enclave.pubkey, index_to_remove);
}
<EnclaveRegistry<T>>::remove(new_enclaves_count);
Ok(())
}
fn unregister_silent_workers(now: T::Moment) {
let minimum = (now - T::MaxSilenceTime::get()).saturated_into::<u64>();
let silent_workers = <EnclaveRegistry<T>>::iter()
.filter(|e| e.1.timestamp < minimum)
.map(|e| e.1.pubkey);
for index in silent_workers {
let result = Self::remove_enclave(&index);
match result {
Ok(_) => {
log::info!("Unregister enclave because silent worker : {:?}", index);
Self::deposit_event(RawEvent::RemovedEnclave(index));
}
Err(e) => {
log::error!("Cannot unregister enclave : {:?}", e);
}
};
}
}
#[cfg(not(feature = "skip-ias-check"))]
fn verify_report(
sender: &T::AccountId,
ra_report: Vec<u8>,
) -> Result<SgxReport, sp_runtime::DispatchError> {
let report = verify_ias_report(&ra_report)
.map_err(|_| <Error<T>>::RemoteAttestationVerificationFailed)?;
log::info!("RA Report: {:?}", report);
let enclave_signer = T::AccountId::decode(&mut &report.pubkey[..])
.map_err(|_| <Error<T>>::EnclaveSignerDecodeError)?;
ensure!(
sender == &enclave_signer,
<Error<T>>::SenderIsNotAttestedEnclave
);
// TODO: activate state checks as soon as we've fixed our setup
// ensure!((report.status == SgxStatus::Ok) | (report.status == SgxStatus::ConfigurationNeeded),
// "RA status is insufficient");
// log::info!("teerex: status is acceptable");
Self::ensure_timestamp_within_24_hours(report.timestamp)?;
Ok(report)
}
#[cfg(not(feature = "skip-ias-check"))]
fn ensure_timestamp_within_24_hours(report_timestamp: u64) -> DispatchResult {
use sp_runtime::traits::CheckedSub;
let elapsed_time = <timestamp::Pallet<T>>::get()
.checked_sub(&T::Moment::saturated_from(report_timestamp))
.ok_or("Underflow while calculating elapsed time since report creation")?;
if elapsed_time < T::MomentsPerDay::get() {
Ok(())
} else {
Err(<Error<T>>::RemoteAttestationTooOld.into())
}
}
}
impl<T: Config> OnTimestampSet<T::Moment> for Module<T> {
fn on_timestamp_set(moment: T::Moment) {
Self::unregister_silent_workers(moment)
}
}
mod benchmarking;
#[cfg(test)]
mod mock;
mod test_utils;
#[cfg(test)]
mod tests;
pub mod weights;
| {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
} | conditional_block |
lib.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResult,
ensure,
traits::{Currency, ExistenceRequirement, Get, OnTimestampSet},
weights::{DispatchClass, Pays},
};
use frame_system::{self as system, ensure_signed};
use sp_core::H256;
use sp_runtime::traits::SaturatedConversion;
use sp_std::prelude::*;
use sp_std::str;
#[cfg(not(feature = "skip-ias-check"))]
use ias_verify::{verify_ias_report, SgxReport};
pub use crate::weights::WeightInfo;
use ias_verify::SgxBuildMode;
pub trait Config: system::Config + timestamp::Config {
type Event: From<Event<Self>> + Into<<Self as system::Config>::Event>;
type Currency: Currency<<Self as system::Config>::AccountId>;
type MomentsPerDay: Get<Self::Moment>;
type WeightInfo: WeightInfo;
type MaxSilenceTime: Get<Self::Moment>;
}
const MAX_RA_REPORT_LEN: usize = 4096;
const MAX_URL_LEN: usize = 256;
#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, sp_core::RuntimeDebug)]
pub struct Enclave<PubKey, Url> {
pub pubkey: PubKey, // FIXME: this is redundant information
pub mr_enclave: [u8; 32],
// Todo: make timestamp: Moment
pub timestamp: u64, // unix epoch in milliseconds
pub url: Url, // utf8 encoded url
pub sgx_mode: SgxBuildMode,
}
impl<PubKey, Url> Enclave<PubKey, Url> {
pub fn new(
pubkey: PubKey,
mr_enclave: [u8; 32],
timestamp: u64,
url: Url,
sgx_build_mode: SgxBuildMode,
) -> Self {
Enclave {
pubkey,
mr_enclave,
timestamp,
url,
sgx_mode: sgx_build_mode,
}
}
}
pub type ShardIdentifier = H256;
// Disambiguate associated types
pub type AccountId<T> = <T as frame_system::Config>::AccountId;
pub type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountId<T>>>::Balance;
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub struct Request {
pub shard: ShardIdentifier,
pub cyphertext: Vec<u8>,
}
decl_event!(
pub enum Event<T>
where
<T as system::Config>::AccountId,
{
AddedEnclave(AccountId, Vec<u8>),
RemovedEnclave(AccountId),
UpdatedIpfsHash(ShardIdentifier, u64, Vec<u8>),
Forwarded(ShardIdentifier),
ShieldFunds(Vec<u8>),
UnshieldedFunds(AccountId),
CallConfirmed(AccountId, H256),
BlockConfirmed(AccountId, H256),
}
);
decl_storage! {
trait Store for Module<T: Config> as Teerex {
// Simple lists are not supported in runtime modules as theoretically O(n)
// operations can be executed while only being charged O(1), see substrate
// Kitties tutorial Chapter 2, Tracking all Kitties.
// watch out: we start indexing with 1 instead of zero in order to
// avoid ambiguity between Null and 0
pub EnclaveRegistry get(fn enclave): map hasher(blake2_128_concat) u64 => Enclave<T::AccountId, Vec<u8>>;
pub EnclaveCount get(fn enclave_count): u64;
pub EnclaveIndex get(fn enclave_index): map hasher(blake2_128_concat) T::AccountId => u64;
pub LatestIpfsHash get(fn latest_ipfs_hash) : map hasher(blake2_128_concat) ShardIdentifier => Vec<u8>;
// enclave index of the worker that recently committed an update
pub WorkerForShard get(fn worker_for_shard) : map hasher(blake2_128_concat) ShardIdentifier => u64;
pub ConfirmedCalls get(fn confirmed_calls): map hasher(blake2_128_concat) H256 => u64;
//pub ConfirmedBlocks get(fn confirmed_blocks): map hasher(blake2_128_concat) H256 => u64;
pub AllowSGXDebugMode get(fn allow_sgx_debug_mode) config(allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register his enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if!<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from withing the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if!<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
};
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
/// Our list implementation would introduce holes in out list if if we try to remove elements from the middle.
/// As the order of the enclave entries is not important, we use the swap an pop method to remove elements from
/// the registry.
fn swap_and_pop(index_to_remove: u64, new_enclaves_count: u64) -> DispatchResult {
if index_to_remove!= new_enclaves_count {
let last_enclave = <EnclaveRegistry<T>>::get(&new_enclaves_count);
<EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
<EnclaveIndex<T>>::insert(last_enclave.pubkey, index_to_remove);
}
<EnclaveRegistry<T>>::remove(new_enclaves_count);
Ok(())
}
fn unregister_silent_workers(now: T::Moment) {
let minimum = (now - T::MaxSilenceTime::get()).saturated_into::<u64>();
let silent_workers = <EnclaveRegistry<T>>::iter()
.filter(|e| e.1.timestamp < minimum)
.map(|e| e.1.pubkey);
for index in silent_workers {
let result = Self::remove_enclave(&index);
match result {
Ok(_) => {
log::info!("Unregister enclave because silent worker : {:?}", index);
Self::deposit_event(RawEvent::RemovedEnclave(index));
}
Err(e) => {
log::error!("Cannot unregister enclave : {:?}", e);
}
};
}
}
#[cfg(not(feature = "skip-ias-check"))]
fn verify_report(
sender: &T::AccountId,
ra_report: Vec<u8>,
) -> Result<SgxReport, sp_runtime::DispatchError> {
let report = verify_ias_report(&ra_report)
.map_err(|_| <Error<T>>::RemoteAttestationVerificationFailed)?;
log::info!("RA Report: {:?}", report);
let enclave_signer = T::AccountId::decode(&mut &report.pubkey[..])
.map_err(|_| <Error<T>>::EnclaveSignerDecodeError)?;
ensure!(
sender == &enclave_signer,
<Error<T>>::SenderIsNotAttestedEnclave
);
// TODO: activate state checks as soon as we've fixed our setup
// ensure!((report.status == SgxStatus::Ok) | (report.status == SgxStatus::ConfigurationNeeded),
// "RA status is insufficient");
// log::info!("teerex: status is acceptable");
Self::ensure_timestamp_within_24_hours(report.timestamp)?;
Ok(report)
}
#[cfg(not(feature = "skip-ias-check"))]
fn ensure_timestamp_within_24_hours(report_timestamp: u64) -> DispatchResult |
}
impl<T: Config> OnTimestampSet<T::Moment> for Module<T> {
fn on_timestamp_set(moment: T::Moment) {
Self::unregister_silent_workers(moment)
}
}
mod benchmarking;
#[cfg(test)]
mod mock;
mod test_utils;
#[cfg(test)]
mod tests;
pub mod weights;
| {
use sp_runtime::traits::CheckedSub;
let elapsed_time = <timestamp::Pallet<T>>::get()
.checked_sub(&T::Moment::saturated_from(report_timestamp))
.ok_or("Underflow while calculating elapsed time since report creation")?;
if elapsed_time < T::MomentsPerDay::get() {
Ok(())
} else {
Err(<Error<T>>::RemoteAttestationTooOld.into())
}
} | identifier_body |
lib.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResult,
ensure,
traits::{Currency, ExistenceRequirement, Get, OnTimestampSet},
weights::{DispatchClass, Pays},
};
use frame_system::{self as system, ensure_signed};
use sp_core::H256;
use sp_runtime::traits::SaturatedConversion;
use sp_std::prelude::*;
use sp_std::str;
#[cfg(not(feature = "skip-ias-check"))]
use ias_verify::{verify_ias_report, SgxReport};
pub use crate::weights::WeightInfo;
use ias_verify::SgxBuildMode;
pub trait Config: system::Config + timestamp::Config {
type Event: From<Event<Self>> + Into<<Self as system::Config>::Event>;
type Currency: Currency<<Self as system::Config>::AccountId>;
type MomentsPerDay: Get<Self::Moment>;
type WeightInfo: WeightInfo;
type MaxSilenceTime: Get<Self::Moment>;
}
const MAX_RA_REPORT_LEN: usize = 4096;
const MAX_URL_LEN: usize = 256;
#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, sp_core::RuntimeDebug)]
pub struct Enclave<PubKey, Url> {
pub pubkey: PubKey, // FIXME: this is redundant information
pub mr_enclave: [u8; 32],
// Todo: make timestamp: Moment
pub timestamp: u64, // unix epoch in milliseconds
pub url: Url, // utf8 encoded url
pub sgx_mode: SgxBuildMode,
}
impl<PubKey, Url> Enclave<PubKey, Url> {
pub fn new(
pubkey: PubKey,
mr_enclave: [u8; 32],
timestamp: u64,
url: Url,
sgx_build_mode: SgxBuildMode,
) -> Self {
Enclave {
pubkey,
mr_enclave,
timestamp,
url,
sgx_mode: sgx_build_mode,
}
}
}
pub type ShardIdentifier = H256;
// Disambiguate associated types
pub type AccountId<T> = <T as frame_system::Config>::AccountId;
pub type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountId<T>>>::Balance;
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub struct Request {
pub shard: ShardIdentifier,
pub cyphertext: Vec<u8>,
}
decl_event!(
pub enum Event<T>
where
<T as system::Config>::AccountId,
{
AddedEnclave(AccountId, Vec<u8>),
RemovedEnclave(AccountId),
UpdatedIpfsHash(ShardIdentifier, u64, Vec<u8>),
Forwarded(ShardIdentifier),
ShieldFunds(Vec<u8>),
UnshieldedFunds(AccountId),
CallConfirmed(AccountId, H256),
BlockConfirmed(AccountId, H256),
}
);
decl_storage! {
trait Store for Module<T: Config> as Teerex {
// Simple lists are not supported in runtime modules as theoretically O(n)
// operations can be executed while only being charged O(1), see substrate
// Kitties tutorial Chapter 2, Tracking all Kitties.
// watch out: we start indexing with 1 instead of zero in order to
// avoid ambiguity between Null and 0
pub EnclaveRegistry get(fn enclave): map hasher(blake2_128_concat) u64 => Enclave<T::AccountId, Vec<u8>>;
pub EnclaveCount get(fn enclave_count): u64;
pub EnclaveIndex get(fn enclave_index): map hasher(blake2_128_concat) T::AccountId => u64;
pub LatestIpfsHash get(fn latest_ipfs_hash) : map hasher(blake2_128_concat) ShardIdentifier => Vec<u8>;
// enclave index of the worker that recently committed an update
pub WorkerForShard get(fn worker_for_shard) : map hasher(blake2_128_concat) ShardIdentifier => u64;
pub ConfirmedCalls get(fn confirmed_calls): map hasher(blake2_128_concat) H256 => u64;
//pub ConfirmedBlocks get(fn confirmed_blocks): map hasher(blake2_128_concat) H256 => u64;
pub AllowSGXDebugMode get(fn allow_sgx_debug_mode) config(allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register his enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if!<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from withing the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if!<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
};
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
/// Our list implementation would introduce holes in out list if if we try to remove elements from the middle.
/// As the order of the enclave entries is not important, we use the swap an pop method to remove elements from
/// the registry.
fn | (index_to_remove: u64, new_enclaves_count: u64) -> DispatchResult {
if index_to_remove!= new_enclaves_count {
let last_enclave = <EnclaveRegistry<T>>::get(&new_enclaves_count);
<EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
<EnclaveIndex<T>>::insert(last_enclave.pubkey, index_to_remove);
}
<EnclaveRegistry<T>>::remove(new_enclaves_count);
Ok(())
}
fn unregister_silent_workers(now: T::Moment) {
let minimum = (now - T::MaxSilenceTime::get()).saturated_into::<u64>();
let silent_workers = <EnclaveRegistry<T>>::iter()
.filter(|e| e.1.timestamp < minimum)
.map(|e| e.1.pubkey);
for index in silent_workers {
let result = Self::remove_enclave(&index);
match result {
Ok(_) => {
log::info!("Unregister enclave because silent worker : {:?}", index);
Self::deposit_event(RawEvent::RemovedEnclave(index));
}
Err(e) => {
log::error!("Cannot unregister enclave : {:?}", e);
}
};
}
}
#[cfg(not(feature = "skip-ias-check"))]
fn verify_report(
sender: &T::AccountId,
ra_report: Vec<u8>,
) -> Result<SgxReport, sp_runtime::DispatchError> {
let report = verify_ias_report(&ra_report)
.map_err(|_| <Error<T>>::RemoteAttestationVerificationFailed)?;
log::info!("RA Report: {:?}", report);
let enclave_signer = T::AccountId::decode(&mut &report.pubkey[..])
.map_err(|_| <Error<T>>::EnclaveSignerDecodeError)?;
ensure!(
sender == &enclave_signer,
<Error<T>>::SenderIsNotAttestedEnclave
);
// TODO: activate state checks as soon as we've fixed our setup
// ensure!((report.status == SgxStatus::Ok) | (report.status == SgxStatus::ConfigurationNeeded),
// "RA status is insufficient");
// log::info!("teerex: status is acceptable");
Self::ensure_timestamp_within_24_hours(report.timestamp)?;
Ok(report)
}
#[cfg(not(feature = "skip-ias-check"))]
fn ensure_timestamp_within_24_hours(report_timestamp: u64) -> DispatchResult {
use sp_runtime::traits::CheckedSub;
let elapsed_time = <timestamp::Pallet<T>>::get()
.checked_sub(&T::Moment::saturated_from(report_timestamp))
.ok_or("Underflow while calculating elapsed time since report creation")?;
if elapsed_time < T::MomentsPerDay::get() {
Ok(())
} else {
Err(<Error<T>>::RemoteAttestationTooOld.into())
}
}
}
impl<T: Config> OnTimestampSet<T::Moment> for Module<T> {
fn on_timestamp_set(moment: T::Moment) {
Self::unregister_silent_workers(moment)
}
}
mod benchmarking;
#[cfg(test)]
mod mock;
mod test_utils;
#[cfg(test)]
mod tests;
pub mod weights;
| swap_and_pop | identifier_name |
lib.rs | /*
Copyright 2021 Integritee AG and Supercomputing Systems AG
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#![cfg_attr(not(feature = "std"), no_std)]
use codec::{Decode, Encode};
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResult,
ensure,
traits::{Currency, ExistenceRequirement, Get, OnTimestampSet},
weights::{DispatchClass, Pays},
};
use frame_system::{self as system, ensure_signed};
use sp_core::H256;
use sp_runtime::traits::SaturatedConversion;
use sp_std::prelude::*;
use sp_std::str;
#[cfg(not(feature = "skip-ias-check"))]
use ias_verify::{verify_ias_report, SgxReport};
pub use crate::weights::WeightInfo;
use ias_verify::SgxBuildMode;
pub trait Config: system::Config + timestamp::Config {
type Event: From<Event<Self>> + Into<<Self as system::Config>::Event>;
type Currency: Currency<<Self as system::Config>::AccountId>;
type MomentsPerDay: Get<Self::Moment>;
type WeightInfo: WeightInfo;
type MaxSilenceTime: Get<Self::Moment>;
}
const MAX_RA_REPORT_LEN: usize = 4096;
const MAX_URL_LEN: usize = 256;
#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, sp_core::RuntimeDebug)]
pub struct Enclave<PubKey, Url> {
pub pubkey: PubKey, // FIXME: this is redundant information
pub mr_enclave: [u8; 32],
// Todo: make timestamp: Moment
pub timestamp: u64, // unix epoch in milliseconds
pub url: Url, // utf8 encoded url
pub sgx_mode: SgxBuildMode,
}
impl<PubKey, Url> Enclave<PubKey, Url> {
pub fn new(
pubkey: PubKey,
mr_enclave: [u8; 32],
timestamp: u64,
url: Url,
sgx_build_mode: SgxBuildMode,
) -> Self {
Enclave {
pubkey,
mr_enclave,
timestamp,
url,
sgx_mode: sgx_build_mode,
}
}
}
pub type ShardIdentifier = H256;
// Disambiguate associated types
pub type AccountId<T> = <T as frame_system::Config>::AccountId;
pub type BalanceOf<T> = <<T as Config>::Currency as Currency<AccountId<T>>>::Balance;
#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, sp_core::RuntimeDebug)]
pub struct Request {
pub shard: ShardIdentifier,
pub cyphertext: Vec<u8>,
}
decl_event!(
pub enum Event<T>
where
<T as system::Config>::AccountId,
{
AddedEnclave(AccountId, Vec<u8>),
RemovedEnclave(AccountId),
UpdatedIpfsHash(ShardIdentifier, u64, Vec<u8>),
Forwarded(ShardIdentifier),
ShieldFunds(Vec<u8>),
UnshieldedFunds(AccountId), | CallConfirmed(AccountId, H256),
BlockConfirmed(AccountId, H256),
}
);
decl_storage! {
trait Store for Module<T: Config> as Teerex {
// Simple lists are not supported in runtime modules as theoretically O(n)
// operations can be executed while only being charged O(1), see substrate
// Kitties tutorial Chapter 2, Tracking all Kitties.
// watch out: we start indexing with 1 instead of zero in order to
// avoid ambiguity between Null and 0
pub EnclaveRegistry get(fn enclave): map hasher(blake2_128_concat) u64 => Enclave<T::AccountId, Vec<u8>>;
pub EnclaveCount get(fn enclave_count): u64;
pub EnclaveIndex get(fn enclave_index): map hasher(blake2_128_concat) T::AccountId => u64;
pub LatestIpfsHash get(fn latest_ipfs_hash) : map hasher(blake2_128_concat) ShardIdentifier => Vec<u8>;
// enclave index of the worker that recently committed an update
pub WorkerForShard get(fn worker_for_shard) : map hasher(blake2_128_concat) ShardIdentifier => u64;
pub ConfirmedCalls get(fn confirmed_calls): map hasher(blake2_128_concat) H256 => u64;
//pub ConfirmedBlocks get(fn confirmed_blocks): map hasher(blake2_128_concat) H256 => u64;
pub AllowSGXDebugMode get(fn allow_sgx_debug_mode) config(allow_sgx_debug_mode): bool;
}
}
decl_module! {
pub struct Module<T: Config> for enum Call where origin: T::Origin {
type Error = Error<T>;
fn deposit_event() = default;
// the integritee-service wants to register his enclave
#[weight = (<T as Config>::WeightInfo::register_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn register_enclave(origin, ra_report: Vec<u8>, worker_url: Vec<u8>) -> DispatchResult {
log::info!("teerex: called into runtime call register_enclave()");
let sender = ensure_signed(origin)?;
ensure!(ra_report.len() <= MAX_RA_REPORT_LEN, <Error<T>>::RaReportTooLong);
ensure!(worker_url.len() <= MAX_URL_LEN, <Error<T>>::EnclaveUrlTooLong);
log::info!("teerex: parameter lenght ok");
#[cfg(not(feature = "skip-ias-check"))]
let enclave = Self::verify_report(&sender, ra_report)
.map(|report| Enclave::new(sender.clone(), report.mr_enclave, report.timestamp, worker_url.clone(), report.build_mode))?;
#[cfg(not(feature = "skip-ias-check"))]
if!<AllowSGXDebugMode>::get() && enclave.sgx_mode == SgxBuildMode::Debug {
log::error!("substraTEE_registry: debug mode is not allowed to attest!");
return Err(<Error<T>>::SgxModeNotAllowed.into());
}
#[cfg(feature = "skip-ias-check")]
log::warn!("[teerex]: Skipping remote attestation check. Only dev-chains are allowed to do this!");
#[cfg(feature = "skip-ias-check")]
let enclave = Enclave::new(
sender.clone(),
// insert mrenclave if the ra_report represents one, otherwise insert default
<[u8; 32]>::decode(&mut ra_report.as_slice()).unwrap_or_default(),
<timestamp::Pallet<T>>::get().saturated_into(),
worker_url.clone(),
SgxBuildMode::default()
);
Self::add_enclave(&sender, &enclave)?;
Self::deposit_event(RawEvent::AddedEnclave(sender, worker_url));
Ok(())
}
// TODO: we can't expect a dead enclave to unregister itself
// alternative: allow anyone to unregister an enclave that hasn't recently supplied a RA
// such a call should be feeless if successful
#[weight = (<T as Config>::WeightInfo::unregister_enclave(), DispatchClass::Normal, Pays::Yes)]
pub fn unregister_enclave(origin) -> DispatchResult {
let sender = ensure_signed(origin)?;
Self::remove_enclave(&sender)?;
Self::deposit_event(RawEvent::RemovedEnclave(sender));
Ok(())
}
#[weight = (<T as Config>::WeightInfo::call_worker(), DispatchClass::Normal, Pays::Yes)]
pub fn call_worker(origin, request: Request) -> DispatchResult {
let _sender = ensure_signed(origin)?;
log::info!("call_worker with {:?}", request);
Self::deposit_event(RawEvent::Forwarded(request.shard));
Ok(())
}
// the integritee-service calls this function for every processed call to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_call(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_call(origin, shard: ShardIdentifier, call_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(), <Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("call confirmed with shard {:?}, call hash {:?}, ipfs_hash {:?}", shard, call_hash, ipfs_hash);
Self::deposit_event(RawEvent::CallConfirmed(sender, call_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
// the integritee-service calls this function for every processed block to confirm a state update
#[weight = (<T as Config>::WeightInfo::confirm_block(), DispatchClass::Normal, Pays::Yes)]
pub fn confirm_block(origin, shard: ShardIdentifier, block_hash: H256, ipfs_hash: Vec<u8>) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = Self::enclave_index(&sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == shard.encode(),<Error<T>>::WrongMrenclaveForShard);
<LatestIpfsHash>::insert(shard, ipfs_hash.clone());
<WorkerForShard>::insert(shard, sender_index);
log::debug!("block confirmed with shard {:?}, block hash {:?}, ipfs_hash {:?}", shard, block_hash, ipfs_hash);
Self::deposit_event(RawEvent::BlockConfirmed(sender, block_hash));
Self::deposit_event(RawEvent::UpdatedIpfsHash(shard, sender_index, ipfs_hash));
Ok(())
}
/// Sent by a client who requests to get shielded funds managed by an enclave. For this on-chain balance is sent to the bonding_account of the enclave.
/// The bonding_account does not have a private key as the balance on this account is exclusively managed from withing the pallet_teerex.
/// Note: The bonding_account is bit-equivalent to the worker shard.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn shield_funds(origin, incognito_account_encrypted: Vec<u8>, amount: BalanceOf<T>, bonding_account: T::AccountId) -> DispatchResult {
let sender = ensure_signed(origin)?;
T::Currency::transfer(&sender, &bonding_account, amount, ExistenceRequirement::AllowDeath)?;
Self::deposit_event(RawEvent::ShieldFunds(incognito_account_encrypted));
Ok(())
}
/// Sent by enclaves only as a result of an `unshield` request from a client to an enclave.
#[weight = (1000, DispatchClass::Normal, Pays::No)]
pub fn unshield_funds(origin, public_account: T::AccountId, amount: BalanceOf<T>, bonding_account: T::AccountId, call_hash: H256) -> DispatchResult {
let sender = ensure_signed(origin)?;
ensure!(<EnclaveIndex<T>>::contains_key(&sender), <Error<T>>::EnclaveIsNotRegistered);
let sender_index = <EnclaveIndex<T>>::get(sender);
ensure!(<EnclaveRegistry::<T>>::get(sender_index).mr_enclave.encode() == bonding_account.encode(),<Error<T>>::WrongMrenclaveForBondingAccount);
if!<ConfirmedCalls>::contains_key(call_hash) {
log::info!("First confirmation for call: {:?}", call_hash);
T::Currency::transfer(&bonding_account, &public_account, amount, ExistenceRequirement::AllowDeath)?;
<ConfirmedCalls>::insert(call_hash, 0);
Self::deposit_event(RawEvent::UnshieldedFunds(public_account));
} else {
log::info!("Second confirmation for call: {:?}", call_hash);
}
<ConfirmedCalls>::mutate(call_hash, |confirmations| {*confirmations += 1 });
Ok(())
}
}
}
decl_error! {
pub enum Error for Module<T: Config> {
/// failed to decode enclave signer
EnclaveSignerDecodeError,
/// Sender does not match attested enclave in report
SenderIsNotAttestedEnclave,
/// Verifying RA report failed
RemoteAttestationVerificationFailed,
RemoteAttestationTooOld,
///The enclave cannot attest, because its building mode is not allowed
SgxModeNotAllowed,
///The enclave is not registered
EnclaveIsNotRegistered,
///The bonding account doesn't match the enclave
WrongMrenclaveForBondingAccount,
///The shard doesn't match the enclave
WrongMrenclaveForShard,
///The worker url is too long
EnclaveUrlTooLong,
///The RA report is too long
RaReportTooLong,
///The enclave doesn't exists
InexistentEnclave,
}
}
impl<T: Config> Module<T> {
fn add_enclave(
sender: &T::AccountId,
enclave: &Enclave<T::AccountId, Vec<u8>>,
) -> DispatchResult {
let enclave_idx = if <EnclaveIndex<T>>::contains_key(sender) {
log::info!("Updating already registered enclave");
<EnclaveIndex<T>>::get(sender)
} else {
let enclaves_count = Self::enclave_count()
.checked_add(1)
.ok_or("[Teerex]: Overflow adding new enclave to registry")?;
<EnclaveIndex<T>>::insert(sender, enclaves_count);
<EnclaveCount>::put(enclaves_count);
enclaves_count
};
<EnclaveRegistry<T>>::insert(enclave_idx, &enclave);
Ok(())
}
fn remove_enclave(sender: &T::AccountId) -> DispatchResult {
ensure!(
<EnclaveIndex<T>>::contains_key(sender),
<Error<T>>::InexistentEnclave
);
let index_to_remove = <EnclaveIndex<T>>::take(sender);
let enclaves_count = Self::enclave_count();
let new_enclaves_count = enclaves_count
.checked_sub(1)
.ok_or("[Teerex]: Underflow removing an enclave from the registry")?;
Self::swap_and_pop(index_to_remove, new_enclaves_count + 1)?;
<EnclaveCount>::put(new_enclaves_count);
Ok(())
}
/// Our list implementation would introduce holes in out list if if we try to remove elements from the middle.
/// As the order of the enclave entries is not important, we use the swap an pop method to remove elements from
/// the registry.
fn swap_and_pop(index_to_remove: u64, new_enclaves_count: u64) -> DispatchResult {
if index_to_remove!= new_enclaves_count {
let last_enclave = <EnclaveRegistry<T>>::get(&new_enclaves_count);
<EnclaveRegistry<T>>::insert(index_to_remove, &last_enclave);
<EnclaveIndex<T>>::insert(last_enclave.pubkey, index_to_remove);
}
<EnclaveRegistry<T>>::remove(new_enclaves_count);
Ok(())
}
fn unregister_silent_workers(now: T::Moment) {
let minimum = (now - T::MaxSilenceTime::get()).saturated_into::<u64>();
let silent_workers = <EnclaveRegistry<T>>::iter()
.filter(|e| e.1.timestamp < minimum)
.map(|e| e.1.pubkey);
for index in silent_workers {
let result = Self::remove_enclave(&index);
match result {
Ok(_) => {
log::info!("Unregister enclave because silent worker : {:?}", index);
Self::deposit_event(RawEvent::RemovedEnclave(index));
}
Err(e) => {
log::error!("Cannot unregister enclave : {:?}", e);
}
};
}
}
#[cfg(not(feature = "skip-ias-check"))]
/// Verifies the raw Intel Attestation Service report `ra_report` and checks that it
/// belongs to `sender`.
///
/// Steps:
/// * verify the report itself (delegated to `verify_ias_report`; implementation not
///   visible here),
/// * decode the enclave signer key embedded in the report into an `AccountId` and
///   require it to equal `sender`,
/// * reject reports older than 24 hours.
///
/// Returns the parsed `SgxReport` on success.
fn verify_report(
sender: &T::AccountId,
ra_report: Vec<u8>,
) -> Result<SgxReport, sp_runtime::DispatchError> {
let report = verify_ias_report(&ra_report)
.map_err(|_| <Error<T>>::RemoteAttestationVerificationFailed)?;
log::info!("RA Report: {:?}", report);
// The report carries the enclave's signing key; it must decode to an account id.
let enclave_signer = T::AccountId::decode(&mut &report.pubkey[..])
.map_err(|_| <Error<T>>::EnclaveSignerDecodeError)?;
// Only the attested enclave itself may register its own report.
ensure!(
sender == &enclave_signer,
<Error<T>>::SenderIsNotAttestedEnclave
);
// TODO: activate state checks as soon as we've fixed our setup
// ensure!((report.status == SgxStatus::Ok) | (report.status == SgxStatus::ConfigurationNeeded),
// "RA status is insufficient");
// log::info!("teerex: status is acceptable");
Self::ensure_timestamp_within_24_hours(report.timestamp)?;
Ok(report)
}
#[cfg(not(feature = "skip-ias-check"))]
fn ensure_timestamp_within_24_hours(report_timestamp: u64) -> DispatchResult {
use sp_runtime::traits::CheckedSub;
let elapsed_time = <timestamp::Pallet<T>>::get()
.checked_sub(&T::Moment::saturated_from(report_timestamp))
.ok_or("Underflow while calculating elapsed time since report creation")?;
if elapsed_time < T::MomentsPerDay::get() {
Ok(())
} else {
Err(<Error<T>>::RemoteAttestationTooOld.into())
}
}
}
/// Hook invoked by the `timestamp` pallet whenever the block timestamp is set;
/// used here to prune enclaves that stopped reporting.
impl<T: Config> OnTimestampSet<T::Moment> for Module<T> {
fn on_timestamp_set(moment: T::Moment) {
Self::unregister_silent_workers(moment)
}
}
mod benchmarking;
#[cfg(test)]
mod mock;
mod test_utils;
#[cfg(test)]
mod tests;
pub mod weights; | random_line_split |
|
mod.rs | .
// See the License for the specific language governing permissions and
// limitations under the License.
pub use self::metadata::{BinaryAttribute, IndexState, IndexType, ViewWithMetadata};
use std::{borrow::Cow, fmt, iter::Peekable, marker::PhantomData};
use super::{
db::{Change, ChangesMut, ChangesRef, ForkIter, ViewChanges},
BinaryKey, BinaryValue, Iter as BytesIter, Iterator as BytesIterator, Snapshot,
};
mod metadata;
#[cfg(test)]
mod tests;
/// Separator between the name and the additional bytes in family indexes.
const INDEX_NAME_SEPARATOR: &[u8] = &[0];
/// Represents current view of the database by specified `address` and
/// changes that took place after that view had been created. `View`
/// implementation provides an interface to work with related `changes`.
pub struct View<T: RawAccess> {
// Address (name plus optional key bytes) of the index this view refers to.
address: IndexAddress,
// Underlying database access (snapshot or fork).
index_access: T,
// Pending changes layered over the snapshot; `()` for read-only accesses.
changes: T::Changes,
}
impl<T: RawAccess> fmt::Debug for View<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Only the address is printed; the access handle and pending changes are omitted.
f.debug_struct("View")
.field("address", &self.address)
.finish()
}
}
/// Utility trait to provide optional references to `ViewChanges`. | pub trait ChangeSet {
fn as_ref(&self) -> Option<&ViewChanges>;
/// Provides mutable reference to changes. The implementation for a `RawAccessMut` type
/// should always return `Some(_)`.
fn as_mut(&mut self) -> Option<&mut ViewChanges>;
}
/// No-op implementation used in `Snapshot`: a plain snapshot has no change set.
impl ChangeSet for () {
fn as_ref(&self) -> Option<&ViewChanges> {
None
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
/// Read-only change set: readable, never mutable.
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
/// Mutable change set of a fork: both readable and writable.
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows reading data from the database.
///
/// This trait rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows mutating data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
///     list: ListIndex<T::Base, String>,
///     map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
///     T::Base: RawAccessMut,
/// {
///     pub fn mutate(&mut self) {
///         self.list.push("foo".to_owned());
///         self.map.put(&1, 2);
///     }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
// Any access whose change set is the mutable `ChangesMut` is automatically mutable.
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
// Name of the index, possibly dot-separated (see `prepend_name` / `append_name`).
pub(super) name: String,
// Optional key bytes selecting a member within an index family.
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates an empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates a new `IndexAddress` with the specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns the name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns the bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns a tuple consisting of `name` and `bytes` concatenated with the provided `key`.
/// This is used to obtain a single value (serialized as a byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
// No family key: the user key is used as-is, without copying.
None => Cow::Borrowed(key),
// Family index: the family key bytes prefix the user key.
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey +?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
/// Creates an address consisting of a name only.
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
/// Creates an address within the index family `name`, selected by `key`.
impl<'a, K: BinaryKey +?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
/// Implements read-only `RawAccess` (empty change set) and identity `AsReadonly`
/// for a snapshot-like type.
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
// A snapshot never carries changes.
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
// A snapshot is already read-only; conversion is a cheap clone.
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
// All common snapshot handle types get the read-only access impls.
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
/// Serializes `key` into its binary key representation.
fn key_bytes<K: BinaryKey +?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
// Shortcut to the underlying snapshot.
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
// Looks up the raw value for `key`: pending changes take precedence,
// then the snapshot is consulted.
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
// NOTE(review): `is_empty` appears to mean "the view was cleared", so the
// snapshot contents must be ignored — confirm against `ViewChanges` in `db`.
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
// Same precedence rules as `get_bytes`, but only tests for presence.
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
// Raw iterator starting at `from`, merging the snapshot with pending changes.
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// Ignore all changes from the snapshot
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
// Merge snapshot entries (restricted to this view's prefix) with changes.
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey +?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
// Unlike `put`, a read-only change set is tolerated: the write is silently dropped.
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
// `unwrap` is safe: per the `ChangeSet` contract, `as_mut` always returns
// `Some(_)` for a `RawAccessMut` access.
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey +?Sized,
{
// Deletion is recorded as a `Change::Delete` marker, not applied immediately.
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
// Raw iterator over the snapshot, positioned at `from` on construction.
inner: BytesIter<'a>,
// Only keys starting with this prefix belong to the view.
prefix: Vec<u8>,
// Set permanently once a key outside `prefix` is encountered.
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// `inner` is a trait object without `Debug`, so it is omitted.
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
// Precondition: `from` must itself lie within the view's prefix.
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
// Keys are yielded with the view prefix stripped off.
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
// Keys are ordered, so the first key outside the prefix ends iteration for good.
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
}
/// Iterator over a set of changes.
struct ChangesIter<'a, T: Iterator + 'a> {
// Peekable so that `BytesIterator::peek` can look ahead without consuming.
inner: Peekable<T>,
// Ties the iterator to the lifetime of the borrowed changes.
_lifetime: PhantomData<&'a ()>,
}
impl<'a, T> ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn new(iterator: T) -> Self {
ChangesIter {
inner: iterator.peekable(),
_lifetime: PhantomData,
}
}
}
impl<'a, T> BytesIterator for ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
/// Returns the next visible entry, skipping `Delete` records.
fn next(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.next() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
// Deleted entries are invisible to consumers of this iterator.
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
/// Peeks at the next visible entry without consuming it.
///
/// BUGFIX: the previous implementation looped on `self.inner.peek()` with an
/// empty `Delete` arm; since `Peekable::peek` never advances the underlying
/// iterator, the loop re-peeked the same element forever as soon as the next
/// change was a deletion. `Delete` records are now consumed up front, which
/// is safe because `next` would skip them anyway.
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
// Drop leading `Delete` records; they are never yielded.
while let Some((_, &Change::Delete)) = self.inner.peek() {
self.inner.next();
}
match self.inner.peek() {
Some((key, &Change::Put(ref value))) => Some((key.as_slice(), value.as_slice())),
_ => None,
}
}
}
/// An iterator over the entries of a `View`.
///
/// This struct is created by the [`iter`] or
/// [`iter_from`] method on [`View`]. See its documentation for details.
///
/// [`iter`]: struct.View.html#method.iter
/// [`iter_from`]: struct.View.html#method.iter_from
/// [`View`]: struct.View.html
pub struct Iter<'a, K, V> {
// Raw iterator over `(key, value)` byte slices.
base_iter: BytesIter<'a>,
// Only entries whose keys start with this prefix are yielded.
prefix: Vec<u8>,
// NOTE(review): presumably set once iteration leaves `prefix`; the
// `Iterator` impl is not fully visible in this chunk — confirm.
ended: bool,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<'a, K, V> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Iter(..)")
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V>
where
K: BinaryKey,
V: BinaryValue,
{
type Item = (K::Owned, V);
fn next(&mut self) -> | random_line_split |
|
mod.rs |
// See the License for the specific language governing permissions and
// limitations under the License.
pub use self::metadata::{BinaryAttribute, IndexState, IndexType, ViewWithMetadata};
use std::{borrow::Cow, fmt, iter::Peekable, marker::PhantomData};
use super::{
db::{Change, ChangesMut, ChangesRef, ForkIter, ViewChanges},
BinaryKey, BinaryValue, Iter as BytesIter, Iterator as BytesIterator, Snapshot,
};
mod metadata;
#[cfg(test)]
mod tests;
/// Separator between the name and the additional bytes in family indexes.
const INDEX_NAME_SEPARATOR: &[u8] = &[0];
/// Represents current view of the database by specified `address` and
/// changes that took place after that view had been created. `View`
/// implementation provides an interface to work with related `changes`.
pub struct View<T: RawAccess> {
address: IndexAddress,
index_access: T,
changes: T::Changes,
}
impl<T: RawAccess> fmt::Debug for View<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("View")
.field("address", &self.address)
.finish()
}
}
/// Utility trait to provide optional references to `ViewChanges`.
pub trait ChangeSet {
fn as_ref(&self) -> Option<&ViewChanges>;
/// Provides mutable reference to changes. The implementation for a `RawAccessMut` type
/// should always return `Some(_)`.
fn as_mut(&mut self) -> Option<&mut ViewChanges>;
}
/// No-op implementation used in `Snapshot`.
impl ChangeSet for () {
fn as_ref(&self) -> Option<&ViewChanges> |
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows to read data from the database.
///
/// This trait is rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows to mutate data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
/// list: ListIndex<T::Base, String>,
/// map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
/// T::Base: RawAccessMut,
/// {
/// pub fn mutate(&mut self) {
/// self.list.push("foo".to_owned());
/// self.map.put(&1, 2);
/// }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
pub(super) name: String,
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates new `IndexAddress` with specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns tuple consists of `name` and `bytes` concatenated with provided `key`.
/// This is used to obtain single value(serialized as byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
None => Cow::Borrowed(key),
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey +?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
impl<'a, K: BinaryKey +?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey +?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// Ignore all changes from the snapshot
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey +?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey +?Sized,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
inner: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
}
struct ChangesIter<'a, T: Iterator + 'a> {
inner: Peekable<T>,
_lifetime: PhantomData<&'a ()>,
}
/// Iterator over a set of changes.
impl<'a, T> ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn new(iterator: T) -> Self {
ChangesIter {
inner: iterator.peekable(),
_lifetime: PhantomData,
}
}
}
impl<'a, T> BytesIterator for ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
/// Returns the next visible entry, skipping `Delete` records.
fn next(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.next() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
// Deleted entries are invisible to consumers of this iterator.
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
/// Peeks at the next visible entry without consuming it.
///
/// BUGFIX: the previous implementation looped on `self.inner.peek()` with an
/// empty `Delete` arm; since `Peekable::peek` never advances the underlying
/// iterator, the loop re-peeked the same element forever as soon as the next
/// change was a deletion. `Delete` records are now consumed up front, which
/// is safe because `next` would skip them anyway.
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
// Drop leading `Delete` records; they are never yielded.
while let Some((_, &Change::Delete)) = self.inner.peek() {
self.inner.next();
}
match self.inner.peek() {
Some((key, &Change::Put(ref value))) => Some((key.as_slice(), value.as_slice())),
_ => None,
}
}
}
/// An iterator over the entries of a `View`.
///
/// This struct is created by the [`iter`] or
/// [`iter_from`] method on [`View`]. See its documentation for details.
///
/// [`iter`]: struct.BaseIndex.html#method.iter
/// [`iter_from`]: struct.BaseIndex.html#method.iter_from
/// [`BaseIndex`]: struct.BaseIndex.html
pub struct Iter<'a, K, V> {
base_iter: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<'a, K, V> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Iter(..)")
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V>
where
K: BinaryKey,
V: BinaryValue,
{
type Item = (K::Owned, V);
fn next(&mut self) | {
None
} | identifier_body |
mod.rs |
// See the License for the specific language governing permissions and
// limitations under the License.
pub use self::metadata::{BinaryAttribute, IndexState, IndexType, ViewWithMetadata};
use std::{borrow::Cow, fmt, iter::Peekable, marker::PhantomData};
use super::{
db::{Change, ChangesMut, ChangesRef, ForkIter, ViewChanges},
BinaryKey, BinaryValue, Iter as BytesIter, Iterator as BytesIterator, Snapshot,
};
mod metadata;
#[cfg(test)]
mod tests;
/// Separator between the name and the additional bytes in family indexes.
const INDEX_NAME_SEPARATOR: &[u8] = &[0];
/// Represents current view of the database by specified `address` and
/// changes that took place after that view had been created. `View`
/// implementation provides an interface to work with related `changes`.
pub struct View<T: RawAccess> {
address: IndexAddress,
index_access: T,
changes: T::Changes,
}
impl<T: RawAccess> fmt::Debug for View<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("View")
.field("address", &self.address)
.finish()
}
}
/// Utility trait to provide optional references to `ViewChanges`.
pub trait ChangeSet {
fn as_ref(&self) -> Option<&ViewChanges>;
/// Provides mutable reference to changes. The implementation for a `RawAccessMut` type
/// should always return `Some(_)`.
fn as_mut(&mut self) -> Option<&mut ViewChanges>;
}
/// No-op implementation used in `Snapshot`.
impl ChangeSet for () {
fn as_ref(&self) -> Option<&ViewChanges> {
None
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows to read data from the database.
///
/// This trait is rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows to mutate data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
/// list: ListIndex<T::Base, String>,
/// map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
/// T::Base: RawAccessMut,
/// {
/// pub fn mutate(&mut self) {
/// self.list.push("foo".to_owned());
/// self.map.put(&1, 2);
/// }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
pub(super) name: String,
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates new `IndexAddress` with specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns tuple consists of `name` and `bytes` concatenated with provided `key`.
/// This is used to obtain single value(serialized as byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
None => Cow::Borrowed(key),
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey +?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
impl<'a, K: BinaryKey +?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey +?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// Ignore all changes from the snapshot
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey +?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey +?Sized,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
inner: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => |
}
}
}
struct ChangesIter<'a, T: Iterator + 'a> {
inner: Peekable<T>,
_lifetime: PhantomData<&'a ()>,
}
/// Iterator over a set of changes.
impl<'a, T> ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn new(iterator: T) -> Self {
ChangesIter {
inner: iterator.peekable(),
_lifetime: PhantomData,
}
}
}
impl<'a, T> BytesIterator for ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn next(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.next() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.peek() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
}
/// An iterator over the entries of a `View`.
///
/// This struct is created by the [`iter`] or
/// [`iter_from`] method on [`View`]. See its documentation for details.
///
/// [`iter`]: struct.BaseIndex.html#method.iter
/// [`iter_from`]: struct.BaseIndex.html#method.iter_from
/// [`BaseIndex`]: struct.BaseIndex.html
pub struct Iter<'a, K, V> {
base_iter: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<'a, K, V> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Iter(..)")
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V>
where
K: BinaryKey,
V: BinaryValue,
{
type Item = (K::Owned, V);
fn next(&mut self) | {
self.ended = true;
None
} | conditional_block |
mod.rs |
// See the License for the specific language governing permissions and
// limitations under the License.
pub use self::metadata::{BinaryAttribute, IndexState, IndexType, ViewWithMetadata};
use std::{borrow::Cow, fmt, iter::Peekable, marker::PhantomData};
use super::{
db::{Change, ChangesMut, ChangesRef, ForkIter, ViewChanges},
BinaryKey, BinaryValue, Iter as BytesIter, Iterator as BytesIterator, Snapshot,
};
mod metadata;
#[cfg(test)]
mod tests;
/// Separator between the name and the additional bytes in family indexes.
const INDEX_NAME_SEPARATOR: &[u8] = &[0];
/// Represents current view of the database by specified `address` and
/// changes that took place after that view had been created. `View`
/// implementation provides an interface to work with related `changes`.
pub struct View<T: RawAccess> {
address: IndexAddress,
index_access: T,
changes: T::Changes,
}
impl<T: RawAccess> fmt::Debug for View<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("View")
.field("address", &self.address)
.finish()
}
}
/// Utility trait to provide optional references to `ViewChanges`.
pub trait ChangeSet {
fn as_ref(&self) -> Option<&ViewChanges>;
/// Provides mutable reference to changes. The implementation for a `RawAccessMut` type
/// should always return `Some(_)`.
fn as_mut(&mut self) -> Option<&mut ViewChanges>;
}
/// No-op implementation used in `Snapshot`.
impl ChangeSet for () {
fn as_ref(&self) -> Option<&ViewChanges> {
None
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesRef {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
None
}
}
impl ChangeSet for ChangesMut<'_> {
fn as_ref(&self) -> Option<&ViewChanges> {
Some(&*self)
}
fn as_mut(&mut self) -> Option<&mut ViewChanges> {
Some(&mut *self)
}
}
/// Allows to read data from the database.
///
/// This trait is rarely needs to be used directly; [`Access`] is a more high-level trait
/// encompassing access to database.
///
/// [`Access`]: trait.Access.html
pub trait RawAccess: Clone {
/// Type of the `changes()` that will be applied to the database.
type Changes: ChangeSet;
/// Reference to a `Snapshot`.
fn snapshot(&self) -> &dyn Snapshot;
/// Returns changes related to specific `address` compared to the `snapshot()`.
fn changes(&self, address: &IndexAddress) -> Self::Changes;
}
/// Allows to mutate data in indexes.
///
/// This is a marker trait that is used as a bound for mutable operations on indexes.
/// It can be used in the same way for high-level database objects:
///
/// # Example
///
/// ```
/// use exonum_merkledb::{access::{Access, RawAccessMut}, ListIndex, MapIndex};
///
/// pub struct Schema<T: Access> {
/// list: ListIndex<T::Base, String>,
/// map: MapIndex<T::Base, u64, u64>,
/// }
///
/// impl<T: Access> Schema<T>
/// where
/// T::Base: RawAccessMut,
/// {
/// pub fn mutate(&mut self) {
/// self.list.push("foo".to_owned());
/// self.map.put(&1, 2);
/// }
/// }
/// ```
pub trait RawAccessMut: RawAccess {}
impl<'a, T> RawAccessMut for T where T: RawAccess<Changes = ChangesMut<'a>> {}
/// Converts index access to a readonly presentation. The conversion operation is cheap.
pub trait AsReadonly: RawAccess {
/// Readonly version of the access.
type Readonly: RawAccess;
/// Performs the conversion.
fn as_readonly(&self) -> Self::Readonly;
}
/// Represents address of the index in the database.
///
/// # Examples
///
/// `IndexAddress` can be used implicitly, since `&str` and `(&str, &impl BinaryKey)` can both
/// be converted into an address.
///
/// ```
/// use exonum_merkledb::{access::AccessExt, IndexAddress, TemporaryDB, Database};
///
/// let db = TemporaryDB::new();
/// let fork = db.fork();
///
/// // Using a string address:
/// let map = fork.get_map::<_, String, u8>("map");
/// // Using an address within an index family:
/// let list = fork.get_list::<_, String>(("index", &3_u32));
/// // Using `IndexAddress` explicitly:
/// let addr = IndexAddress::with_root("data").append_bytes(&vec![1, 2, 3]);
/// let set = fork.get_value_set::<_, u64>(addr);
/// ```
#[derive(Debug, Clone, Eq, PartialEq, Hash, Default)]
pub struct IndexAddress {
pub(super) name: String,
pub(super) bytes: Option<Vec<u8>>,
}
impl IndexAddress {
/// Creates empty `IndexAddress`.
pub fn new() -> Self {
Self::default()
}
/// Creates new `IndexAddress` with specified `root` name.
pub fn with_root<S: Into<String>>(root: S) -> Self {
Self {
name: root.into(),
bytes: None,
}
}
/// Returns name part of `IndexAddress`.
pub fn name(&self) -> &str {
&self.name
}
/// Returns bytes part of `IndexAddress`.
pub fn bytes(&self) -> Option<&[u8]> {
self.bytes.as_ref().map(Vec::as_slice)
}
/// Returns tuple consists of `name` and `bytes` concatenated with provided `key`.
/// This is used to obtain single value(serialized as byte array) from the database.
pub(crate) fn keyed<'a>(&self, key: &'a [u8]) -> (&str, Cow<'a, [u8]>) {
(
&self.name,
match self.bytes {
None => Cow::Borrowed(key),
Some(ref bytes) => {
let bytes = concat_keys!(bytes, key);
bytes.into()
}
},
)
}
/// Prepends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let prefixed = addr.prepend_name("prefix");
/// assert_eq!(prefixed.name(), "prefix.foo");
/// ```
pub fn prepend_name<'a>(self, prefix: impl Into<Cow<'a, str>>) -> Self {
let prefix = prefix.into();
Self {
name: if self.name.is_empty() {
prefix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[prefix.as_ref(), ".", self.name()].concat()
},
bytes: self.bytes,
}
}
/// Appends a name part to `IndexAddress`. The name is separated from the existing name
/// by a dot `.`.
///
/// # Examples
///
/// ```
/// # use exonum_merkledb::IndexAddress;
/// let addr = IndexAddress::with_root("foo");
/// let suffixed = addr.append_name("suffix");
/// assert_eq!(suffixed.name(), "foo.suffix");
/// ```
pub fn append_name<'a>(self, suffix: impl Into<Cow<'a, str>>) -> Self {
let suffix = suffix.into();
Self {
name: if self.name.is_empty() {
suffix.into_owned()
} else {
// Because `concat` is faster than `format!("...")` in all cases.
[self.name(), ".", suffix.as_ref()].concat()
},
bytes: self.bytes,
}
}
/// Appends a bytes part to `IndexAddress`.
pub fn append_bytes<K: BinaryKey +?Sized>(self, suffix: &K) -> Self {
let name = self.name;
let bytes = if let Some(ref bytes) = self.bytes {
concat_keys!(bytes, suffix)
} else {
concat_keys!(suffix)
};
Self {
name,
bytes: Some(bytes),
}
}
/// Full address with a separator between `name` and `bytes` represented as byte array.
pub fn fully_qualified_name(&self) -> Vec<u8> {
if let Some(bytes) = self.bytes() {
concat_keys!(self.name(), INDEX_NAME_SEPARATOR, bytes)
} else {
concat_keys!(self.name())
}
}
}
impl<'a> From<&'a str> for IndexAddress {
fn from(name: &'a str) -> Self {
Self::with_root(name)
}
}
impl From<String> for IndexAddress {
fn from(name: String) -> Self {
Self::with_root(name)
}
}
// TODO should we have this impl in public interface? ECR-2834
impl<'a, K: BinaryKey +?Sized> From<(&'a str, &'a K)> for IndexAddress {
fn from((name, key): (&'a str, &'a K)) -> Self {
Self {
name: name.to_owned(),
bytes: Some(key_bytes(key)),
}
}
}
macro_rules! impl_snapshot_access {
($typ:ty) => {
impl RawAccess for $typ {
type Changes = ();
fn snapshot(&self) -> &dyn Snapshot {
self.as_ref()
}
fn changes(&self, _address: &IndexAddress) -> Self::Changes {}
}
impl AsReadonly for $typ {
type Readonly = Self;
fn as_readonly(&self) -> Self::Readonly {
self.clone()
}
}
};
}
impl_snapshot_access!(&'_ dyn Snapshot);
impl_snapshot_access!(&'_ Box<dyn Snapshot>);
impl_snapshot_access!(std::rc::Rc<dyn Snapshot>);
impl_snapshot_access!(std::sync::Arc<dyn Snapshot>);
fn key_bytes<K: BinaryKey +?Sized>(key: &K) -> Vec<u8> {
concat_keys!(key)
}
impl<T: RawAccess> View<T> {
/// Creates a new view for an index with the specified address.
#[doc(hidden)]
// ^-- This method is used in the testkit to revert blocks. It should not be used
// in the user-facing code; use more high-level abstractions instead (e.g., indexes or
// `AccessExt` methods).
pub fn new<I: Into<IndexAddress>>(index_access: T, address: I) -> Self {
let address = address.into();
let changes = index_access.changes(&address);
Self {
index_access,
changes,
address,
}
}
fn snapshot(&self) -> &dyn Snapshot {
self.index_access.snapshot()
}
fn get_bytes(&self, key: &[u8]) -> Option<Vec<u8>> {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(ref v) => return Some(v.clone()),
Change::Delete => return None,
}
}
if changes.is_empty() {
return None;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().get(name, &key)
}
fn contains_raw_key(&self, key: &[u8]) -> bool {
if let Some(ref changes) = self.changes.as_ref() {
if let Some(change) = changes.data.get(key) {
match *change {
Change::Put(..) => return true,
Change::Delete => return false,
}
}
if changes.is_empty() {
return false;
}
}
let (name, key) = self.address.keyed(key);
self.snapshot().contains(name, &key)
}
fn iter_bytes(&self, from: &[u8]) -> BytesIter<'_> {
use std::collections::Bound::*;
let (name, key) = self.address.keyed(from);
let prefix = self.address.bytes.clone().unwrap_or_else(|| vec![]);
let changes_iter = self
.changes
.as_ref()
.map(|changes| changes.data.range::<[u8], _>((Included(from), Unbounded)));
let is_empty = self.changes.as_ref().map_or(false, ViewChanges::is_empty);
if is_empty {
// Ignore all changes from the snapshot
Box::new(ChangesIter::new(changes_iter.unwrap()))
} else {
Box::new(ForkIter::new(
Box::new(SnapshotIter::new(self.snapshot(), name, prefix, &key)),
changes_iter,
))
}
}
/// Returns a value of *any* type corresponding to the key of *any* type.
pub fn get<K, V>(&self, key: &K) -> Option<V>
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.get_bytes(&key_bytes(key)).map(|v| {
BinaryValue::from_bytes(Cow::Owned(v)).expect("Error while deserializing value")
})
}
/// Returns `true` if the index contains a value of *any* type for the specified key of
/// *any* type.
pub fn contains<K>(&self, key: &K) -> bool
where
K: BinaryKey +?Sized,
{
self.contains_raw_key(&key_bytes(key))
}
/// Returns an iterator over the entries of the index in ascending order. The iterator element
/// type is *any* key-value pair. An argument `subprefix` allows specifying a subset of keys
/// for iteration.
pub fn iter<P, K, V>(&self, subprefix: &P) -> Iter<'_, K, V>
where
P: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
Iter {
base_iter: self.iter_bytes(&iter_prefix),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Returns an iterator over the entries of the index in ascending order starting from the
/// specified key. The iterator element type is *any* key-value pair. An argument `subprefix`
/// allows specifying a subset of iteration.
pub fn iter_from<P, F, K, V>(&self, subprefix: &P, from: &F) -> Iter<'_, K, V>
where
P: BinaryKey,
F: BinaryKey +?Sized,
K: BinaryKey,
V: BinaryValue,
{
let iter_prefix = key_bytes(subprefix);
let iter_from = key_bytes(from);
Iter {
base_iter: self.iter_bytes(&iter_from),
prefix: iter_prefix,
ended: false,
_k: PhantomData,
_v: PhantomData,
}
}
/// Crutch to be able to create metadata for indexes not present in the storage.
///
/// # Return value
///
/// Returns whether the changes were saved.
pub(crate) fn put_or_forget<K, V>(&mut self, key: &K, value: V) -> bool
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
if let Some(changes) = self.changes.as_mut() {
changes
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
true
} else {
false
}
}
}
impl<T: RawAccessMut> View<T> {
/// Inserts a key-value pair into the fork.
pub fn put<K, V>(&mut self, key: &K, value: V)
where
K: BinaryKey +?Sized,
V: BinaryValue,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Put(value.into_bytes()));
}
/// Removes a key from the view.
pub fn remove<K>(&mut self, key: &K)
where
K: BinaryKey +?Sized,
{
self.changes
.as_mut()
.unwrap()
.data
.insert(concat_keys!(key), Change::Delete);
}
/// Clears the view removing all its elements.
pub fn clear(&mut self) {
self.changes.as_mut().unwrap().clear();
}
}
/// Iterator over entries in a snapshot limited to a specific view.
struct SnapshotIter<'a> {
inner: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
}
impl<'a> fmt::Debug for SnapshotIter<'a> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SnapshotIter")
.field("prefix", &self.prefix)
.field("ended", &self.ended)
.finish()
}
}
impl<'a> SnapshotIter<'a> {
fn new(snapshot: &'a dyn Snapshot, name: &str, prefix: Vec<u8>, from: &[u8]) -> Self {
debug_assert!(from.starts_with(&prefix));
SnapshotIter {
inner: snapshot.iter(name, from),
prefix,
ended: false,
}
}
}
impl BytesIterator for SnapshotIter<'_> {
fn next(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let next = self.inner.next();
match next {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
fn peek(&mut self) -> Option<(&[u8], &[u8])> {
if self.ended {
return None;
}
let peeked = self.inner.peek();
match peeked {
Some((k, v)) if k.starts_with(&self.prefix) => Some((&k[self.prefix.len()..], v)),
_ => {
self.ended = true;
None
}
}
}
}
struct ChangesIter<'a, T: Iterator + 'a> {
inner: Peekable<T>,
_lifetime: PhantomData<&'a ()>,
}
/// Iterator over a set of changes.
impl<'a, T> ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn new(iterator: T) -> Self {
ChangesIter {
inner: iterator.peekable(),
_lifetime: PhantomData,
}
}
}
impl<'a, T> BytesIterator for ChangesIter<'a, T>
where
T: Iterator<Item = (&'a Vec<u8>, &'a Change)>,
{
fn next(&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.next() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
fn | (&mut self) -> Option<(&[u8], &[u8])> {
loop {
match self.inner.peek() {
Some((key, &Change::Put(ref value))) => {
return Some((key.as_slice(), value.as_slice()));
}
Some((_, &Change::Delete)) => {}
None => {
return None;
}
}
}
}
}
/// An iterator over the entries of a `View`.
///
/// This struct is created by the [`iter`] or
/// [`iter_from`] method on [`View`]. See its documentation for details.
///
/// [`iter`]: struct.BaseIndex.html#method.iter
/// [`iter_from`]: struct.BaseIndex.html#method.iter_from
/// [`BaseIndex`]: struct.BaseIndex.html
pub struct Iter<'a, K, V> {
base_iter: BytesIter<'a>,
prefix: Vec<u8>,
ended: bool,
_k: PhantomData<K>,
_v: PhantomData<V>,
}
impl<'a, K, V> fmt::Debug for Iter<'a, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Iter(..)")
}
}
impl<'a, K, V> Iterator for Iter<'a, K, V>
where
K: BinaryKey,
V: BinaryValue,
{
type Item = (K::Owned, V);
fn next(&mut self) | peek | identifier_name |
network.rs | extern crate openssl;
extern crate rand;
extern crate reqwest;
use crate::utils;
use crate::mojang::{Mojang, MojangHasJoinedResponse};
use crate::packets::*;
use crate::player::Player;
use openssl::pkey::Private;
use openssl::rsa::{Padding, Rsa};
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration};
struct Connection {
packet_receiver: mpsc::Receiver<PacketBuffer>,
stream: TcpStream,
alive: bool,
}
impl Connection {
fn new(stream: TcpStream) -> Connection {
println!("New connection!");
let reader = stream.try_clone().unwrap();
let (tx, rx) = mpsc::channel();
let connection = Connection {
packet_receiver: rx,
stream,
alive: true,
};
thread::spawn(|| {
Connection::handle_connection(reader, tx);
});
connection
}
fn handle_connection(mut stream: TcpStream, packet_sender: mpsc::Sender<PacketBuffer>) {
loop {
let mut data = vec![0u8; 512];
let length = stream.read(&mut data).unwrap();
if length == 0 {
thread::sleep(Duration::from_millis(2));
continue;
}
data.drain(length..);
data.shrink_to_fit();
packet_sender.send(data).unwrap();
}
}
fn receive_packets(&mut self) -> Vec<PacketBuffer> {
let mut packets = Vec::new();
loop {
match self.packet_receiver.try_recv() {
Ok(packet) => packets.push(packet),
Err(mpsc::TryRecvError::Empty) => return packets,
Err(mpsc::TryRecvError::Disconnected) => {
self.alive = false;
return packets;
}
}
}
}
}
pub struct Client {
connection: Connection,
state: NetworkState,
pub shared_secret: Option<Vec<u8>>,
pub compressed: bool,
verify_token: Option<Vec<u8>>,
player: Option<Player>,
username: Option<String>,
id: u32,
}
impl Client {
fn new(stream: TcpStream, id: u32) -> Client {
let connection = Connection::new(stream);
Client {
connection,
state: NetworkState::HANDSHAKING,
shared_secret: None,
compressed: false,
verify_token: None,
player: None,
username: None,
id,
}
}
fn send_packet(&mut self, encoder: &PacketEncoder) {
let buffer = encoder.finalize(self.compressed, &self.shared_secret);
self.connection.stream.write(buffer.as_slice()).unwrap();
}
}
#[derive(Serialize, Deserialize)]
pub struct ServerConfig {
max_players: i32,
motd: String,
}
pub struct Server {
clients: Vec<Client>,
client_receiver: mpsc::Receiver<Client>,
key_pair: Rsa<Private>,
mojang: Mojang,
}
impl Server {
fn new() -> Server {
let rsa = Rsa::generate(1024).unwrap();
let (tx, rx) = mpsc::channel();
let server = Server {
clients: Vec::new(),
key_pair: rsa,
mojang: Mojang::new(),
client_receiver: rx
};
server.listen_for_connections(tx);
server
}
fn get_client(&self, client_id: u32) -> &Client {
self.clients.iter().filter(|client| client.id == client_id).collect::<Vec<&Client>>()[0]
}
fn listen_for_connections(&self, sender: mpsc::Sender<Client>) {
let mut next_id = 0;
thread::spawn(move || {
let listener = TcpListener::bind("0.0.0.0:25566").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
let client = Client::new(stream, next_id);
sender.send(client).unwrap();
next_id += 1;
}
});
}
fn unknown_packet(id: i32) {
eprintln!("Unknown packet with id: {}", id);
}
fn handle_packet(&mut self, client: usize, packet: PacketBuffer) {
let client = self.clients.get_mut(client).unwrap();
let decoder = PacketDecoder::new(packet, client);
println!(
"Packet received: {}, with the length of: {}",
decoder.packet_id, decoder.length
);
let state = client.state;
match state {
NetworkState::HANDSHAKING => match decoder.packet_id {
0x00 => {
let packet = S00Handshake::decode(decoder);
println!("New state: {:#?}", packet.next_state);
client.state = packet.next_state;
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::STATUS => match decoder.packet_id {
0x00 => {
let json_response = json!({
"version": {
"name": "RustMC 1.15.1",
"protocol": 575
},
"players": {
"max": 100,
"online": 1,
"sample": [],
},
"description": {
"text": "Hello World!",
"color": "gold"
}
})
.to_string();
let response_encoder = C00Response { json_response }.encode();
client.send_packet(&response_encoder);
}
0x01 => {
let packet = S01Ping::decode(decoder);
let pong_encoder = C01Pong {
payload: packet.payload,
}
.encode();
client.send_packet(&pong_encoder);
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::LOGIN => match decoder.packet_id {
0x00 => {
let packet = S00LoginStart::decode(decoder);
let public_key = self.key_pair.public_key_to_der().unwrap();
let verify_token = rand::thread_rng().gen::<[u8; 4]>().to_vec();
let request_encoder = C01EcryptionRequest {
server_id: "".to_string(),
public_key_length: public_key.len() as i32,
public_key,
verify_token_length: 4,
verify_token: verify_token.clone(),
}
.encode();
client.verify_token = Some(verify_token);
client.username = Some(packet.name);
client.send_packet(&request_encoder);
}
0x01 => {
let packet = S01EncryptionResponse::decode(decoder);
let mut received_verify_token = vec![0u8; packet.verify_token_length as usize];
let length_decrypted = self
.key_pair
.private_decrypt(
packet.verify_token.as_slice(),
received_verify_token.as_mut(),
Padding::PKCS1,
)
.unwrap();
received_verify_token.drain(length_decrypted..received_verify_token.len());
if &received_verify_token == client.verify_token.as_ref().unwrap() {
// Start login process
println!("Starting login process");
/*self.mojang.send_has_joined(
&clients[client].username.unwrap(),
clients[client].id
);*/
} else {
println!("Verify token incorrent!!");
}
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::PLAY => match decoder.packet_id {
_ => Server::unknown_packet(decoder.packet_id),
},
}
}
fn on_mojang_has_joined_response(&mut self, client_id: u32, result: MojangHasJoinedResponse) {
let client = self.get_client(client_id);
}
fn receive_packets(&mut self) {
let num_clients = self.clients.len();
for client in 0..num_clients {
let mut packets = self.clients[client]
.connection
.receive_packets();
for packet_batch in packets.drain(..) {
for packet in PacketDecoder::new_batch(packet_batch, &self.clients[client]) {
println!("{}", utils::to_hex_string(&packet.buffer));
self.handle_packet(client, packet.buffer);
}
}
}
}
fn receive_clients(&mut self) {
let result = self.client_receiver.try_recv();
if let Ok(client) = result {
self.clients.push(client);
}
}
fn poll_mojang(&mut self) { // TODO: Clean up maybe
let mut finished_indicies = Vec::new();
for (i, pending) in self.mojang.has_joined_pending.iter().enumerate() {
if pending.result.is_some() {
finished_indicies.push(i);
}
}
for index in finished_indicies {
let response = self.mojang.has_joined_pending.remove(index);
self.on_mojang_has_joined_response(response.client_id, response.result.unwrap());
}
self.mojang.clean();
}
fn start(mut self) {
println!("Listening for connections...");
//let mut last_tick_time = SystemTime::now(); | loop {
/*let now = SystemTime::now();
let time_since = now.duration_since(last_tick_time).unwrap().as_millis();
if time_since > 50 {
last_tick_time = now;
}
*/
self.receive_clients();
self.receive_packets();
self.poll_mojang();
thread::sleep(Duration::from_millis(1));
}
}
}
pub fn start_server() {
println!("Starting server...");
let server = Server::new();
server.start();
} | random_line_split |
|
network.rs | extern crate openssl;
extern crate rand;
extern crate reqwest;
use crate::utils;
use crate::mojang::{Mojang, MojangHasJoinedResponse};
use crate::packets::*;
use crate::player::Player;
use openssl::pkey::Private;
use openssl::rsa::{Padding, Rsa};
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration};
struct Connection {
packet_receiver: mpsc::Receiver<PacketBuffer>,
stream: TcpStream,
alive: bool,
}
impl Connection {
fn new(stream: TcpStream) -> Connection {
println!("New connection!");
let reader = stream.try_clone().unwrap();
let (tx, rx) = mpsc::channel();
let connection = Connection {
packet_receiver: rx,
stream,
alive: true,
};
thread::spawn(|| {
Connection::handle_connection(reader, tx);
});
connection
}
fn handle_connection(mut stream: TcpStream, packet_sender: mpsc::Sender<PacketBuffer>) {
loop {
let mut data = vec![0u8; 512];
let length = stream.read(&mut data).unwrap();
if length == 0 {
thread::sleep(Duration::from_millis(2));
continue;
}
data.drain(length..);
data.shrink_to_fit();
packet_sender.send(data).unwrap();
}
}
fn receive_packets(&mut self) -> Vec<PacketBuffer> {
let mut packets = Vec::new();
loop {
match self.packet_receiver.try_recv() {
Ok(packet) => packets.push(packet),
Err(mpsc::TryRecvError::Empty) => return packets,
Err(mpsc::TryRecvError::Disconnected) => {
self.alive = false;
return packets;
}
}
}
}
}
pub struct Client {
connection: Connection,
state: NetworkState,
pub shared_secret: Option<Vec<u8>>,
pub compressed: bool,
verify_token: Option<Vec<u8>>,
player: Option<Player>,
username: Option<String>,
id: u32,
}
impl Client {
fn new(stream: TcpStream, id: u32) -> Client {
let connection = Connection::new(stream);
Client {
connection,
state: NetworkState::HANDSHAKING,
shared_secret: None,
compressed: false,
verify_token: None,
player: None,
username: None,
id,
}
}
fn send_packet(&mut self, encoder: &PacketEncoder) {
let buffer = encoder.finalize(self.compressed, &self.shared_secret);
self.connection.stream.write(buffer.as_slice()).unwrap();
}
}
#[derive(Serialize, Deserialize)]
pub struct ServerConfig {
max_players: i32,
motd: String,
}
pub struct Server {
clients: Vec<Client>,
client_receiver: mpsc::Receiver<Client>,
key_pair: Rsa<Private>,
mojang: Mojang,
}
impl Server {
fn new() -> Server {
let rsa = Rsa::generate(1024).unwrap();
let (tx, rx) = mpsc::channel();
let server = Server {
clients: Vec::new(),
key_pair: rsa,
mojang: Mojang::new(),
client_receiver: rx
};
server.listen_for_connections(tx);
server
}
fn get_client(&self, client_id: u32) -> &Client {
self.clients.iter().filter(|client| client.id == client_id).collect::<Vec<&Client>>()[0]
}
fn listen_for_connections(&self, sender: mpsc::Sender<Client>) {
let mut next_id = 0;
thread::spawn(move || {
let listener = TcpListener::bind("0.0.0.0:25566").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
let client = Client::new(stream, next_id);
sender.send(client).unwrap();
next_id += 1;
}
});
}
fn unknown_packet(id: i32) {
eprintln!("Unknown packet with id: {}", id);
}
fn handle_packet(&mut self, client: usize, packet: PacketBuffer) {
let client = self.clients.get_mut(client).unwrap();
let decoder = PacketDecoder::new(packet, client);
println!(
"Packet received: {}, with the length of: {}",
decoder.packet_id, decoder.length
);
let state = client.state;
match state {
NetworkState::HANDSHAKING => match decoder.packet_id {
0x00 => {
let packet = S00Handshake::decode(decoder);
println!("New state: {:#?}", packet.next_state);
client.state = packet.next_state;
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::STATUS => match decoder.packet_id {
0x00 => {
let json_response = json!({
"version": {
"name": "RustMC 1.15.1",
"protocol": 575
},
"players": {
"max": 100,
"online": 1,
"sample": [],
},
"description": {
"text": "Hello World!",
"color": "gold"
}
})
.to_string();
let response_encoder = C00Response { json_response }.encode();
client.send_packet(&response_encoder);
}
0x01 => {
let packet = S01Ping::decode(decoder);
let pong_encoder = C01Pong {
payload: packet.payload,
}
.encode();
client.send_packet(&pong_encoder);
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::LOGIN => match decoder.packet_id {
0x00 => {
let packet = S00LoginStart::decode(decoder);
let public_key = self.key_pair.public_key_to_der().unwrap();
let verify_token = rand::thread_rng().gen::<[u8; 4]>().to_vec();
let request_encoder = C01EcryptionRequest {
server_id: "".to_string(),
public_key_length: public_key.len() as i32,
public_key,
verify_token_length: 4,
verify_token: verify_token.clone(),
}
.encode();
client.verify_token = Some(verify_token);
client.username = Some(packet.name);
client.send_packet(&request_encoder);
}
0x01 => {
let packet = S01EncryptionResponse::decode(decoder);
let mut received_verify_token = vec![0u8; packet.verify_token_length as usize];
let length_decrypted = self
.key_pair
.private_decrypt(
packet.verify_token.as_slice(),
received_verify_token.as_mut(),
Padding::PKCS1,
)
.unwrap();
received_verify_token.drain(length_decrypted..received_verify_token.len());
if &received_verify_token == client.verify_token.as_ref().unwrap() {
// Start login process
println!("Starting login process");
/*self.mojang.send_has_joined(
&clients[client].username.unwrap(),
clients[client].id
);*/
} else {
println!("Verify token incorrent!!");
}
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::PLAY => match decoder.packet_id {
_ => Server::unknown_packet(decoder.packet_id),
},
}
}
fn on_mojang_has_joined_response(&mut self, client_id: u32, result: MojangHasJoinedResponse) {
let client = self.get_client(client_id);
}
fn receive_packets(&mut self) {
let num_clients = self.clients.len();
for client in 0..num_clients {
let mut packets = self.clients[client]
.connection
.receive_packets();
for packet_batch in packets.drain(..) {
for packet in PacketDecoder::new_batch(packet_batch, &self.clients[client]) {
println!("{}", utils::to_hex_string(&packet.buffer));
self.handle_packet(client, packet.buffer);
}
}
}
}
fn receive_clients(&mut self) {
let result = self.client_receiver.try_recv();
if let Ok(client) = result {
self.clients.push(client);
}
}
fn poll_mojang(&mut self) |
fn start(mut self) {
println!("Listening for connections...");
//let mut last_tick_time = SystemTime::now();
loop {
/*let now = SystemTime::now();
let time_since = now.duration_since(last_tick_time).unwrap().as_millis();
if time_since > 50 {
last_tick_time = now;
}
*/
self.receive_clients();
self.receive_packets();
self.poll_mojang();
thread::sleep(Duration::from_millis(1));
}
}
}
pub fn start_server() {
println!("Starting server...");
let server = Server::new();
server.start();
}
| { // TODO: Clean up maybe
let mut finished_indicies = Vec::new();
for (i, pending) in self.mojang.has_joined_pending.iter().enumerate() {
if pending.result.is_some() {
finished_indicies.push(i);
}
}
for index in finished_indicies {
let response = self.mojang.has_joined_pending.remove(index);
self.on_mojang_has_joined_response(response.client_id, response.result.unwrap());
}
self.mojang.clean();
} | identifier_body |
network.rs | extern crate openssl;
extern crate rand;
extern crate reqwest;
use crate::utils;
use crate::mojang::{Mojang, MojangHasJoinedResponse};
use crate::packets::*;
use crate::player::Player;
use openssl::pkey::Private;
use openssl::rsa::{Padding, Rsa};
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration};
struct Connection {
packet_receiver: mpsc::Receiver<PacketBuffer>,
stream: TcpStream,
alive: bool,
}
impl Connection {
fn new(stream: TcpStream) -> Connection {
println!("New connection!");
let reader = stream.try_clone().unwrap();
let (tx, rx) = mpsc::channel();
let connection = Connection {
packet_receiver: rx,
stream,
alive: true,
};
thread::spawn(|| {
Connection::handle_connection(reader, tx);
});
connection
}
fn handle_connection(mut stream: TcpStream, packet_sender: mpsc::Sender<PacketBuffer>) {
loop {
let mut data = vec![0u8; 512];
let length = stream.read(&mut data).unwrap();
if length == 0 {
thread::sleep(Duration::from_millis(2));
continue;
}
data.drain(length..);
data.shrink_to_fit();
packet_sender.send(data).unwrap();
}
}
fn receive_packets(&mut self) -> Vec<PacketBuffer> {
let mut packets = Vec::new();
loop {
match self.packet_receiver.try_recv() {
Ok(packet) => packets.push(packet),
Err(mpsc::TryRecvError::Empty) => return packets,
Err(mpsc::TryRecvError::Disconnected) => {
self.alive = false;
return packets;
}
}
}
}
}
pub struct Client {
connection: Connection,
state: NetworkState,
pub shared_secret: Option<Vec<u8>>,
pub compressed: bool,
verify_token: Option<Vec<u8>>,
player: Option<Player>,
username: Option<String>,
id: u32,
}
impl Client {
fn new(stream: TcpStream, id: u32) -> Client {
let connection = Connection::new(stream);
Client {
connection,
state: NetworkState::HANDSHAKING,
shared_secret: None,
compressed: false,
verify_token: None,
player: None,
username: None,
id,
}
}
fn send_packet(&mut self, encoder: &PacketEncoder) {
let buffer = encoder.finalize(self.compressed, &self.shared_secret);
self.connection.stream.write(buffer.as_slice()).unwrap();
}
}
#[derive(Serialize, Deserialize)]
pub struct ServerConfig {
max_players: i32,
motd: String,
}
pub struct Server {
clients: Vec<Client>,
client_receiver: mpsc::Receiver<Client>,
key_pair: Rsa<Private>,
mojang: Mojang,
}
impl Server {
fn new() -> Server {
let rsa = Rsa::generate(1024).unwrap();
let (tx, rx) = mpsc::channel();
let server = Server {
clients: Vec::new(),
key_pair: rsa,
mojang: Mojang::new(),
client_receiver: rx
};
server.listen_for_connections(tx);
server
}
fn get_client(&self, client_id: u32) -> &Client {
self.clients.iter().filter(|client| client.id == client_id).collect::<Vec<&Client>>()[0]
}
fn listen_for_connections(&self, sender: mpsc::Sender<Client>) {
let mut next_id = 0;
thread::spawn(move || {
let listener = TcpListener::bind("0.0.0.0:25566").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
let client = Client::new(stream, next_id);
sender.send(client).unwrap();
next_id += 1;
}
});
}
fn unknown_packet(id: i32) {
eprintln!("Unknown packet with id: {}", id);
}
fn handle_packet(&mut self, client: usize, packet: PacketBuffer) {
let client = self.clients.get_mut(client).unwrap();
let decoder = PacketDecoder::new(packet, client);
println!(
"Packet received: {}, with the length of: {}",
decoder.packet_id, decoder.length
);
let state = client.state;
match state {
NetworkState::HANDSHAKING => match decoder.packet_id {
0x00 => {
let packet = S00Handshake::decode(decoder);
println!("New state: {:#?}", packet.next_state);
client.state = packet.next_state;
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::STATUS => match decoder.packet_id {
0x00 => {
let json_response = json!({
"version": {
"name": "RustMC 1.15.1",
"protocol": 575
},
"players": {
"max": 100,
"online": 1,
"sample": [],
},
"description": {
"text": "Hello World!",
"color": "gold"
}
})
.to_string();
let response_encoder = C00Response { json_response }.encode();
client.send_packet(&response_encoder);
}
0x01 => {
let packet = S01Ping::decode(decoder);
let pong_encoder = C01Pong {
payload: packet.payload,
}
.encode();
client.send_packet(&pong_encoder);
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::LOGIN => match decoder.packet_id {
0x00 => {
let packet = S00LoginStart::decode(decoder);
let public_key = self.key_pair.public_key_to_der().unwrap();
let verify_token = rand::thread_rng().gen::<[u8; 4]>().to_vec();
let request_encoder = C01EcryptionRequest {
server_id: "".to_string(),
public_key_length: public_key.len() as i32,
public_key,
verify_token_length: 4,
verify_token: verify_token.clone(),
}
.encode();
client.verify_token = Some(verify_token);
client.username = Some(packet.name);
client.send_packet(&request_encoder);
}
0x01 => {
let packet = S01EncryptionResponse::decode(decoder);
let mut received_verify_token = vec![0u8; packet.verify_token_length as usize];
let length_decrypted = self
.key_pair
.private_decrypt(
packet.verify_token.as_slice(),
received_verify_token.as_mut(),
Padding::PKCS1,
)
.unwrap();
received_verify_token.drain(length_decrypted..received_verify_token.len());
if &received_verify_token == client.verify_token.as_ref().unwrap() {
// Start login process
println!("Starting login process");
/*self.mojang.send_has_joined(
&clients[client].username.unwrap(),
clients[client].id
);*/
} else {
println!("Verify token incorrent!!");
}
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::PLAY => match decoder.packet_id {
_ => Server::unknown_packet(decoder.packet_id),
},
}
}
fn on_mojang_has_joined_response(&mut self, client_id: u32, result: MojangHasJoinedResponse) {
let client = self.get_client(client_id);
}
fn | (&mut self) {
let num_clients = self.clients.len();
for client in 0..num_clients {
let mut packets = self.clients[client]
.connection
.receive_packets();
for packet_batch in packets.drain(..) {
for packet in PacketDecoder::new_batch(packet_batch, &self.clients[client]) {
println!("{}", utils::to_hex_string(&packet.buffer));
self.handle_packet(client, packet.buffer);
}
}
}
}
fn receive_clients(&mut self) {
let result = self.client_receiver.try_recv();
if let Ok(client) = result {
self.clients.push(client);
}
}
fn poll_mojang(&mut self) { // TODO: Clean up maybe
let mut finished_indicies = Vec::new();
for (i, pending) in self.mojang.has_joined_pending.iter().enumerate() {
if pending.result.is_some() {
finished_indicies.push(i);
}
}
for index in finished_indicies {
let response = self.mojang.has_joined_pending.remove(index);
self.on_mojang_has_joined_response(response.client_id, response.result.unwrap());
}
self.mojang.clean();
}
fn start(mut self) {
println!("Listening for connections...");
//let mut last_tick_time = SystemTime::now();
loop {
/*let now = SystemTime::now();
let time_since = now.duration_since(last_tick_time).unwrap().as_millis();
if time_since > 50 {
last_tick_time = now;
}
*/
self.receive_clients();
self.receive_packets();
self.poll_mojang();
thread::sleep(Duration::from_millis(1));
}
}
}
pub fn start_server() {
println!("Starting server...");
let server = Server::new();
server.start();
}
| receive_packets | identifier_name |
network.rs | extern crate openssl;
extern crate rand;
extern crate reqwest;
use crate::utils;
use crate::mojang::{Mojang, MojangHasJoinedResponse};
use crate::packets::*;
use crate::player::Player;
use openssl::pkey::Private;
use openssl::rsa::{Padding, Rsa};
use rand::Rng;
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::io::prelude::*;
use std::net::{TcpListener, TcpStream};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration};
struct Connection {
packet_receiver: mpsc::Receiver<PacketBuffer>,
stream: TcpStream,
alive: bool,
}
impl Connection {
fn new(stream: TcpStream) -> Connection {
println!("New connection!");
let reader = stream.try_clone().unwrap();
let (tx, rx) = mpsc::channel();
let connection = Connection {
packet_receiver: rx,
stream,
alive: true,
};
thread::spawn(|| {
Connection::handle_connection(reader, tx);
});
connection
}
fn handle_connection(mut stream: TcpStream, packet_sender: mpsc::Sender<PacketBuffer>) {
loop {
let mut data = vec![0u8; 512];
let length = stream.read(&mut data).unwrap();
if length == 0 {
thread::sleep(Duration::from_millis(2));
continue;
}
data.drain(length..);
data.shrink_to_fit();
packet_sender.send(data).unwrap();
}
}
fn receive_packets(&mut self) -> Vec<PacketBuffer> {
let mut packets = Vec::new();
loop {
match self.packet_receiver.try_recv() {
Ok(packet) => packets.push(packet),
Err(mpsc::TryRecvError::Empty) => return packets,
Err(mpsc::TryRecvError::Disconnected) => {
self.alive = false;
return packets;
}
}
}
}
}
pub struct Client {
connection: Connection,
state: NetworkState,
pub shared_secret: Option<Vec<u8>>,
pub compressed: bool,
verify_token: Option<Vec<u8>>,
player: Option<Player>,
username: Option<String>,
id: u32,
}
impl Client {
fn new(stream: TcpStream, id: u32) -> Client {
let connection = Connection::new(stream);
Client {
connection,
state: NetworkState::HANDSHAKING,
shared_secret: None,
compressed: false,
verify_token: None,
player: None,
username: None,
id,
}
}
fn send_packet(&mut self, encoder: &PacketEncoder) {
let buffer = encoder.finalize(self.compressed, &self.shared_secret);
self.connection.stream.write(buffer.as_slice()).unwrap();
}
}
#[derive(Serialize, Deserialize)]
pub struct ServerConfig {
max_players: i32,
motd: String,
}
pub struct Server {
clients: Vec<Client>,
client_receiver: mpsc::Receiver<Client>,
key_pair: Rsa<Private>,
mojang: Mojang,
}
impl Server {
fn new() -> Server {
let rsa = Rsa::generate(1024).unwrap();
let (tx, rx) = mpsc::channel();
let server = Server {
clients: Vec::new(),
key_pair: rsa,
mojang: Mojang::new(),
client_receiver: rx
};
server.listen_for_connections(tx);
server
}
fn get_client(&self, client_id: u32) -> &Client {
self.clients.iter().filter(|client| client.id == client_id).collect::<Vec<&Client>>()[0]
}
fn listen_for_connections(&self, sender: mpsc::Sender<Client>) {
let mut next_id = 0;
thread::spawn(move || {
let listener = TcpListener::bind("0.0.0.0:25566").unwrap();
for stream in listener.incoming() {
let stream = stream.unwrap();
let client = Client::new(stream, next_id);
sender.send(client).unwrap();
next_id += 1;
}
});
}
fn unknown_packet(id: i32) {
eprintln!("Unknown packet with id: {}", id);
}
fn handle_packet(&mut self, client: usize, packet: PacketBuffer) {
let client = self.clients.get_mut(client).unwrap();
let decoder = PacketDecoder::new(packet, client);
println!(
"Packet received: {}, with the length of: {}",
decoder.packet_id, decoder.length
);
let state = client.state;
match state {
NetworkState::HANDSHAKING => match decoder.packet_id {
0x00 => {
let packet = S00Handshake::decode(decoder);
println!("New state: {:#?}", packet.next_state);
client.state = packet.next_state;
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::STATUS => match decoder.packet_id {
0x00 => {
let json_response = json!({
"version": {
"name": "RustMC 1.15.1",
"protocol": 575
},
"players": {
"max": 100,
"online": 1,
"sample": [],
},
"description": {
"text": "Hello World!",
"color": "gold"
}
})
.to_string();
let response_encoder = C00Response { json_response }.encode();
client.send_packet(&response_encoder);
}
0x01 => {
let packet = S01Ping::decode(decoder);
let pong_encoder = C01Pong {
payload: packet.payload,
}
.encode();
client.send_packet(&pong_encoder);
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::LOGIN => match decoder.packet_id {
0x00 => {
let packet = S00LoginStart::decode(decoder);
let public_key = self.key_pair.public_key_to_der().unwrap();
let verify_token = rand::thread_rng().gen::<[u8; 4]>().to_vec();
let request_encoder = C01EcryptionRequest {
server_id: "".to_string(),
public_key_length: public_key.len() as i32,
public_key,
verify_token_length: 4,
verify_token: verify_token.clone(),
}
.encode();
client.verify_token = Some(verify_token);
client.username = Some(packet.name);
client.send_packet(&request_encoder);
}
0x01 => {
let packet = S01EncryptionResponse::decode(decoder);
let mut received_verify_token = vec![0u8; packet.verify_token_length as usize];
let length_decrypted = self
.key_pair
.private_decrypt(
packet.verify_token.as_slice(),
received_verify_token.as_mut(),
Padding::PKCS1,
)
.unwrap();
received_verify_token.drain(length_decrypted..received_verify_token.len());
if &received_verify_token == client.verify_token.as_ref().unwrap() {
// Start login process
println!("Starting login process");
/*self.mojang.send_has_joined(
&clients[client].username.unwrap(),
clients[client].id
);*/
} else {
println!("Verify token incorrent!!");
}
}
_ => Server::unknown_packet(decoder.packet_id),
},
NetworkState::PLAY => match decoder.packet_id {
_ => Server::unknown_packet(decoder.packet_id),
},
}
}
fn on_mojang_has_joined_response(&mut self, client_id: u32, result: MojangHasJoinedResponse) {
let client = self.get_client(client_id);
}
fn receive_packets(&mut self) {
let num_clients = self.clients.len();
for client in 0..num_clients {
let mut packets = self.clients[client]
.connection
.receive_packets();
for packet_batch in packets.drain(..) {
for packet in PacketDecoder::new_batch(packet_batch, &self.clients[client]) {
println!("{}", utils::to_hex_string(&packet.buffer));
self.handle_packet(client, packet.buffer);
}
}
}
}
fn receive_clients(&mut self) {
let result = self.client_receiver.try_recv();
if let Ok(client) = result |
}
fn poll_mojang(&mut self) { // TODO: Clean up maybe
let mut finished_indicies = Vec::new();
for (i, pending) in self.mojang.has_joined_pending.iter().enumerate() {
if pending.result.is_some() {
finished_indicies.push(i);
}
}
for index in finished_indicies {
let response = self.mojang.has_joined_pending.remove(index);
self.on_mojang_has_joined_response(response.client_id, response.result.unwrap());
}
self.mojang.clean();
}
fn start(mut self) {
println!("Listening for connections...");
//let mut last_tick_time = SystemTime::now();
loop {
/*let now = SystemTime::now();
let time_since = now.duration_since(last_tick_time).unwrap().as_millis();
if time_since > 50 {
last_tick_time = now;
}
*/
self.receive_clients();
self.receive_packets();
self.poll_mojang();
thread::sleep(Duration::from_millis(1));
}
}
}
pub fn start_server() {
println!("Starting server...");
let server = Server::new();
server.start();
}
| {
self.clients.push(client);
} | conditional_block |
color.rs | //!
//! A library providing simple `Color` and `Gradient` types along with useful transformations and
//! presets.
//!
//!
//! Inspiration taken from [elm-lang's color module]
//! (https://github.com/elm-lang/core/blob/62b22218c42fb8ccc996c86bea450a14991ab815/src/Color.elm)
//!
//!
//! Module for working with colors. Includes [RGB](https://en.wikipedia.org/wiki/RGB_color_model)
//! and [HSL](http://en.wikipedia.org/wiki/HSL_and_HSV) creation, gradients and built-in names.
//!
use std::f32::consts::PI;
use utils::{degrees, fmod, turns};
/// Color supporting RGB and HSL variants.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum Color {
/// Red, Green, Blue, Alpha - All values' scales represented between 0.0 and 1.0.
Rgba(f32, f32, f32, f32),
/// Hue, Saturation, Lightness, Alpha - all valuess scales represented between 0.0 and 1.0.
Hsla(f32, f32, f32, f32),
}
/// Regional spelling alias.
pub type Colour = Color;
/// Create RGB colors with an alpha component for transparency.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba(r: f32, g: f32, b: f32, a: f32) -> Color {
Color::Rgba(r, g, b, a)
}
/// Create RGB colors from numbers between 0.0 and 1.0.
#[inline]
pub fn rgb(r: f32, g: f32, b: f32) -> Color {
Color::Rgba(r, g, b, 1.0)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba_bytes(r: u8, g: u8, b: u8, a: f32) -> Color {
Color::Rgba(r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0, a)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
#[inline]
pub fn rgb_bytes(r: u8, g: u8, b: u8) -> Color {
rgba_bytes(r, g, b, 1.0)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV) with an alpha component for
/// transparency.
#[inline]
pub fn hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Color {
Color::Hsla(hue - turns((hue / (2.0 * PI)).floor()), saturation, lightness, alpha)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV). This gives you access to colors
/// more like a color wheel, where all hues are arranged in a circle that you specify with radians.
///
/// red = hsl(degrees(0.0) , 1.0, 0.5)
/// green = hsl(degrees(120.0), 1.0, 0.5)
/// blue = hsl(degrees(240.0), 1.0, 0.5)
/// pastel_red = hsl(degrees(0.0) , 0.7, 0.7)
///
/// To cycle through all colors, just cycle through degrees. The saturation level is how vibrant
/// the color is, like a dial between grey and bright colors. The lightness level is a dial between
/// white and black.
#[inline]
pub fn hsl(hue: f32, saturation: f32, lightness: f32) -> Color {
hsla(hue, saturation, lightness, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn grayscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn greyscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Construct a random color.
pub fn random() -> Color {
rgb(::rand::random(), ::rand::random(), ::rand::random())
}
/// Clamp a f32 between 0f32 and 1f32.
fn clampf32(f: f32) -> f32 {
if f < 0.0 { 0.0 } else if f > 1.0 { 1.0 } else { f }
}
impl Color {
/// Produce a complementary color. The two colors will accent each other. This is the same as
/// rotating the hue by 180 degrees.
pub fn complement(self) -> Color {
match self {
Color::Hsla(h, s, l, a) => hsla(h + degrees(180.0), s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
hsla(h + degrees(180.0), s, l, a)
},
}
}
/// Calculate and return the luminance of the Color.
pub fn luminance(&self) -> f32 {
match *self {
Color::Rgba(r, g, b, _) => (r + g + b) / 3.0,
Color::Hsla(_, _, l, _) => l,
}
}
/// Return either black or white, depending which contrasts the Color the most. This will be
/// useful for determining a readable color for text on any given background Color.
pub fn plain_contrast(self) -> Color {
if self.luminance() > 0.5 { black() } else { white() }
}
/// Extract the components of a color in the HSL format.
pub fn to_hsl(self) -> Hsla {
match self {
Color::Hsla(h, s, l, a) => Hsla(h, s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
Hsla(h, s, l, a)
},
}
}
/// Extract the components of a color in the RGB format.
pub fn to_rgb(self) -> Rgba {
match self {
Color::Rgba(r, g, b, a) => Rgba(r, g, b, a),
Color::Hsla(h, s, l, a) => {
let (r, g, b) = hsl_to_rgb(h, s, l);
Rgba(r, g, b, a)
},
}
}
/// Extract the components of a color in the RGB format within a fixed-size array.
pub fn to_fsa(self) -> [f32; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[r, g, b, a]
}
/// Same as `to_fsa`, except r, g, b and a are represented in byte form.
pub fn to_byte_fsa(self) -> [u8; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[f32_to_byte(r), f32_to_byte(g), f32_to_byte(b), f32_to_byte(a)]
}
// /// Return the hex representation of this color in the format #RRGGBBAA
// /// e.g. `Color(1.0, 0.0, 5.0, 1.0) == "#FF0080FF"`
// pub fn to_hex(self) -> String {
// let vals = self.to_byte_fsa();
// let hex = vals.to_hex().to_ascii_uppercase();
// format!("#{}", &hex)
// }
/// Return the same color but with the given luminance.
pub fn with_luminance(self, l: f32) -> Color {
let Hsla(h, s, _, a) = self.to_hsl();
Color::Hsla(h, s, l, a)
}
/// Return the same color but with the alpha multiplied by the given alpha.
pub fn alpha(self, alpha: f32) -> Color {
match self {
Color::Rgba(r, g, b, a) => Color::Rgba(r, g, b, a * alpha),
Color::Hsla(h, s, l, a) => Color::Hsla(h, s, l, a * alpha),
}
}
/// Return the same color but with the given alpha.
pub fn with_alpha(self, a: f32) -> Color {
match self {
Color::Rgba(r, g, b, _) => Color::Rgba(r, g, b, a),
Color::Hsla(h, s, l, _) => Color::Hsla(h, s, l, a),
}
}
/// Return a highlighted version of the current Color.
pub fn highlighted(self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r - 0.2, g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.2, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.5 * r + r),
clampf32((1.0 - g) * 0.1 * g + g),
clampf32((1.0 - b) * 0.1 * b + b))
}
};
let a = clampf32((1.0 - a) * 0.5 + a);
rgba(r, g, b, a)
}
/// Return a clicked version of the current Color.
pub fn clicked(&self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r , g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.4, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.75 + r),
clampf32((1.0 - g) * 0.25 + g),
clampf32((1.0 - b) * 0.25 + b))
}
};
let a = clampf32((1.0 - a) * 0.75 + a);
rgba(r, g, b, a)
}
/// Return the Color's invert.
pub fn invert(self) -> Color {
let Rgba(r, g, b, a) = self.to_rgb();
rgba((r - 1.0).abs(), (g - 1.0).abs(), (b - 1.0).abs(), a)
}
/// Return the red value.
pub fn red(&self) -> f32 {
let Rgba(r, _, _, _) = self.to_rgb();
r
}
/// Return the green value.
pub fn green(&self) -> f32 {
let Rgba(_, g, _, _) = self.to_rgb();
g
}
/// Return the blue value.
pub fn blue(&self) -> f32 {
let Rgba(_, _, b, _) = self.to_rgb();
b
}
/// Set the red value.
pub fn set_red(&mut self, r: f32) {
let Rgba(_, g, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the green value.
pub fn set_green(&mut self, g: f32) {
let Rgba(r, _, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the blue value.
pub fn set_blue(&mut self, b: f32) {
let Rgba(r, g, _, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
}
/// The parts of HSL along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Hsla(pub f32, pub f32, pub f32, pub f32);
/// The parts of RGB along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Rgba(pub f32, pub f32, pub f32, pub f32);
/// Convert an f32 color to a byte.
#[inline]
pub fn f32_to_byte(c: f32) -> u8 { (c * 255.0) as u8 }
/// Pure function for converting rgb to hsl.
pub fn rgb_to_hsl(r: f32, g: f32, b: f32) -> (f32, f32, f32) {
let c_max = r.max(g).max(b);
let c_min = r.min(g).min(b);
let c = c_max - c_min;
let hue = if c == 0.0 {
// If there's no difference in the channels we have grayscale, so the hue is undefined.
0.0
} else {
degrees(60.0) * if c_max == r { fmod(((g - b) / c), 6) }
else if c_max == g { ((b - r) / c) + 2.0 }
else { ((r - g) / c) + 4.0 }
};
let lightness = (c_max + c_min) / 2.0;
let saturation = if lightness == 0.0 { 0.0 }
else { c / (1.0 - (2.0 * lightness - 1.0).abs()) };
(hue, saturation, lightness)
}
/// Pure function for converting hsl to rgb.
pub fn hsl_to_rgb(hue: f32, saturation: f32, lightness: f32) -> (f32, f32, f32) {
let chroma = (1.0 - (2.0 * lightness - 1.0).abs()) * saturation;
let hue = hue / degrees(60.0);
let x = chroma * (1.0 - (fmod(hue, 2) - 1.0).abs());
let (r, g, b) = match hue {
hue if hue < 0.0 => (0.0, 0.0, 0.0),
hue if hue < 1.0 => (chroma, x, 0.0),
hue if hue < 2.0 => (x, chroma, 0.0),
hue if hue < 3.0 => (0.0, chroma, x),
hue if hue < 4.0 => (0.0, x, chroma),
hue if hue < 5.0 => (x, 0.0, chroma),
hue if hue < 6.0 => (chroma, 0.0, x),
_ => (0.0, 0.0, 0.0),
};
let m = lightness - chroma / 2.0;
(r + m, g + m, b + m)
}
/// Linear or Radial Gradient.
#[derive(Clone, Debug)]
pub enum Gradient {
/// Takes a start and end point and then a series of color stops that indicate how to
/// interpolate between the start and end points.
Linear((f64, f64), (f64, f64), Vec<(f64, Color)>),
/// First takes a start point and inner radius. Then takes an end point and outer radius.
/// It then takes a series of color stops that indicate how to interpolate between the
/// inner and outer circles.
Radial((f64, f64), f64, (f64, f64), f64, Vec<(f64, Color)>),
}
/// Create a linear gradient.
pub fn linear(start: (f64, f64), end: (f64, f64), colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Linear(start, end, colors)
}
/// Create a radial gradient.
pub fn radial(start: (f64, f64), start_r: f64,
end: (f64, f64), end_r: f64,
colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Radial(start, start_r, end, end_r, colors)
}
/// Built-in colors.
///
/// These colors come from the
/// [Tango palette](http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines) which provides
/// aesthetically reasonable defaults for colors. Each color also comes with a light and dark
/// version.
/// Scarlet Red - Light - #EF2929
pub fn light_red() -> Color { rgb_bytes(239, 41 , 41 ) }
/// Scarlet Red - Regular - #CC0000
pub fn red() -> Color { rgb_bytes(204, 0 , 0 ) }
/// Scarlet Red - Dark - #A30000
pub fn dark_red() -> Color { rgb_bytes(164, 0 , 0 ) }
/// Orange - Light - #FCAF3E
pub fn light_orange() -> Color { rgb_bytes(252, 175, 62 ) }
/// Orange - Regular - #F57900
pub fn orange() -> Color { rgb_bytes(245, 121, 0 ) }
/// Orange - Dark - #CE5C00
pub fn dark_orange() -> Color { rgb_bytes(206, 92 , 0 ) }
/// Butter - Light - #FCE94F
pub fn light_yellow() -> Color { rgb_bytes(255, 233, 79 ) }
/// Butter - Regular - #EDD400
pub fn yellow() -> Color { rgb_bytes(237, 212, 0 ) }
/// Butter - Dark - #C4A000
pub fn dark_yellow() -> Color { rgb_bytes(196, 160, 0 ) }
/// Chameleon - Light - #8AE234
pub fn light_green() -> Color { rgb_bytes(138, 226, 52 ) }
/// Chameleon - Regular - #73D216
pub fn green() -> Color { rgb_bytes(115, 210, 22 ) }
/// Chameleon - Dark - #4E9A06
pub fn dark_green() -> Color { rgb_bytes(78 , 154, 6 ) }
/// Sky Blue - Light - #729FCF
pub fn light_blue() -> Color { rgb_bytes(114, 159, 207) }
/// Sky Blue - Regular - #3465A4
pub fn blue() -> Color { rgb_bytes(52 , 101, 164) }
/// Sky Blue - Dark - #204A87
pub fn dark_blue() -> Color { rgb_bytes(32 , 74 , 135) }
/// Plum - Light - #AD7FA8
pub fn light_purple() -> Color { rgb_bytes(173, 127, 168) }
/// Plum - Regular - #75507B
pub fn purple() -> Color { rgb_bytes(117, 80 , 123) }
/// Plum - Dark - #5C3566
pub fn dark_purple() -> Color { rgb_bytes(92 , 53 , 102) }
| pub fn light_brown() -> Color { rgb_bytes(233, 185, 110) }
/// Chocolate - Regular - #C17D11
pub fn brown() -> Color { rgb_bytes(193, 125, 17 ) }
/// Chocolate - Dark - #8F5902
pub fn dark_brown() -> Color { rgb_bytes(143, 89 , 2 ) }
/// Straight Black.
pub fn black() -> Color { rgb_bytes(0 , 0 , 0 ) }
/// Straight White.
pub fn white() -> Color { rgb_bytes(255, 255, 255) }
/// Alluminium - Light
pub fn light_gray() -> Color { rgb_bytes(238, 238, 236) }
/// Alluminium - Regular
pub fn gray() -> Color { rgb_bytes(211, 215, 207) }
/// Alluminium - Dark
pub fn dark_gray() -> Color { rgb_bytes(186, 189, 182) }
/// Aluminium - Light - #EEEEEC
pub fn light_grey() -> Color { rgb_bytes(238, 238, 236) }
/// Aluminium - Regular - #D3D7CF
pub fn grey() -> Color { rgb_bytes(211, 215, 207) }
/// Aluminium - Dark - #BABDB6
pub fn dark_grey() -> Color { rgb_bytes(186, 189, 182) }
/// Charcoal - Light - #888A85
pub fn light_charcoal() -> Color { rgb_bytes(136, 138, 133) }
/// Charcoal - Regular - #555753
pub fn charcoal() -> Color { rgb_bytes(85 , 87 , 83 ) }
/// Charcoal - Dark - #2E3436
pub fn dark_charcoal() -> Color { rgb_bytes(46 , 52 , 54 ) }
/// Types that can be colored.
pub trait Colorable: Sized {
/// Set the color of the widget.
fn color(self, color: Color) -> Self;
/// Set the color of the widget from rgba values.
fn rgba(self, r: f32, g: f32, b: f32, a: f32) -> Self {
self.color(rgba(r, g, b, a))
}
/// Set the color of the widget from rgb values.
fn rgb(self, r: f32, g: f32, b: f32) -> Self {
self.color(rgb(r, g, b))
}
/// Set the color of the widget from hsla values.
fn hsla(self, h: f32, s: f32, l: f32, a: f32) -> Self {
self.color(hsla(h, s, l, a))
}
/// Set the color of the widget from hsl values.
fn hsl(self, h: f32, s: f32, l: f32) -> Self {
self.color(hsl(h, s, l))
}
} | /// Chocolate - Light - #E9B96E | random_line_split |
color.rs | //!
//! A library providing simple `Color` and `Gradient` types along with useful transformations and
//! presets.
//!
//!
//! Inspiration taken from [elm-lang's color module]
//! (https://github.com/elm-lang/core/blob/62b22218c42fb8ccc996c86bea450a14991ab815/src/Color.elm)
//!
//!
//! Module for working with colors. Includes [RGB](https://en.wikipedia.org/wiki/RGB_color_model)
//! and [HSL](http://en.wikipedia.org/wiki/HSL_and_HSV) creation, gradients and built-in names.
//!
use std::f32::consts::PI;
use utils::{degrees, fmod, turns};
/// Color supporting RGB and HSL variants.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum Color {
/// Red, Green, Blue, Alpha - All values' scales represented between 0.0 and 1.0.
Rgba(f32, f32, f32, f32),
/// Hue, Saturation, Lightness, Alpha - all valuess scales represented between 0.0 and 1.0.
Hsla(f32, f32, f32, f32),
}
/// Regional spelling alias.
pub type Colour = Color;
/// Create RGB colors with an alpha component for transparency.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba(r: f32, g: f32, b: f32, a: f32) -> Color {
Color::Rgba(r, g, b, a)
}
/// Create RGB colors from numbers between 0.0 and 1.0.
#[inline]
pub fn rgb(r: f32, g: f32, b: f32) -> Color {
Color::Rgba(r, g, b, 1.0)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba_bytes(r: u8, g: u8, b: u8, a: f32) -> Color {
Color::Rgba(r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0, a)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
#[inline]
pub fn rgb_bytes(r: u8, g: u8, b: u8) -> Color {
rgba_bytes(r, g, b, 1.0)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV) with an alpha component for
/// transparency.
#[inline]
pub fn hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Color {
Color::Hsla(hue - turns((hue / (2.0 * PI)).floor()), saturation, lightness, alpha)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV). This gives you access to colors
/// more like a color wheel, where all hues are arranged in a circle that you specify with radians.
///
/// red = hsl(degrees(0.0) , 1.0, 0.5)
/// green = hsl(degrees(120.0), 1.0, 0.5)
/// blue = hsl(degrees(240.0), 1.0, 0.5)
/// pastel_red = hsl(degrees(0.0) , 0.7, 0.7)
///
/// To cycle through all colors, just cycle through degrees. The saturation level is how vibrant
/// the color is, like a dial between grey and bright colors. The lightness level is a dial between
/// white and black.
#[inline]
pub fn hsl(hue: f32, saturation: f32, lightness: f32) -> Color {
hsla(hue, saturation, lightness, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn grayscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn greyscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Construct a random color.
pub fn random() -> Color {
rgb(::rand::random(), ::rand::random(), ::rand::random())
}
/// Clamp a f32 between 0f32 and 1f32.
fn clampf32(f: f32) -> f32 {
if f < 0.0 { 0.0 } else if f > 1.0 { 1.0 } else { f }
}
impl Color {
/// Produce a complementary color. The two colors will accent each other. This is the same as
/// rotating the hue by 180 degrees.
pub fn complement(self) -> Color {
match self {
Color::Hsla(h, s, l, a) => hsla(h + degrees(180.0), s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
hsla(h + degrees(180.0), s, l, a)
},
}
}
/// Calculate and return the luminance of the Color.
pub fn luminance(&self) -> f32 {
match *self {
Color::Rgba(r, g, b, _) => (r + g + b) / 3.0,
Color::Hsla(_, _, l, _) => l,
}
}
/// Return either black or white, depending which contrasts the Color the most. This will be
/// useful for determining a readable color for text on any given background Color.
pub fn plain_contrast(self) -> Color {
if self.luminance() > 0.5 { black() } else { white() }
}
/// Extract the components of a color in the HSL format.
pub fn to_hsl(self) -> Hsla {
match self {
Color::Hsla(h, s, l, a) => Hsla(h, s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
Hsla(h, s, l, a)
},
}
}
/// Extract the components of a color in the RGB format.
pub fn to_rgb(self) -> Rgba {
match self {
Color::Rgba(r, g, b, a) => Rgba(r, g, b, a),
Color::Hsla(h, s, l, a) => {
let (r, g, b) = hsl_to_rgb(h, s, l);
Rgba(r, g, b, a)
},
}
}
/// Extract the components of a color in the RGB format within a fixed-size array.
pub fn to_fsa(self) -> [f32; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[r, g, b, a]
}
/// Same as `to_fsa`, except r, g, b and a are represented in byte form.
pub fn to_byte_fsa(self) -> [u8; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[f32_to_byte(r), f32_to_byte(g), f32_to_byte(b), f32_to_byte(a)]
}
// /// Return the hex representation of this color in the format #RRGGBBAA
// /// e.g. `Color(1.0, 0.0, 5.0, 1.0) == "#FF0080FF"`
// pub fn to_hex(self) -> String {
// let vals = self.to_byte_fsa();
// let hex = vals.to_hex().to_ascii_uppercase();
// format!("#{}", &hex)
// }
/// Return the same color but with the given luminance.
pub fn with_luminance(self, l: f32) -> Color {
let Hsla(h, s, _, a) = self.to_hsl();
Color::Hsla(h, s, l, a)
}
/// Return the same color but with the alpha multiplied by the given alpha.
pub fn alpha(self, alpha: f32) -> Color {
match self {
Color::Rgba(r, g, b, a) => Color::Rgba(r, g, b, a * alpha),
Color::Hsla(h, s, l, a) => Color::Hsla(h, s, l, a * alpha),
}
}
/// Return the same color but with the given alpha.
pub fn with_alpha(self, a: f32) -> Color {
match self {
Color::Rgba(r, g, b, _) => Color::Rgba(r, g, b, a),
Color::Hsla(h, s, l, _) => Color::Hsla(h, s, l, a),
}
}
/// Return a highlighted version of the current Color.
pub fn highlighted(self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r - 0.2, g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.2, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.5 * r + r),
clampf32((1.0 - g) * 0.1 * g + g),
clampf32((1.0 - b) * 0.1 * b + b))
}
};
let a = clampf32((1.0 - a) * 0.5 + a);
rgba(r, g, b, a)
}
/// Return a clicked version of the current Color.
pub fn clicked(&self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r , g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.4, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.75 + r),
clampf32((1.0 - g) * 0.25 + g),
clampf32((1.0 - b) * 0.25 + b))
}
};
let a = clampf32((1.0 - a) * 0.75 + a);
rgba(r, g, b, a)
}
/// Return the Color's invert.
pub fn invert(self) -> Color {
let Rgba(r, g, b, a) = self.to_rgb();
rgba((r - 1.0).abs(), (g - 1.0).abs(), (b - 1.0).abs(), a)
}
/// Return the red value.
pub fn red(&self) -> f32 {
let Rgba(r, _, _, _) = self.to_rgb();
r
}
/// Return the green value.
pub fn green(&self) -> f32 {
let Rgba(_, g, _, _) = self.to_rgb();
g
}
/// Return the blue value.
pub fn blue(&self) -> f32 {
let Rgba(_, _, b, _) = self.to_rgb();
b
}
/// Set the red value.
pub fn set_red(&mut self, r: f32) {
let Rgba(_, g, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the green value.
pub fn set_green(&mut self, g: f32) {
let Rgba(r, _, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the blue value.
pub fn set_blue(&mut self, b: f32) {
let Rgba(r, g, _, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
}
/// The parts of HSL along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Hsla(pub f32, pub f32, pub f32, pub f32);
/// The parts of RGB along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Rgba(pub f32, pub f32, pub f32, pub f32);
/// Convert an f32 color to a byte.
#[inline]
pub fn f32_to_byte(c: f32) -> u8 { (c * 255.0) as u8 }
/// Pure function for converting rgb to hsl.
pub fn rgb_to_hsl(r: f32, g: f32, b: f32) -> (f32, f32, f32) {
let c_max = r.max(g).max(b);
let c_min = r.min(g).min(b);
let c = c_max - c_min;
let hue = if c == 0.0 {
// If there's no difference in the channels we have grayscale, so the hue is undefined.
0.0
} else {
degrees(60.0) * if c_max == r { fmod(((g - b) / c), 6) }
else if c_max == g { ((b - r) / c) + 2.0 }
else { ((r - g) / c) + 4.0 }
};
let lightness = (c_max + c_min) / 2.0;
let saturation = if lightness == 0.0 { 0.0 }
else { c / (1.0 - (2.0 * lightness - 1.0).abs()) };
(hue, saturation, lightness)
}
/// Pure function for converting hsl to rgb.
pub fn hsl_to_rgb(hue: f32, saturation: f32, lightness: f32) -> (f32, f32, f32) {
let chroma = (1.0 - (2.0 * lightness - 1.0).abs()) * saturation;
let hue = hue / degrees(60.0);
let x = chroma * (1.0 - (fmod(hue, 2) - 1.0).abs());
let (r, g, b) = match hue {
hue if hue < 0.0 => (0.0, 0.0, 0.0),
hue if hue < 1.0 => (chroma, x, 0.0),
hue if hue < 2.0 => (x, chroma, 0.0),
hue if hue < 3.0 => (0.0, chroma, x),
hue if hue < 4.0 => (0.0, x, chroma),
hue if hue < 5.0 => (x, 0.0, chroma),
hue if hue < 6.0 => (chroma, 0.0, x),
_ => (0.0, 0.0, 0.0),
};
let m = lightness - chroma / 2.0;
(r + m, g + m, b + m)
}
/// Linear or Radial Gradient.
#[derive(Clone, Debug)]
pub enum Gradient {
/// Takes a start and end point and then a series of color stops that indicate how to
/// interpolate between the start and end points.
Linear((f64, f64), (f64, f64), Vec<(f64, Color)>),
/// First takes a start point and inner radius. Then takes an end point and outer radius.
/// It then takes a series of color stops that indicate how to interpolate between the
/// inner and outer circles.
Radial((f64, f64), f64, (f64, f64), f64, Vec<(f64, Color)>),
}
/// Create a linear gradient.
pub fn linear(start: (f64, f64), end: (f64, f64), colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Linear(start, end, colors)
}
/// Create a radial gradient.
pub fn radial(start: (f64, f64), start_r: f64,
end: (f64, f64), end_r: f64,
colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Radial(start, start_r, end, end_r, colors)
}
/// Built-in colors.
///
/// These colors come from the
/// [Tango palette](http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines) which provides
/// aesthetically reasonable defaults for colors. Each color also comes with a light and dark
/// version.
/// Scarlet Red - Light - #EF2929
pub fn light_red() -> Color { rgb_bytes(239, 41 , 41 ) }
/// Scarlet Red - Regular - #CC0000
pub fn red() -> Color { rgb_bytes(204, 0 , 0 ) }
/// Scarlet Red - Dark - #A30000
pub fn dark_red() -> Color { rgb_bytes(164, 0 , 0 ) }
/// Orange - Light - #FCAF3E
pub fn light_orange() -> Color { rgb_bytes(252, 175, 62 ) }
/// Orange - Regular - #F57900
pub fn orange() -> Color { rgb_bytes(245, 121, 0 ) }
/// Orange - Dark - #CE5C00
pub fn dark_orange() -> Color { rgb_bytes(206, 92 , 0 ) }
/// Butter - Light - #FCE94F
pub fn light_yellow() -> Color { rgb_bytes(255, 233, 79 ) }
/// Butter - Regular - #EDD400
pub fn yellow() -> Color { rgb_bytes(237, 212, 0 ) }
/// Butter - Dark - #C4A000
pub fn dark_yellow() -> Color { rgb_bytes(196, 160, 0 ) }
/// Chameleon - Light - #8AE234
pub fn light_green() -> Color { rgb_bytes(138, 226, 52 ) }
/// Chameleon - Regular - #73D216
pub fn green() -> Color { rgb_bytes(115, 210, 22 ) }
/// Chameleon - Dark - #4E9A06
pub fn dark_green() -> Color { rgb_bytes(78 , 154, 6 ) }
/// Sky Blue - Light - #729FCF
pub fn light_blue() -> Color { rgb_bytes(114, 159, 207) }
/// Sky Blue - Regular - #3465A4
pub fn blue() -> Color { rgb_bytes(52 , 101, 164) }
/// Sky Blue - Dark - #204A87
pub fn dark_blue() -> Color { rgb_bytes(32 , 74 , 135) }
/// Plum - Light - #AD7FA8
pub fn light_purple() -> Color { rgb_bytes(173, 127, 168) }
/// Plum - Regular - #75507B
pub fn purple() -> Color { rgb_bytes(117, 80 , 123) }
/// Plum - Dark - #5C3566
pub fn dark_purple() -> Color { rgb_bytes(92 , 53 , 102) }
/// Chocolate - Light - #E9B96E
pub fn light_brown() -> Color { rgb_bytes(233, 185, 110) }
/// Chocolate - Regular - #C17D11
pub fn brown() -> Color { rgb_bytes(193, 125, 17 ) }
/// Chocolate - Dark - #8F5902
pub fn dark_brown() -> Color { rgb_bytes(143, 89 , 2 ) }
/// Straight Black.
pub fn black() -> Color { rgb_bytes(0 , 0 , 0 ) }
/// Straight White.
pub fn white() -> Color { rgb_bytes(255, 255, 255) }
/// Alluminium - Light
pub fn light_gray() -> Color { rgb_bytes(238, 238, 236) }
/// Alluminium - Regular
pub fn gray() -> Color { rgb_bytes(211, 215, 207) }
/// Alluminium - Dark
pub fn dark_gray() -> Color { rgb_bytes(186, 189, 182) }
/// Aluminium - Light - #EEEEEC
pub fn light_grey() -> Color { rgb_bytes(238, 238, 236) }
/// Aluminium - Regular - #D3D7CF
pub fn grey() -> Color { rgb_bytes(211, 215, 207) }
/// Aluminium - Dark - #BABDB6
pub fn dark_grey() -> Color { rgb_bytes(186, 189, 182) }
/// Charcoal - Light - #888A85
pub fn light_charcoal() -> Color { rgb_bytes(136, 138, 133) }
/// Charcoal - Regular - #555753
pub fn charcoal() -> Color { rgb_bytes(85 , 87 , 83 ) }
/// Charcoal - Dark - #2E3436
pub fn dark_charcoal() -> Color { rgb_bytes(46 , 52 , 54 ) }
/// Types that can be colored.
pub trait Colorable: Sized {
/// Set the color of the widget.
fn color(self, color: Color) -> Self;
/// Set the color of the widget from rgba values.
fn rgba(self, r: f32, g: f32, b: f32, a: f32) -> Self {
self.color(rgba(r, g, b, a))
}
/// Set the color of the widget from rgb values.
fn rgb(self, r: f32, g: f32, b: f32) -> Self {
self.color(rgb(r, g, b))
}
/// Set the color of the widget from hsla values.
fn | (self, h: f32, s: f32, l: f32, a: f32) -> Self {
self.color(hsla(h, s, l, a))
}
/// Set the color of the widget from hsl values.
fn hsl(self, h: f32, s: f32, l: f32) -> Self {
self.color(hsl(h, s, l))
}
}
| hsla | identifier_name |
color.rs | //!
//! A library providing simple `Color` and `Gradient` types along with useful transformations and
//! presets.
//!
//!
//! Inspiration taken from [elm-lang's color module]
//! (https://github.com/elm-lang/core/blob/62b22218c42fb8ccc996c86bea450a14991ab815/src/Color.elm)
//!
//!
//! Module for working with colors. Includes [RGB](https://en.wikipedia.org/wiki/RGB_color_model)
//! and [HSL](http://en.wikipedia.org/wiki/HSL_and_HSV) creation, gradients and built-in names.
//!
use std::f32::consts::PI;
use utils::{degrees, fmod, turns};
/// Color supporting RGB and HSL variants.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum Color {
/// Red, Green, Blue, Alpha - All values' scales represented between 0.0 and 1.0.
Rgba(f32, f32, f32, f32),
/// Hue, Saturation, Lightness, Alpha - all valuess scales represented between 0.0 and 1.0.
Hsla(f32, f32, f32, f32),
}
/// Regional spelling alias.
pub type Colour = Color;
/// Create RGB colors with an alpha component for transparency.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba(r: f32, g: f32, b: f32, a: f32) -> Color {
Color::Rgba(r, g, b, a)
}
/// Create RGB colors from numbers between 0.0 and 1.0.
#[inline]
pub fn rgb(r: f32, g: f32, b: f32) -> Color {
Color::Rgba(r, g, b, 1.0)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba_bytes(r: u8, g: u8, b: u8, a: f32) -> Color {
Color::Rgba(r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0, a)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
#[inline]
pub fn rgb_bytes(r: u8, g: u8, b: u8) -> Color {
rgba_bytes(r, g, b, 1.0)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV) with an alpha component for
/// transparency.
#[inline]
pub fn hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Color {
Color::Hsla(hue - turns((hue / (2.0 * PI)).floor()), saturation, lightness, alpha)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV). This gives you access to colors
/// more like a color wheel, where all hues are arranged in a circle that you specify with radians.
///
/// red = hsl(degrees(0.0) , 1.0, 0.5)
/// green = hsl(degrees(120.0), 1.0, 0.5)
/// blue = hsl(degrees(240.0), 1.0, 0.5)
/// pastel_red = hsl(degrees(0.0) , 0.7, 0.7)
///
/// To cycle through all colors, just cycle through degrees. The saturation level is how vibrant
/// the color is, like a dial between grey and bright colors. The lightness level is a dial between
/// white and black.
#[inline]
pub fn hsl(hue: f32, saturation: f32, lightness: f32) -> Color {
hsla(hue, saturation, lightness, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn grayscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn greyscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Construct a random color.
pub fn random() -> Color {
rgb(::rand::random(), ::rand::random(), ::rand::random())
}
/// Clamp a f32 between 0f32 and 1f32.
fn clampf32(f: f32) -> f32 {
if f < 0.0 { 0.0 } else if f > 1.0 { 1.0 } else { f }
}
impl Color {
/// Produce a complementary color. The two colors will accent each other. This is the same as
/// rotating the hue by 180 degrees.
pub fn complement(self) -> Color {
match self {
Color::Hsla(h, s, l, a) => hsla(h + degrees(180.0), s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
hsla(h + degrees(180.0), s, l, a)
},
}
}
/// Calculate and return the luminance of the Color.
pub fn luminance(&self) -> f32 {
match *self {
Color::Rgba(r, g, b, _) => (r + g + b) / 3.0,
Color::Hsla(_, _, l, _) => l,
}
}
/// Return either black or white, depending which contrasts the Color the most. This will be
/// useful for determining a readable color for text on any given background Color.
pub fn plain_contrast(self) -> Color {
if self.luminance() > 0.5 { black() } else { white() }
}
/// Extract the components of a color in the HSL format.
pub fn to_hsl(self) -> Hsla {
match self {
Color::Hsla(h, s, l, a) => Hsla(h, s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
Hsla(h, s, l, a)
},
}
}
/// Extract the components of a color in the RGB format.
pub fn to_rgb(self) -> Rgba {
match self {
Color::Rgba(r, g, b, a) => Rgba(r, g, b, a),
Color::Hsla(h, s, l, a) => {
let (r, g, b) = hsl_to_rgb(h, s, l);
Rgba(r, g, b, a)
},
}
}
/// Extract the components of a color in the RGB format within a fixed-size array.
pub fn to_fsa(self) -> [f32; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[r, g, b, a]
}
/// Same as `to_fsa`, except r, g, b and a are represented in byte form.
pub fn to_byte_fsa(self) -> [u8; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[f32_to_byte(r), f32_to_byte(g), f32_to_byte(b), f32_to_byte(a)]
}
// /// Return the hex representation of this color in the format #RRGGBBAA
// /// e.g. `Color(1.0, 0.0, 5.0, 1.0) == "#FF0080FF"`
// pub fn to_hex(self) -> String {
// let vals = self.to_byte_fsa();
// let hex = vals.to_hex().to_ascii_uppercase();
// format!("#{}", &hex)
// }
/// Return the same color but with the given luminance.
pub fn with_luminance(self, l: f32) -> Color {
let Hsla(h, s, _, a) = self.to_hsl();
Color::Hsla(h, s, l, a)
}
/// Return the same color but with the alpha multiplied by the given alpha.
pub fn alpha(self, alpha: f32) -> Color {
match self {
Color::Rgba(r, g, b, a) => Color::Rgba(r, g, b, a * alpha),
Color::Hsla(h, s, l, a) => Color::Hsla(h, s, l, a * alpha),
}
}
/// Return the same color but with the given alpha.
pub fn with_alpha(self, a: f32) -> Color {
match self {
Color::Rgba(r, g, b, _) => Color::Rgba(r, g, b, a),
Color::Hsla(h, s, l, _) => Color::Hsla(h, s, l, a),
}
}
/// Return a highlighted version of the current Color.
pub fn highlighted(self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r - 0.2, g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.2, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.5 * r + r),
clampf32((1.0 - g) * 0.1 * g + g),
clampf32((1.0 - b) * 0.1 * b + b))
}
};
let a = clampf32((1.0 - a) * 0.5 + a);
rgba(r, g, b, a)
}
/// Return a clicked version of the current Color.
pub fn clicked(&self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r , g - 0.2, b - 0.2) }
else if luminance < 0.2 |
else {
(clampf32((1.0 - r) * 0.75 + r),
clampf32((1.0 - g) * 0.25 + g),
clampf32((1.0 - b) * 0.25 + b))
}
};
let a = clampf32((1.0 - a) * 0.75 + a);
rgba(r, g, b, a)
}
/// Return the Color's invert.
pub fn invert(self) -> Color {
let Rgba(r, g, b, a) = self.to_rgb();
rgba((r - 1.0).abs(), (g - 1.0).abs(), (b - 1.0).abs(), a)
}
/// Return the red value.
pub fn red(&self) -> f32 {
let Rgba(r, _, _, _) = self.to_rgb();
r
}
/// Return the green value.
pub fn green(&self) -> f32 {
let Rgba(_, g, _, _) = self.to_rgb();
g
}
/// Return the blue value.
pub fn blue(&self) -> f32 {
let Rgba(_, _, b, _) = self.to_rgb();
b
}
/// Set the red value.
pub fn set_red(&mut self, r: f32) {
let Rgba(_, g, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the green value.
pub fn set_green(&mut self, g: f32) {
let Rgba(r, _, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the blue value.
pub fn set_blue(&mut self, b: f32) {
let Rgba(r, g, _, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
}
/// The parts of HSL along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Hsla(pub f32, pub f32, pub f32, pub f32);
/// The parts of RGB along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Rgba(pub f32, pub f32, pub f32, pub f32);
/// Convert an f32 color to a byte.
#[inline]
pub fn f32_to_byte(c: f32) -> u8 { (c * 255.0) as u8 }
/// Pure function for converting rgb to hsl.
pub fn rgb_to_hsl(r: f32, g: f32, b: f32) -> (f32, f32, f32) {
let c_max = r.max(g).max(b);
let c_min = r.min(g).min(b);
let c = c_max - c_min;
let hue = if c == 0.0 {
// If there's no difference in the channels we have grayscale, so the hue is undefined.
0.0
} else {
degrees(60.0) * if c_max == r { fmod(((g - b) / c), 6) }
else if c_max == g { ((b - r) / c) + 2.0 }
else { ((r - g) / c) + 4.0 }
};
let lightness = (c_max + c_min) / 2.0;
let saturation = if lightness == 0.0 { 0.0 }
else { c / (1.0 - (2.0 * lightness - 1.0).abs()) };
(hue, saturation, lightness)
}
/// Pure function for converting hsl to rgb.
pub fn hsl_to_rgb(hue: f32, saturation: f32, lightness: f32) -> (f32, f32, f32) {
let chroma = (1.0 - (2.0 * lightness - 1.0).abs()) * saturation;
let hue = hue / degrees(60.0);
let x = chroma * (1.0 - (fmod(hue, 2) - 1.0).abs());
let (r, g, b) = match hue {
hue if hue < 0.0 => (0.0, 0.0, 0.0),
hue if hue < 1.0 => (chroma, x, 0.0),
hue if hue < 2.0 => (x, chroma, 0.0),
hue if hue < 3.0 => (0.0, chroma, x),
hue if hue < 4.0 => (0.0, x, chroma),
hue if hue < 5.0 => (x, 0.0, chroma),
hue if hue < 6.0 => (chroma, 0.0, x),
_ => (0.0, 0.0, 0.0),
};
let m = lightness - chroma / 2.0;
(r + m, g + m, b + m)
}
/// Linear or Radial Gradient.
#[derive(Clone, Debug)]
pub enum Gradient {
/// Takes a start and end point and then a series of color stops that indicate how to
/// interpolate between the start and end points.
Linear((f64, f64), (f64, f64), Vec<(f64, Color)>),
/// First takes a start point and inner radius. Then takes an end point and outer radius.
/// It then takes a series of color stops that indicate how to interpolate between the
/// inner and outer circles.
Radial((f64, f64), f64, (f64, f64), f64, Vec<(f64, Color)>),
}
/// Create a linear gradient.
pub fn linear(start: (f64, f64), end: (f64, f64), colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Linear(start, end, colors)
}
/// Create a radial gradient.
pub fn radial(start: (f64, f64), start_r: f64,
end: (f64, f64), end_r: f64,
colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Radial(start, start_r, end, end_r, colors)
}
/// Built-in colors.
///
/// These colors come from the
/// [Tango palette](http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines) which provides
/// aesthetically reasonable defaults for colors. Each color also comes with a light and dark
/// version.
/// Scarlet Red - Light - #EF2929
pub fn light_red() -> Color { rgb_bytes(239, 41 , 41 ) }
/// Scarlet Red - Regular - #CC0000
pub fn red() -> Color { rgb_bytes(204, 0 , 0 ) }
/// Scarlet Red - Dark - #A30000
pub fn dark_red() -> Color { rgb_bytes(164, 0 , 0 ) }
/// Orange - Light - #FCAF3E
pub fn light_orange() -> Color { rgb_bytes(252, 175, 62 ) }
/// Orange - Regular - #F57900
pub fn orange() -> Color { rgb_bytes(245, 121, 0 ) }
/// Orange - Dark - #CE5C00
pub fn dark_orange() -> Color { rgb_bytes(206, 92 , 0 ) }
/// Butter - Light - #FCE94F
pub fn light_yellow() -> Color { rgb_bytes(255, 233, 79 ) }
/// Butter - Regular - #EDD400
pub fn yellow() -> Color { rgb_bytes(237, 212, 0 ) }
/// Butter - Dark - #C4A000
pub fn dark_yellow() -> Color { rgb_bytes(196, 160, 0 ) }
/// Chameleon - Light - #8AE234
pub fn light_green() -> Color { rgb_bytes(138, 226, 52 ) }
/// Chameleon - Regular - #73D216
pub fn green() -> Color { rgb_bytes(115, 210, 22 ) }
/// Chameleon - Dark - #4E9A06
pub fn dark_green() -> Color { rgb_bytes(78 , 154, 6 ) }
/// Sky Blue - Light - #729FCF
pub fn light_blue() -> Color { rgb_bytes(114, 159, 207) }
/// Sky Blue - Regular - #3465A4
pub fn blue() -> Color { rgb_bytes(52 , 101, 164) }
/// Sky Blue - Dark - #204A87
pub fn dark_blue() -> Color { rgb_bytes(32 , 74 , 135) }
/// Plum - Light - #AD7FA8
pub fn light_purple() -> Color { rgb_bytes(173, 127, 168) }
/// Plum - Regular - #75507B
pub fn purple() -> Color { rgb_bytes(117, 80 , 123) }
/// Plum - Dark - #5C3566
pub fn dark_purple() -> Color { rgb_bytes(92 , 53 , 102) }
/// Chocolate - Light - #E9B96E
pub fn light_brown() -> Color { rgb_bytes(233, 185, 110) }
/// Chocolate - Regular - #C17D11
pub fn brown() -> Color { rgb_bytes(193, 125, 17 ) }
/// Chocolate - Dark - #8F5902
pub fn dark_brown() -> Color { rgb_bytes(143, 89 , 2 ) }
/// Straight Black.
pub fn black() -> Color { rgb_bytes(0 , 0 , 0 ) }
/// Straight White.
pub fn white() -> Color { rgb_bytes(255, 255, 255) }
/// Alluminium - Light
pub fn light_gray() -> Color { rgb_bytes(238, 238, 236) }
/// Alluminium - Regular
pub fn gray() -> Color { rgb_bytes(211, 215, 207) }
/// Alluminium - Dark
pub fn dark_gray() -> Color { rgb_bytes(186, 189, 182) }
/// Aluminium - Light - #EEEEEC
pub fn light_grey() -> Color { rgb_bytes(238, 238, 236) }
/// Aluminium - Regular - #D3D7CF
pub fn grey() -> Color { rgb_bytes(211, 215, 207) }
/// Aluminium - Dark - #BABDB6
pub fn dark_grey() -> Color { rgb_bytes(186, 189, 182) }
/// Charcoal - Light - #888A85
pub fn light_charcoal() -> Color { rgb_bytes(136, 138, 133) }
/// Charcoal - Regular - #555753
pub fn charcoal() -> Color { rgb_bytes(85 , 87 , 83 ) }
/// Charcoal - Dark - #2E3436
pub fn dark_charcoal() -> Color { rgb_bytes(46 , 52 , 54 ) }
/// Types that can be colored.
pub trait Colorable: Sized {
/// Set the color of the widget.
fn color(self, color: Color) -> Self;
/// Set the color of the widget from rgba values.
fn rgba(self, r: f32, g: f32, b: f32, a: f32) -> Self {
self.color(rgba(r, g, b, a))
}
/// Set the color of the widget from rgb values.
fn rgb(self, r: f32, g: f32, b: f32) -> Self {
self.color(rgb(r, g, b))
}
/// Set the color of the widget from hsla values.
fn hsla(self, h: f32, s: f32, l: f32, a: f32) -> Self {
self.color(hsla(h, s, l, a))
}
/// Set the color of the widget from hsl values.
fn hsl(self, h: f32, s: f32, l: f32) -> Self {
self.color(hsl(h, s, l))
}
}
| { (r + 0.4, g + 0.2, b + 0.2) } | conditional_block |
color.rs | //!
//! A library providing simple `Color` and `Gradient` types along with useful transformations and
//! presets.
//!
//!
//! Inspiration taken from [elm-lang's color module]
//! (https://github.com/elm-lang/core/blob/62b22218c42fb8ccc996c86bea450a14991ab815/src/Color.elm)
//!
//!
//! Module for working with colors. Includes [RGB](https://en.wikipedia.org/wiki/RGB_color_model)
//! and [HSL](http://en.wikipedia.org/wiki/HSL_and_HSV) creation, gradients and built-in names.
//!
use std::f32::consts::PI;
use utils::{degrees, fmod, turns};
/// Color supporting RGB and HSL variants.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum Color {
/// Red, Green, Blue, Alpha - All values' scales represented between 0.0 and 1.0.
Rgba(f32, f32, f32, f32),
/// Hue, Saturation, Lightness, Alpha - all valuess scales represented between 0.0 and 1.0.
Hsla(f32, f32, f32, f32),
}
/// Regional spelling alias.
pub type Colour = Color;
/// Create RGB colors with an alpha component for transparency.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba(r: f32, g: f32, b: f32, a: f32) -> Color {
Color::Rgba(r, g, b, a)
}
/// Create RGB colors from numbers between 0.0 and 1.0.
#[inline]
pub fn rgb(r: f32, g: f32, b: f32) -> Color {
Color::Rgba(r, g, b, 1.0)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
/// The alpha component is specified with numbers between 0 and 1.
#[inline]
pub fn rgba_bytes(r: u8, g: u8, b: u8, a: f32) -> Color {
Color::Rgba(r as f32 / 255.0, g as f32 / 255.0, b as f32 / 255.0, a)
}
/// Create RGB colors from numbers between 0 and 255 inclusive.
#[inline]
pub fn rgb_bytes(r: u8, g: u8, b: u8) -> Color {
rgba_bytes(r, g, b, 1.0)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV) with an alpha component for
/// transparency.
#[inline]
pub fn hsla(hue: f32, saturation: f32, lightness: f32, alpha: f32) -> Color {
Color::Hsla(hue - turns((hue / (2.0 * PI)).floor()), saturation, lightness, alpha)
}
/// Create [HSL colors](http://en.wikipedia.org/wiki/HSL_and_HSV). This gives you access to colors
/// more like a color wheel, where all hues are arranged in a circle that you specify with radians.
///
/// red = hsl(degrees(0.0) , 1.0, 0.5)
/// green = hsl(degrees(120.0), 1.0, 0.5)
/// blue = hsl(degrees(240.0), 1.0, 0.5)
/// pastel_red = hsl(degrees(0.0) , 0.7, 0.7)
///
/// To cycle through all colors, just cycle through degrees. The saturation level is how vibrant
/// the color is, like a dial between grey and bright colors. The lightness level is a dial between
/// white and black.
#[inline]
pub fn hsl(hue: f32, saturation: f32, lightness: f32) -> Color {
hsla(hue, saturation, lightness, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn grayscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Produce a gray based on the input. 0.0 is white, 1.0 is black.
pub fn greyscale(p: f32) -> Color {
Color::Hsla(0.0, 0.0, 1.0-p, 1.0)
}
/// Construct a random color.
pub fn random() -> Color {
rgb(::rand::random(), ::rand::random(), ::rand::random())
}
/// Clamp a f32 between 0f32 and 1f32.
fn clampf32(f: f32) -> f32 {
if f < 0.0 { 0.0 } else if f > 1.0 { 1.0 } else { f }
}
impl Color {
/// Produce a complementary color. The two colors will accent each other. This is the same as
/// rotating the hue by 180 degrees.
pub fn complement(self) -> Color {
match self {
Color::Hsla(h, s, l, a) => hsla(h + degrees(180.0), s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
hsla(h + degrees(180.0), s, l, a)
},
}
}
/// Calculate and return the luminance of the Color.
pub fn luminance(&self) -> f32 |
/// Return either black or white, depending which contrasts the Color the most. This will be
/// useful for determining a readable color for text on any given background Color.
pub fn plain_contrast(self) -> Color {
if self.luminance() > 0.5 { black() } else { white() }
}
/// Extract the components of a color in the HSL format.
pub fn to_hsl(self) -> Hsla {
match self {
Color::Hsla(h, s, l, a) => Hsla(h, s, l, a),
Color::Rgba(r, g, b, a) => {
let (h, s, l) = rgb_to_hsl(r, g, b);
Hsla(h, s, l, a)
},
}
}
/// Extract the components of a color in the RGB format.
pub fn to_rgb(self) -> Rgba {
match self {
Color::Rgba(r, g, b, a) => Rgba(r, g, b, a),
Color::Hsla(h, s, l, a) => {
let (r, g, b) = hsl_to_rgb(h, s, l);
Rgba(r, g, b, a)
},
}
}
/// Extract the components of a color in the RGB format within a fixed-size array.
pub fn to_fsa(self) -> [f32; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[r, g, b, a]
}
/// Same as `to_fsa`, except r, g, b and a are represented in byte form.
pub fn to_byte_fsa(self) -> [u8; 4] {
let Rgba(r, g, b, a) = self.to_rgb();
[f32_to_byte(r), f32_to_byte(g), f32_to_byte(b), f32_to_byte(a)]
}
// /// Return the hex representation of this color in the format #RRGGBBAA
// /// e.g. `Color(1.0, 0.0, 5.0, 1.0) == "#FF0080FF"`
// pub fn to_hex(self) -> String {
// let vals = self.to_byte_fsa();
// let hex = vals.to_hex().to_ascii_uppercase();
// format!("#{}", &hex)
// }
/// Return the same color but with the given luminance.
pub fn with_luminance(self, l: f32) -> Color {
let Hsla(h, s, _, a) = self.to_hsl();
Color::Hsla(h, s, l, a)
}
/// Return the same color but with the alpha multiplied by the given alpha.
pub fn alpha(self, alpha: f32) -> Color {
match self {
Color::Rgba(r, g, b, a) => Color::Rgba(r, g, b, a * alpha),
Color::Hsla(h, s, l, a) => Color::Hsla(h, s, l, a * alpha),
}
}
/// Return the same color but with the given alpha.
pub fn with_alpha(self, a: f32) -> Color {
match self {
Color::Rgba(r, g, b, _) => Color::Rgba(r, g, b, a),
Color::Hsla(h, s, l, _) => Color::Hsla(h, s, l, a),
}
}
/// Return a highlighted version of the current Color.
pub fn highlighted(self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r - 0.2, g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.2, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.5 * r + r),
clampf32((1.0 - g) * 0.1 * g + g),
clampf32((1.0 - b) * 0.1 * b + b))
}
};
let a = clampf32((1.0 - a) * 0.5 + a);
rgba(r, g, b, a)
}
/// Return a clicked version of the current Color.
pub fn clicked(&self) -> Color {
let luminance = self.luminance();
let Rgba(r, g, b, a) = self.to_rgb();
let (r, g, b) = {
if luminance > 0.8 { (r , g - 0.2, b - 0.2) }
else if luminance < 0.2 { (r + 0.4, g + 0.2, b + 0.2) }
else {
(clampf32((1.0 - r) * 0.75 + r),
clampf32((1.0 - g) * 0.25 + g),
clampf32((1.0 - b) * 0.25 + b))
}
};
let a = clampf32((1.0 - a) * 0.75 + a);
rgba(r, g, b, a)
}
/// Return the Color's invert.
pub fn invert(self) -> Color {
let Rgba(r, g, b, a) = self.to_rgb();
rgba((r - 1.0).abs(), (g - 1.0).abs(), (b - 1.0).abs(), a)
}
/// Return the red value.
pub fn red(&self) -> f32 {
let Rgba(r, _, _, _) = self.to_rgb();
r
}
/// Return the green value.
pub fn green(&self) -> f32 {
let Rgba(_, g, _, _) = self.to_rgb();
g
}
/// Return the blue value.
pub fn blue(&self) -> f32 {
let Rgba(_, _, b, _) = self.to_rgb();
b
}
/// Set the red value.
pub fn set_red(&mut self, r: f32) {
let Rgba(_, g, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the green value.
pub fn set_green(&mut self, g: f32) {
let Rgba(r, _, b, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
/// Set the blue value.
pub fn set_blue(&mut self, b: f32) {
let Rgba(r, g, _, a) = self.to_rgb();
*self = rgba(r, g, b, a);
}
}
/// The parts of HSL along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Hsla(pub f32, pub f32, pub f32, pub f32);
/// The parts of RGB along with an alpha for transparency.
#[derive(Copy, Clone, Debug)]
pub struct Rgba(pub f32, pub f32, pub f32, pub f32);
/// Convert an f32 color to a byte.
#[inline]
pub fn f32_to_byte(c: f32) -> u8 { (c * 255.0) as u8 }
/// Pure function for converting rgb to hsl.
pub fn rgb_to_hsl(r: f32, g: f32, b: f32) -> (f32, f32, f32) {
let c_max = r.max(g).max(b);
let c_min = r.min(g).min(b);
let c = c_max - c_min;
let hue = if c == 0.0 {
// If there's no difference in the channels we have grayscale, so the hue is undefined.
0.0
} else {
degrees(60.0) * if c_max == r { fmod(((g - b) / c), 6) }
else if c_max == g { ((b - r) / c) + 2.0 }
else { ((r - g) / c) + 4.0 }
};
let lightness = (c_max + c_min) / 2.0;
let saturation = if lightness == 0.0 { 0.0 }
else { c / (1.0 - (2.0 * lightness - 1.0).abs()) };
(hue, saturation, lightness)
}
/// Pure function for converting hsl to rgb.
pub fn hsl_to_rgb(hue: f32, saturation: f32, lightness: f32) -> (f32, f32, f32) {
let chroma = (1.0 - (2.0 * lightness - 1.0).abs()) * saturation;
let hue = hue / degrees(60.0);
let x = chroma * (1.0 - (fmod(hue, 2) - 1.0).abs());
let (r, g, b) = match hue {
hue if hue < 0.0 => (0.0, 0.0, 0.0),
hue if hue < 1.0 => (chroma, x, 0.0),
hue if hue < 2.0 => (x, chroma, 0.0),
hue if hue < 3.0 => (0.0, chroma, x),
hue if hue < 4.0 => (0.0, x, chroma),
hue if hue < 5.0 => (x, 0.0, chroma),
hue if hue < 6.0 => (chroma, 0.0, x),
_ => (0.0, 0.0, 0.0),
};
let m = lightness - chroma / 2.0;
(r + m, g + m, b + m)
}
/// Linear or Radial Gradient.
#[derive(Clone, Debug)]
pub enum Gradient {
/// Takes a start and end point and then a series of color stops that indicate how to
/// interpolate between the start and end points.
Linear((f64, f64), (f64, f64), Vec<(f64, Color)>),
/// First takes a start point and inner radius. Then takes an end point and outer radius.
/// It then takes a series of color stops that indicate how to interpolate between the
/// inner and outer circles.
Radial((f64, f64), f64, (f64, f64), f64, Vec<(f64, Color)>),
}
/// Create a linear gradient.
pub fn linear(start: (f64, f64), end: (f64, f64), colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Linear(start, end, colors)
}
/// Create a radial gradient.
pub fn radial(start: (f64, f64), start_r: f64,
end: (f64, f64), end_r: f64,
colors: Vec<(f64, Color)>) -> Gradient {
Gradient::Radial(start, start_r, end, end_r, colors)
}
/// Built-in colors.
///
/// These colors come from the
/// [Tango palette](http://tango.freedesktop.org/Tango_Icon_Theme_Guidelines) which provides
/// aesthetically reasonable defaults for colors. Each color also comes with a light and dark
/// version.
/// Scarlet Red - Light - #EF2929
pub fn light_red() -> Color { rgb_bytes(239, 41 , 41 ) }
/// Scarlet Red - Regular - #CC0000
pub fn red() -> Color { rgb_bytes(204, 0 , 0 ) }
/// Scarlet Red - Dark - #A30000
pub fn dark_red() -> Color { rgb_bytes(164, 0 , 0 ) }
/// Orange - Light - #FCAF3E
pub fn light_orange() -> Color { rgb_bytes(252, 175, 62 ) }
/// Orange - Regular - #F57900
pub fn orange() -> Color { rgb_bytes(245, 121, 0 ) }
/// Orange - Dark - #CE5C00
pub fn dark_orange() -> Color { rgb_bytes(206, 92 , 0 ) }
/// Butter - Light - #FCE94F
pub fn light_yellow() -> Color { rgb_bytes(255, 233, 79 ) }
/// Butter - Regular - #EDD400
pub fn yellow() -> Color { rgb_bytes(237, 212, 0 ) }
/// Butter - Dark - #C4A000
pub fn dark_yellow() -> Color { rgb_bytes(196, 160, 0 ) }
/// Chameleon - Light - #8AE234
pub fn light_green() -> Color { rgb_bytes(138, 226, 52 ) }
/// Chameleon - Regular - #73D216
pub fn green() -> Color { rgb_bytes(115, 210, 22 ) }
/// Chameleon - Dark - #4E9A06
pub fn dark_green() -> Color { rgb_bytes(78 , 154, 6 ) }
/// Sky Blue - Light - #729FCF
pub fn light_blue() -> Color { rgb_bytes(114, 159, 207) }
/// Sky Blue - Regular - #3465A4
pub fn blue() -> Color { rgb_bytes(52 , 101, 164) }
/// Sky Blue - Dark - #204A87
pub fn dark_blue() -> Color { rgb_bytes(32 , 74 , 135) }
/// Plum - Light - #AD7FA8
pub fn light_purple() -> Color { rgb_bytes(173, 127, 168) }
/// Plum - Regular - #75507B
pub fn purple() -> Color { rgb_bytes(117, 80 , 123) }
/// Plum - Dark - #5C3566
pub fn dark_purple() -> Color { rgb_bytes(92 , 53 , 102) }
/// Chocolate - Light - #E9B96E
pub fn light_brown() -> Color { rgb_bytes(233, 185, 110) }
/// Chocolate - Regular - #C17D11
pub fn brown() -> Color { rgb_bytes(193, 125, 17 ) }
/// Chocolate - Dark - #8F5902
pub fn dark_brown() -> Color { rgb_bytes(143, 89 , 2 ) }
/// Straight Black.
pub fn black() -> Color { rgb_bytes(0 , 0 , 0 ) }
/// Straight White.
pub fn white() -> Color { rgb_bytes(255, 255, 255) }
/// Alluminium - Light
pub fn light_gray() -> Color { rgb_bytes(238, 238, 236) }
/// Alluminium - Regular
pub fn gray() -> Color { rgb_bytes(211, 215, 207) }
/// Alluminium - Dark
pub fn dark_gray() -> Color { rgb_bytes(186, 189, 182) }
/// Aluminium - Light - #EEEEEC
pub fn light_grey() -> Color { rgb_bytes(238, 238, 236) }
/// Aluminium - Regular - #D3D7CF
pub fn grey() -> Color { rgb_bytes(211, 215, 207) }
/// Aluminium - Dark - #BABDB6
pub fn dark_grey() -> Color { rgb_bytes(186, 189, 182) }
/// Charcoal - Light - #888A85
pub fn light_charcoal() -> Color { rgb_bytes(136, 138, 133) }
/// Charcoal - Regular - #555753
pub fn charcoal() -> Color { rgb_bytes(85 , 87 , 83 ) }
/// Charcoal - Dark - #2E3436
pub fn dark_charcoal() -> Color { rgb_bytes(46 , 52 , 54 ) }
/// Types that can be colored.
pub trait Colorable: Sized {
/// Set the color of the widget.
fn color(self, color: Color) -> Self;
/// Set the color of the widget from rgba values.
fn rgba(self, r: f32, g: f32, b: f32, a: f32) -> Self {
self.color(rgba(r, g, b, a))
}
/// Set the color of the widget from rgb values.
fn rgb(self, r: f32, g: f32, b: f32) -> Self {
self.color(rgb(r, g, b))
}
/// Set the color of the widget from hsla values.
fn hsla(self, h: f32, s: f32, l: f32, a: f32) -> Self {
self.color(hsla(h, s, l, a))
}
/// Set the color of the widget from hsl values.
fn hsl(self, h: f32, s: f32, l: f32) -> Self {
self.color(hsl(h, s, l))
}
}
| {
match *self {
Color::Rgba(r, g, b, _) => (r + g + b) / 3.0,
Color::Hsla(_, _, l, _) => l,
}
} | identifier_body |
lib.rs | //! Hindley–Milner type inference for arithmetic expressions parsed
//! by the [`arithmetic-parser`] crate.
//!
//! This crate allows parsing type annotations as a part of a [`Grammar`], and to infer
//! and check types for expressions / statements produced by `arithmetic-parser`.
//! Type inference is *partially* compatible with the interpreter from [`arithmetic-eval`];
//! if the inference algorithm succeeds on a certain expression / statement / block,
//! it will execute successfully, but not all successfully executing items pass type inference.
//! (An exception here is [`Type::Any`], which is specifically designed to circumvent
//! the type system limitations. If `Any` is used too liberally, it can result in code passing
//! type checks, but failing during execution.)
//!
//! # Type system
//!
//! The type system corresponds to types of `Value`s in `arithmetic-eval`:
//!
//! - Primitive types are customizeable via [`PrimitiveType`] impl. In the simplest case,
//! there can be 2 primitive types: Booleans (`Bool`) and numbers (`Num`),
//! as ecapsulated in [`Num`].
//! - There are two container types - [tuples](Tuple) and [objects](Object).
//! - Tuple types can be represented either
//! in the tuple form, such as `(Num, Bool)`, or as a slice, such as `[Num; 3]`.
//! As in Rust, all slice elements must have the same type. Unlike Rust, tuple and slice
//! forms are equivalent; e.g., `[Num; 3]` and `(Num, Num, Num)` are the same type.
//! - Object types are represented in a brace form, such as `{ x: Num }`. Objects can act as
//! either specific types or type constraints.
//! - Functions are first-class types. Functions can have type and/or const params.
//! Const params always specify tuple length.
//! - Type params can be constrained. Constraints are expressed via [`Constraint`]s.
//! As an example, [`Num`] has a few known constraints, such as type [`Linearity`].
//!
//! [`Constraint`]: crate::arith::Constraint
//! [`Num`]: crate::arith::Num
//! [`Linearity`]: crate::arith::Linearity
//!
//! # Inference rules
//!
//! Inference mostly corresponds to [Hindley–Milner typing rules]. It does not require
//! type annotations, but utilizes them if present. Type unification (encapsulated in
//! [`Substitutions`]) is performed at each variable use or assignment. Variable uses include
//! function calls and unary and binary ops; the op behavior is customizable
//! via [`TypeArithmetic`].
//!
//! Whenever possible, the most generic type satisfying the constraints is used. In particular,
//! this means that all type / length variables not resolved at the function definition site become
//! parameters of the function. Likewise, each function call instantiates a separate instance
//! of a generic function; type / length params for each call are assigned independently.
//! See the example below for more details.
//!
//! [Hindley–Milner typing rules]: https://en.wikipedia.org/wiki/Hindley%E2%80%93Milner_type_system#Typing_rules
//! [`Substitutions`]: crate::arith::Substitutions
//! [`TypeArithmetic`]: crate::arith::TypeArithmetic
//!
//! # Operations
//!
//! ## Field access
//!
//! See [`Tuple` docs](Tuple#indexing) for discussion of indexing expressions, such as `xs.0`,
//! and [`Object` docs](Object) for discussion of field access, such as `point.x`.
//!
//! ## Type casts
//!
//! [A type cast](arithmetic_parser::Expr::TypeCast) is equivalent to introducing a new var
//! with the specified annotation, assigning to it and returning the new var. That is,
//! `x as Bool` is equivalent to `{ _x: Bool = x; _x }`. As such, casts are safe (cannot be used
//! to transmute the type arbitrarily), unless `any` type is involved.
//!
//! # Examples
//!
//! ```
//! use arithmetic_parser::grammars::{F32Grammar, Parse};
//! use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//!
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum = |xs| xs.fold(0, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! // Evaluate `code` to get the inferred `sum` function signature.
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(env["sum"].to_string(), "([Num; N]) -> Num");
//! # Ok(())
//! # }
//! ```
//!
//! Defining and using generic functions:
//!
//! ```
//! # use arithmetic_parser::grammars::{F32Grammar, Parse};
//! # use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum_with = |xs, init| xs.fold(init, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(
//! env["sum_with"].to_string(),
//! "for<'T: Ops> (['T; N], 'T) -> 'T"
//! );
//! // Note that `sum_with` is parametric by the element of the slice
//! // (for which the linearity constraint is applied based on the arg usage)
//! // *and* by its length.
//!
//! let usage_code = r#"
//! num_sum: Num = (1, 2, 3).sum_with(0);
//! tuple_sum: (Num, Num) = ((1, 2), (3, 4)).sum_with((0, 0));
//! "#;
//! let ast = Annotated::<F32Grammar>::parse_statements(usage_code)?;
//! // Both lengths and element types differ in these invocations,
//! // but it works fine since they are treated independently.
//! env.process_statements(&ast)?;
//! # Ok(())
//! # }
//! ```
//!
//! [`arithmetic-parser`]: https://crates.io/crates/arithmetic-parser
//! [`Grammar`]: arithmetic_parser::grammars::Grammar
//! [`arithmetic-eval`]: https://crates.io/crates/arithmetic-eval
#![doc(html_root_url = "https://docs.rs/arithmetic-typing/0.3.0")]
#![warn(missing_docs, missing_debug_implementations)]
#![warn(clippy::all, clippy::pedantic)]
#![allow(
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::module_name_repetitions,
clippy::similar_names, // too many false positives because of lhs / rhs
clippy::option_if_let_else // too many false positives
)]
use std::{fmt, marker::PhantomData, str::FromStr};
use arithmetic_parser::{
grammars::{Features, Grammar, Parse, ParseLiteral},
InputSpan, NomResult,
};
pub mod arith;
pub mod ast;
pub mod defs;
mod env;
pub mod error;
mod types;
pub mod visit;
pub use self::{
env::TypeEnvironment,
types::{
DynConstraints, FnWithConstraints, Function, FunctionBuilder, LengthVar, Object, Slice,
Tuple, TupleIndex, TupleLen, Type, TypeVar, UnknownLen,
},
};
use self::{arith::ConstraintSet, ast::TypeAst};
/// Primitive types in a certain type system.
///
/// More complex types, like [`Type`] and [`Function`], are defined with a type param
/// which determines the primitive type(s). This type param must implement [`PrimitiveType`].
///
/// [`TypeArithmetic`] has a `PrimitiveType` impl as an associated type, and one of the required
/// operations of this trait is to be able to infer type for literal values from a [`Grammar`].
///
/// # Implementation Requirements
///
/// - [`Display`](fmt::Display) and [`FromStr`] implementations must be consistent; i.e.,
/// `Display` should produce output parseable by `FromStr`. `Display` will be used in
/// `Display` impls for `Type` etc. `FromStr` will be used to read type annotations.
/// - `Display` presentations must be identifiers, such as `Num`.
/// - While not required, a `PrimitiveType` should usually contain a Boolean type and
/// implement [`WithBoolean`]. This allows to reuse [`BoolArithmetic`] and/or [`NumArithmetic`]
/// as building blocks for your [`TypeArithmetic`].
///
/// [`Grammar`]: arithmetic_parser::grammars::Grammar
/// [`TypeArithmetic`]: crate::arith::TypeArithmetic
/// [`WithBoolean`]: crate::arith::WithBoolean
/// [`BoolArithmetic`]: crate::arith::BoolArithmetic
/// [`NumArithmetic`]: crate::arith::NumArithmetic
///
/// # Examples
///
/// ```
/// # use std::{fmt, str::FromStr};
/// use arithmetic_typing::PrimitiveType;
///
/// #[derive(Debug, Clone, Copy, PartialEq)]
/// enum NumOrBytes {
/// /// Numeric value, such as 1.
/// Num,
/// /// Bytes value, such as 0x1234 or "hello".
/// Bytes,
/// }
///
/// // `NumOrBytes` should correspond to a "value" type in the `Grammar`,
/// // for example:
/// enum NumOrBytesValue {
/// Num(f64),
/// Bytes(Vec<u8>),
/// }
///
/// impl fmt::Display for NumOrBytes {
/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
/// match self {
/// Self::Num => formatter.write_str("Num"),
/// Self::Bytes => formatter.write_str("Bytes"),
/// }
/// }
/// }
///
/// impl FromStr for NumOrBytes {
/// type Err = anyhow::Error;
///
/// fn from_str(s: &str) -> Result<Self, Self::Err> {
/// match s {
/// "Num" => Ok(Self::Num),
/// "Bytes" => Ok(Self::Bytes),
/// _ => Err(anyhow::anyhow!("expected `Num` or `Bytes`")),
/// }
/// }
/// }
///
/// impl PrimitiveType for NumOrBytes {}
/// ```
pub trait PrimitiveType:
Clone + PartialEq + fmt::Debug + fmt::Display + FromStr + Send + Sync +'static
{
/// Returns well-known constraints for this type. These constraints are used
/// in standalone parsing of type signatures.
///
/// The default implementation returns an empty set.
fn well_known_constraints() -> ConstraintSet<Self> {
| / Grammar with support of type annotations. Works as a decorator.
///
/// # Examples
///
/// ```
/// use arithmetic_parser::grammars::{F32Grammar, Parse};
/// use arithmetic_typing::Annotated;
///
/// # fn main() -> anyhow::Result<()> {
/// let code = "x: [Num] = (1, 2, 3);";
/// let ast = Annotated::<F32Grammar>::parse_statements(code)?;
/// # assert_eq!(ast.statements.len(), 1);
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub struct Annotated<T>(PhantomData<T>);
impl<T: ParseLiteral> ParseLiteral for Annotated<T> {
type Lit = T::Lit;
fn parse_literal(input: InputSpan<'_>) -> NomResult<'_, Self::Lit> {
<T as ParseLiteral>::parse_literal(input)
}
}
impl<'a, T: ParseLiteral> Grammar<'a> for Annotated<T> {
type Type = TypeAst<'a>;
fn parse_type(input: InputSpan<'a>) -> NomResult<'a, Self::Type> {
use nom::combinator::map;
map(TypeAst::parse, |ast| ast.extra)(input)
}
}
/// Supports all syntax features.
impl<T: ParseLiteral> Parse<'_> for Annotated<T> {
type Base = Self;
const FEATURES: Features = Features::all();
}
| ConstraintSet::default()
}
}
// | identifier_body |
lib.rs | //! Hindley–Milner type inference for arithmetic expressions parsed
//! by the [`arithmetic-parser`] crate.
//!
//! This crate allows parsing type annotations as a part of a [`Grammar`], and to infer
//! and check types for expressions / statements produced by `arithmetic-parser`.
//! Type inference is *partially* compatible with the interpreter from [`arithmetic-eval`];
//! if the inference algorithm succeeds on a certain expression / statement / block,
//! it will execute successfully, but not all successfully executing items pass type inference.
//! (An exception here is [`Type::Any`], which is specifically designed to circumvent
//! the type system limitations. If `Any` is used too liberally, it can result in code passing
//! type checks, but failing during execution.)
//!
//! # Type system
//!
//! The type system corresponds to types of `Value`s in `arithmetic-eval`:
//!
//! - Primitive types are customizeable via [`PrimitiveType`] impl. In the simplest case,
//! there can be 2 primitive types: Booleans (`Bool`) and numbers (`Num`),
//! as ecapsulated in [`Num`].
//! - There are two container types - [tuples](Tuple) and [objects](Object).
//! - Tuple types can be represented either
//! in the tuple form, such as `(Num, Bool)`, or as a slice, such as `[Num; 3]`.
//! As in Rust, all slice elements must have the same type. Unlike Rust, tuple and slice
//! forms are equivalent; e.g., `[Num; 3]` and `(Num, Num, Num)` are the same type.
//! - Object types are represented in a brace form, such as `{ x: Num }`. Objects can act as
//! either specific types or type constraints.
//! - Functions are first-class types. Functions can have type and/or const params.
//! Const params always specify tuple length.
//! - Type params can be constrained. Constraints are expressed via [`Constraint`]s.
//! As an example, [`Num`] has a few known constraints, such as type [`Linearity`].
//!
//! [`Constraint`]: crate::arith::Constraint
//! [`Num`]: crate::arith::Num
//! [`Linearity`]: crate::arith::Linearity
//!
//! # Inference rules
//!
//! Inference mostly corresponds to [Hindley–Milner typing rules]. It does not require
//! type annotations, but utilizes them if present. Type unification (encapsulated in
//! [`Substitutions`]) is performed at each variable use or assignment. Variable uses include
//! function calls and unary and binary ops; the op behavior is customizable
//! via [`TypeArithmetic`].
//!
//! Whenever possible, the most generic type satisfying the constraints is used. In particular,
//! this means that all type / length variables not resolved at the function definition site become
//! parameters of the function. Likewise, each function call instantiates a separate instance
//! of a generic function; type / length params for each call are assigned independently.
//! See the example below for more details.
//!
//! [Hindley–Milner typing rules]: https://en.wikipedia.org/wiki/Hindley%E2%80%93Milner_type_system#Typing_rules
//! [`Substitutions`]: crate::arith::Substitutions
//! [`TypeArithmetic`]: crate::arith::TypeArithmetic
//!
//! # Operations
//!
//! ## Field access
//!
//! See [`Tuple` docs](Tuple#indexing) for discussion of indexing expressions, such as `xs.0`,
//! and [`Object` docs](Object) for discussion of field access, such as `point.x`.
//!
//! ## Type casts
//!
//! [A type cast](arithmetic_parser::Expr::TypeCast) is equivalent to introducing a new var
//! with the specified annotation, assigning to it and returning the new var. That is,
//! `x as Bool` is equivalent to `{ _x: Bool = x; _x }`. As such, casts are safe (cannot be used
//! to transmute the type arbitrarily), unless `any` type is involved.
//!
//! # Examples
//!
//! ```
//! use arithmetic_parser::grammars::{F32Grammar, Parse};
//! use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//!
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum = |xs| xs.fold(0, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! // Evaluate `code` to get the inferred `sum` function signature.
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(env["sum"].to_string(), "([Num; N]) -> Num");
//! # Ok(())
//! # }
//! ```
//!
//! Defining and using generic functions:
//!
//! ```
//! # use arithmetic_parser::grammars::{F32Grammar, Parse};
//! # use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum_with = |xs, init| xs.fold(init, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(
//! env["sum_with"].to_string(),
//! "for<'T: Ops> (['T; N], 'T) -> 'T"
//! );
//! // Note that `sum_with` is parametric by the element of the slice
//! // (for which the linearity constraint is applied based on the arg usage)
//! // *and* by its length.
//!
//! let usage_code = r#"
//! num_sum: Num = (1, 2, 3).sum_with(0);
//! tuple_sum: (Num, Num) = ((1, 2), (3, 4)).sum_with((0, 0));
//! "#;
//! let ast = Annotated::<F32Grammar>::parse_statements(usage_code)?;
//! // Both lengths and element types differ in these invocations,
//! // but it works fine since they are treated independently.
//! env.process_statements(&ast)?;
//! # Ok(())
//! # }
//! ```
//!
//! [`arithmetic-parser`]: https://crates.io/crates/arithmetic-parser
//! [`Grammar`]: arithmetic_parser::grammars::Grammar
//! [`arithmetic-eval`]: https://crates.io/crates/arithmetic-eval
#![doc(html_root_url = "https://docs.rs/arithmetic-typing/0.3.0")]
#![warn(missing_docs, missing_debug_implementations)]
#![warn(clippy::all, clippy::pedantic)]
#![allow(
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::module_name_repetitions,
clippy::similar_names, // too many false positives because of lhs / rhs
clippy::option_if_let_else // too many false positives
)]
use std::{fmt, marker::PhantomData, str::FromStr};
use arithmetic_parser::{
grammars::{Features, Grammar, Parse, ParseLiteral},
InputSpan, NomResult,
};
pub mod arith;
pub mod ast;
pub mod defs;
mod env;
pub mod error;
mod types;
pub mod visit;
pub use self::{
env::TypeEnvironment,
types::{
DynConstraints, FnWithConstraints, Function, FunctionBuilder, LengthVar, Object, Slice,
Tuple, TupleIndex, TupleLen, Type, TypeVar, UnknownLen,
},
};
use self::{arith::ConstraintSet, ast::TypeAst};
/// Primitive types in a certain type system.
///
/// More complex types, like [`Type`] and [`Function`], are defined with a type param
/// which determines the primitive type(s). This type param must implement [`PrimitiveType`].
///
/// [`TypeArithmetic`] has a `PrimitiveType` impl as an associated type, and one of the required
/// operations of this trait is to be able to infer type for literal values from a [`Grammar`].
///
/// # Implementation Requirements
///
/// - [`Display`](fmt::Display) and [`FromStr`] implementations must be consistent; i.e.,
/// `Display` should produce output parseable by `FromStr`. `Display` will be used in
/// `Display` impls for `Type` etc. `FromStr` will be used to read type annotations.
/// - `Display` presentations must be identifiers, such as `Num`.
/// - While not required, a `PrimitiveType` should usually contain a Boolean type and
/// implement [`WithBoolean`]. This allows to reuse [`BoolArithmetic`] and/or [`NumArithmetic`]
/// as building blocks for your [`TypeArithmetic`].
///
/// [`Grammar`]: arithmetic_parser::grammars::Grammar
/// [`TypeArithmetic`]: crate::arith::TypeArithmetic
/// [`WithBoolean`]: crate::arith::WithBoolean
/// [`BoolArithmetic`]: crate::arith::BoolArithmetic
/// [`NumArithmetic`]: crate::arith::NumArithmetic
///
/// # Examples
///
/// ```
/// # use std::{fmt, str::FromStr};
/// use arithmetic_typing::PrimitiveType;
///
/// #[derive(Debug, Clone, Copy, PartialEq)]
/// enum NumOrBytes {
/// /// Numeric value, such as 1.
/// Num,
/// /// Bytes value, such as 0x1234 or "hello".
/// Bytes,
/// }
///
/// // `NumOrBytes` should correspond to a "value" type in the `Grammar`,
/// // for example:
/// enum NumOrBytesValue {
/// Num(f64),
/// Bytes(Vec<u8>),
/// }
///
/// impl fmt::Display for NumOrBytes {
/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
/// match self {
/// Self::Num => formatter.write_str("Num"),
/// Self::Bytes => formatter.write_str("Bytes"),
/// }
/// }
/// }
///
/// impl FromStr for NumOrBytes {
/// type Err = anyhow::Error;
///
/// fn from_str(s: &str) -> Result<Self, Self::Err> {
/// match s {
/// "Num" => Ok(Self::Num),
/// "Bytes" => Ok(Self::Bytes),
/// _ => Err(anyhow::anyhow!("expected `Num` or `Bytes`")),
/// }
/// }
/// }
///
/// impl PrimitiveType for NumOrBytes {}
/// ```
pub trait PrimitiveType:
Clone + PartialEq + fmt::Debug + fmt::Display + FromStr + Send + Sync +'static
{
/// Returns well-known constraints for this type. These constraints are used
/// in standalone parsing of type signatures.
///
/// The default implementation returns an empty set.
fn well_known_constraints() -> ConstraintSet<Self> {
ConstraintSet::default()
}
}
/// Grammar with support of type annotations. Works as a decorator.
///
/// # Examples
///
/// ```
/// use arithmetic_parser::grammars::{F32Grammar, Parse};
/// use arithmetic_typing::Annotated;
///
/// # fn main() -> anyhow::Result<()> {
/// let code = "x: [Num] = (1, 2, 3);";
/// let ast = Annotated::<F32Grammar>::parse_statements(code)?;
/// # assert_eq!(ast.statements.len(), 1);
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub struct Annotated<T>(PhantomData<T>);
impl<T: ParseLiteral> ParseLiteral for Annotated<T> {
type Lit = T::Lit;
fn parse_literal(input: InputSpan<'_>) -> NomResult<'_, Self::Lit> {
<T as ParseLiteral>::parse_literal(input)
}
}
impl<'a, T: ParseLiteral> Grammar<'a> for Annotated<T> {
type Type = TypeAst<'a>;
fn parse_ | : InputSpan<'a>) -> NomResult<'a, Self::Type> {
use nom::combinator::map;
map(TypeAst::parse, |ast| ast.extra)(input)
}
}
/// Supports all syntax features.
impl<T: ParseLiteral> Parse<'_> for Annotated<T> {
type Base = Self;
const FEATURES: Features = Features::all();
}
| type(input | identifier_name |
lib.rs | //! Hindley–Milner type inference for arithmetic expressions parsed
//! by the [`arithmetic-parser`] crate.
//!
//! This crate allows parsing type annotations as a part of a [`Grammar`], and to infer
//! and check types for expressions / statements produced by `arithmetic-parser`.
//! Type inference is *partially* compatible with the interpreter from [`arithmetic-eval`];
//! if the inference algorithm succeeds on a certain expression / statement / block,
//! it will execute successfully, but not all successfully executing items pass type inference.
//! (An exception here is [`Type::Any`], which is specifically designed to circumvent
//! the type system limitations. If `Any` is used too liberally, it can result in code passing
//! type checks, but failing during execution.)
//!
//! # Type system
//!
//! The type system corresponds to types of `Value`s in `arithmetic-eval`:
//!
//! - Primitive types are customizeable via [`PrimitiveType`] impl. In the simplest case,
//! there can be 2 primitive types: Booleans (`Bool`) and numbers (`Num`),
//! as ecapsulated in [`Num`].
//! - There are two container types - [tuples](Tuple) and [objects](Object).
//! - Tuple types can be represented either
//! in the tuple form, such as `(Num, Bool)`, or as a slice, such as `[Num; 3]`.
//! As in Rust, all slice elements must have the same type. Unlike Rust, tuple and slice
//! forms are equivalent; e.g., `[Num; 3]` and `(Num, Num, Num)` are the same type.
//! - Object types are represented in a brace form, such as `{ x: Num }`. Objects can act as
//! either specific types or type constraints.
//! - Functions are first-class types. Functions can have type and/or const params.
//! Const params always specify tuple length.
//! - Type params can be constrained. Constraints are expressed via [`Constraint`]s.
//! As an example, [`Num`] has a few known constraints, such as type [`Linearity`].
//!
//! [`Constraint`]: crate::arith::Constraint
//! [`Num`]: crate::arith::Num
//! [`Linearity`]: crate::arith::Linearity
//!
//! # Inference rules
//!
//! Inference mostly corresponds to [Hindley–Milner typing rules]. It does not require
//! type annotations, but utilizes them if present. Type unification (encapsulated in
//! [`Substitutions`]) is performed at each variable use or assignment. Variable uses include
//! function calls and unary and binary ops; the op behavior is customizable
//! via [`TypeArithmetic`].
//!
//! Whenever possible, the most generic type satisfying the constraints is used. In particular,
//! this means that all type / length variables not resolved at the function definition site become
//! parameters of the function. Likewise, each function call instantiates a separate instance
//! of a generic function; type / length params for each call are assigned independently.
//! See the example below for more details.
//!
//! [Hindley–Milner typing rules]: https://en.wikipedia.org/wiki/Hindley%E2%80%93Milner_type_system#Typing_rules
//! [`Substitutions`]: crate::arith::Substitutions
//! [`TypeArithmetic`]: crate::arith::TypeArithmetic
//!
//! # Operations
//!
//! ## Field access
//!
//! See [`Tuple` docs](Tuple#indexing) for discussion of indexing expressions, such as `xs.0`,
//! and [`Object` docs](Object) for discussion of field access, such as `point.x`.
//!
//! ## Type casts
//!
//! [A type cast](arithmetic_parser::Expr::TypeCast) is equivalent to introducing a new var
//! with the specified annotation, assigning to it and returning the new var. That is,
//! `x as Bool` is equivalent to `{ _x: Bool = x; _x }`. As such, casts are safe (cannot be used
//! to transmute the type arbitrarily), unless `any` type is involved.
//!
//! # Examples
//!
//! ```
//! use arithmetic_parser::grammars::{F32Grammar, Parse};
//! use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//!
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum = |xs| xs.fold(0, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! // Evaluate `code` to get the inferred `sum` function signature.
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(env["sum"].to_string(), "([Num; N]) -> Num");
//! # Ok(())
//! # }
//! ```
//!
//! Defining and using generic functions:
//!
//! ```
//! # use arithmetic_parser::grammars::{F32Grammar, Parse};
//! # use arithmetic_typing::{defs::Prelude, Annotated, TypeEnvironment, Type};
//! # fn main() -> anyhow::Result<()> {
//! let code = "sum_with = |xs, init| xs.fold(init, |acc, x| acc + x);";
//! let ast = Annotated::<F32Grammar>::parse_statements(code)?;
//!
//! let mut env = TypeEnvironment::new();
//! env.insert("fold", Prelude::Fold);
//!
//! let output_type = env.process_statements(&ast)?;
//! assert!(output_type.is_void());
//! assert_eq!(
//! env["sum_with"].to_string(),
//! "for<'T: Ops> (['T; N], 'T) -> 'T"
//! );
//! // Note that `sum_with` is parametric by the element of the slice
//! // (for which the linearity constraint is applied based on the arg usage)
//! // *and* by its length.
//!
//! let usage_code = r#"
//! num_sum: Num = (1, 2, 3).sum_with(0);
//! tuple_sum: (Num, Num) = ((1, 2), (3, 4)).sum_with((0, 0));
//! "#;
//! let ast = Annotated::<F32Grammar>::parse_statements(usage_code)?;
//! // Both lengths and element types differ in these invocations,
//! // but it works fine since they are treated independently.
//! env.process_statements(&ast)?;
//! # Ok(())
//! # }
//! ```
//!
//! [`arithmetic-parser`]: https://crates.io/crates/arithmetic-parser
//! [`Grammar`]: arithmetic_parser::grammars::Grammar
//! [`arithmetic-eval`]: https://crates.io/crates/arithmetic-eval
#![doc(html_root_url = "https://docs.rs/arithmetic-typing/0.3.0")]
#![warn(missing_docs, missing_debug_implementations)]
#![warn(clippy::all, clippy::pedantic)]
#![allow(
clippy::missing_errors_doc,
clippy::must_use_candidate,
clippy::module_name_repetitions,
clippy::similar_names, // too many false positives because of lhs / rhs
clippy::option_if_let_else // too many false positives
)]
use std::{fmt, marker::PhantomData, str::FromStr};
use arithmetic_parser::{
grammars::{Features, Grammar, Parse, ParseLiteral},
InputSpan, NomResult,
};
pub mod arith;
pub mod ast;
pub mod defs;
mod env;
pub mod error;
mod types;
pub mod visit;
pub use self::{
env::TypeEnvironment,
types::{
DynConstraints, FnWithConstraints, Function, FunctionBuilder, LengthVar, Object, Slice,
Tuple, TupleIndex, TupleLen, Type, TypeVar, UnknownLen,
},
};
use self::{arith::ConstraintSet, ast::TypeAst};
/// Primitive types in a certain type system.
///
/// More complex types, like [`Type`] and [`Function`], are defined with a type param
/// which determines the primitive type(s). This type param must implement [`PrimitiveType`].
///
/// [`TypeArithmetic`] has a `PrimitiveType` impl as an associated type, and one of the required
/// operations of this trait is to be able to infer type for literal values from a [`Grammar`].
///
/// # Implementation Requirements
///
/// - [`Display`](fmt::Display) and [`FromStr`] implementations must be consistent; i.e.,
/// `Display` should produce output parseable by `FromStr`. `Display` will be used in
/// `Display` impls for `Type` etc. `FromStr` will be used to read type annotations.
/// - `Display` presentations must be identifiers, such as `Num`.
/// - While not required, a `PrimitiveType` should usually contain a Boolean type and
/// implement [`WithBoolean`]. This allows to reuse [`BoolArithmetic`] and/or [`NumArithmetic`]
/// as building blocks for your [`TypeArithmetic`].
///
/// [`Grammar`]: arithmetic_parser::grammars::Grammar
/// [`TypeArithmetic`]: crate::arith::TypeArithmetic
/// [`WithBoolean`]: crate::arith::WithBoolean
/// [`BoolArithmetic`]: crate::arith::BoolArithmetic
/// [`NumArithmetic`]: crate::arith::NumArithmetic
///
/// # Examples
///
/// ```
/// # use std::{fmt, str::FromStr};
/// use arithmetic_typing::PrimitiveType;
///
/// #[derive(Debug, Clone, Copy, PartialEq)]
/// enum NumOrBytes {
/// /// Numeric value, such as 1.
/// Num,
/// /// Bytes value, such as 0x1234 or "hello".
/// Bytes,
/// }
///
/// // `NumOrBytes` should correspond to a "value" type in the `Grammar`,
/// // for example:
/// enum NumOrBytesValue {
/// Num(f64),
/// Bytes(Vec<u8>),
/// }
///
/// impl fmt::Display for NumOrBytes {
/// fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
/// match self {
/// Self::Num => formatter.write_str("Num"),
/// Self::Bytes => formatter.write_str("Bytes"),
/// }
/// }
/// }
///
/// impl FromStr for NumOrBytes {
/// type Err = anyhow::Error;
///
/// fn from_str(s: &str) -> Result<Self, Self::Err> {
/// match s {
/// "Num" => Ok(Self::Num),
/// "Bytes" => Ok(Self::Bytes),
/// _ => Err(anyhow::anyhow!("expected `Num` or `Bytes`")),
/// }
/// }
/// }
///
/// impl PrimitiveType for NumOrBytes {}
/// ```
pub trait PrimitiveType:
Clone + PartialEq + fmt::Debug + fmt::Display + FromStr + Send + Sync +'static
{
/// Returns well-known constraints for this type. These constraints are used
/// in standalone parsing of type signatures.
///
/// The default implementation returns an empty set.
fn well_known_constraints() -> ConstraintSet<Self> {
ConstraintSet::default()
}
}
/// Grammar with support of type annotations. Works as a decorator.
///
/// # Examples
///
/// ```
/// use arithmetic_parser::grammars::{F32Grammar, Parse};
/// use arithmetic_typing::Annotated;
///
/// # fn main() -> anyhow::Result<()> { | /// # assert_eq!(ast.statements.len(), 1);
/// # Ok(())
/// # }
/// ```
#[derive(Debug)]
pub struct Annotated<T>(PhantomData<T>);
impl<T: ParseLiteral> ParseLiteral for Annotated<T> {
type Lit = T::Lit;
fn parse_literal(input: InputSpan<'_>) -> NomResult<'_, Self::Lit> {
<T as ParseLiteral>::parse_literal(input)
}
}
impl<'a, T: ParseLiteral> Grammar<'a> for Annotated<T> {
type Type = TypeAst<'a>;
fn parse_type(input: InputSpan<'a>) -> NomResult<'a, Self::Type> {
use nom::combinator::map;
map(TypeAst::parse, |ast| ast.extra)(input)
}
}
/// Supports all syntax features.
impl<T: ParseLiteral> Parse<'_> for Annotated<T> {
type Base = Self;
const FEATURES: Features = Features::all();
} | /// let code = "x: [Num] = (1, 2, 3);";
/// let ast = Annotated::<F32Grammar>::parse_statements(code)?; | random_line_split |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::Instant;
use clap::{App, Arg};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use rayon::iter::IndexedParallelIterator;
use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;
use selecting_flow::compute_graph::activation::rectified_linear::ReLU;
use selecting_flow::compute_graph::activation::softmax_with_loss::SoftmaxWithLoss;
use selecting_flow::compute_graph::fully_connected_layer::{ApplyFullyConnectedLayer, FullyConnectedLayer};
use selecting_flow::compute_graph::input_box::InputBox;
use selecting_flow::compute_graph::{ExactDimensionComputeGraphNode, GraphNode};
use selecting_flow::data_types::{Sparse, TensorEitherOwned};
use selecting_flow::hasher::sim_hash::SimHash;
use selecting_flow::hasher::FullyConnectedHasher;
use selecting_flow::optimizer::adam::Adam;
fn main() {
let arg = App::new("shield_example")
.arg(Arg::with_name("label").help("path to trn_lbl_mat.txt").long("label").short("l").takes_value(true).required(true))
.arg(Arg::with_name("feature").help("path to trn_ft_mat.txt").long("feature").short("f").takes_value(true).required(true))
.get_matches();
let labels = arg.value_of("label").unwrap();
let features = arg.value_of("feature").unwrap();
eprintln!("boot");
let train_data = read_train_data(labels, features);
eprintln!("load train_data");
train(train_data);
}
fn train(train_data: TrainData) {
let TrainData {
input_size,
output_size,
mut data_pair,
} = train_data;
const NUM_ITERATION: usize = 5;
const MINI_BATCH_SIZE: usize = 256;
const REBUILD_DELTA_INC: f64 = 1.05;
const TRAIN_DATA_RATIO: f64 = 0.95;
let (data_pair_train, data_pair_test) = {
let mid = (data_pair.len() as f64 * TRAIN_DATA_RATIO) as usize;
data_pair.shuffle(&mut thread_rng());
data_pair.split_at_mut(mid)
};
let time = Instant::now();
let mut layer1 = FullyConnectedLayer::new_random_param(input_size, 128, SimHash::new(50, 6, 128, 1, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer1 in {}ms", time.elapsed().as_millis());
let time = Instant::now();
let mut layer2 = FullyConnectedLayer::new_random_param(128, output_size, SimHash::new(50, 8, 4096, 3, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer2 in {}ms", time.elapsed().as_millis());
let mut next_rebuild = 49;
let mut rebuild_delta = 50;
let parallel_num = num_cpus::get();
dbg!(parallel_num);
eprintln!("start training");
let time = Instant::now();
let mini_batch_count = (data_pair_train.len() + MINI_BATCH_SIZE - 1) / MINI_BATCH_SIZE;
dbg!(data_pair_train.len());
dbg!(mini_batch_count);
println!("log_type,iteration,time_ms,accuracy,loss");
for e in 0..NUM_ITERATION {
data_pair_train.shuffle(&mut thread_rng());
for i in 0..mini_batch_count {
dbg!(e);
dbg!(i);
let batch_range = i * MINI_BATCH_SIZE..((i + 1) * MINI_BATCH_SIZE).min(data_pair_train.len());
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(&data_pair_train[batch_range.clone()], parallel_num, true, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"train_log,{},{},{},{}",
e * mini_batch_count + i,
time.elapsed().as_millis(),
sum_of_accuracy / batch_range.len() as f64,
sum_of_loss / batch_range.len() as f64,
);
layer1.update_parameter();
layer2.update_parameter();
if e * mini_batch_count + i >= next_rebuild {
layer1.rebuild_hash();
layer2.rebuild_hash();
rebuild_delta = (rebuild_delta as f64 * REBUILD_DELTA_INC) as usize;
next_rebuild += rebuild_delta;
}
}
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(data_pair_test, parallel_num, false, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_unhash_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"test_log,{},{},{},{}",
(e + 1) * mini_batch_count,
time.elapsed().as_millis(),
sum_of_accuracy / data_pair_test.len() as f64,
sum_of_loss / data_pair_test.len() as f64,
);
}
}
fn process_mini_batch<I:'static + ExactDimensionComputeGraphNode<1, Item = f32>, H: FullyConnectedHasher<f32, f32>>(
data_pair: &[TrainDataPair],
parallel_num: usize,
back_propagate: bool,
construct_layers: impl Sync + Fn() -> (GraphNode<InputBox<f32, 1>, 1>, GraphNode<ApplyFullyConnectedLayer<I, f32, H, SoftmaxWithLoss<f32>, 0>, 0>),
) -> (f64, f64) {
crossbeam::scope(|scope| {
let mut threads = Vec::with_capacity(parallel_num);
for t in 0..parallel_num {
let range = t * data_pair.len() / parallel_num..(t + 1) * data_pair.len() / parallel_num;
threads.push(scope.spawn(|_| {
let (mut input, mut output) = construct_layers();
let mut sum_of_loss = 0f64;
let mut accuracy = 0f64;
for data in &data_pair[range] {
let TrainDataPair {
input: input_value,
output: output_value,
} = &data;
assert_eq!(output_value.value_count(), 1);
input.set_value(input_value.clone().into());
output.set_expect_output(output_value.clone());
let output_loss = output.get_output_value();
let output_without_loss = output.get_output_without_loss();
accuracy += match &output_without_loss {
TensorEitherOwned::Dense(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.as_all_slice().iter().enumerate().all(|(i, v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
TensorEitherOwned::Sparse(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.iter().all(|([i], v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
};
sum_of_loss += *output_loss.get([]).unwrap() as f64;
if back_propagate {
output.clear_gradient_all();
output.back_propagate_all();
}
}
(sum_of_loss, accuracy)
}));
}
threads.into_iter().fold((0f64, 0f64), |(sum_loss, sum_accuracy), handle| {
let (loss, accuracy) = handle.join().unwrap();
(sum_loss + loss, sum_accuracy + accuracy)
})
})
.expect("failed to use thread")
}
struct TrainData {
input_size: usize,
output_size: usize,
data_pair: Vec<TrainDataPair>,
}
struct TrainDataPair {
input: Sparse<f32, 1>,
output: Sparse<f32, 1>,
}
impl TrainDataPair {
fn new(input: Sparse<f32, 1>, output: Sparse<f32, 1>) -> Self |
}
fn read_train_data(labels: impl AsRef<Path>, features: impl AsRef<Path>) -> TrainData {
let (output_size, labels) = read_file_as_tensors(labels);
let (input_size, features) = read_file_as_tensors(features);
let data_pair = labels
.into_par_iter()
.zip_eq(features)
.filter(|(output, _)| output.value_count() == 1)
.map(|(output, input)| TrainDataPair::new(input, output))
.collect();
TrainData { input_size, output_size, data_pair }
}
fn read_file_as_tensors(path: impl AsRef<Path>) -> (usize, Vec<Sparse<f32, 1>>) {
let path = path.as_ref();
let failed_read = &format!("failed to read file {}", path.display());
let invalid_format = &format!("invalid file format {}", path.display());
let file = File::open(path).expect("failed to open feature file");
let file = BufReader::new(file);
let mut file = file.lines();
let [len, tensor_width]: [usize; 2] = file
.next()
.expect(failed_read)
.expect(failed_read)
.trim()
.split_whitespace()
.map(|s| s.parse().expect(invalid_format))
.collect::<Vec<_>>()
.try_into()
.expect(invalid_format);
let tensor_list = file
.map(|s| {
let s = s.expect(failed_read);
let mut input = Sparse::new([tensor_width]);
s.trim().split_whitespace().for_each(|s| {
let [index, value]: [_; 2] = s.trim().split(':').collect::<Vec<_>>().try_into().expect(invalid_format);
let index = index.parse().expect(invalid_format);
assert!(index < tensor_width, "{}", invalid_format);
input.set([index], value.parse().expect(invalid_format));
});
input
})
.collect::<Vec<_>>();
assert_eq!(tensor_list.len(), len, "{}", invalid_format);
(tensor_width, tensor_list)
}
| {
TrainDataPair { input, output }
} | identifier_body |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::Instant;
use clap::{App, Arg};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use rayon::iter::IndexedParallelIterator;
use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;
use selecting_flow::compute_graph::activation::rectified_linear::ReLU;
use selecting_flow::compute_graph::activation::softmax_with_loss::SoftmaxWithLoss;
use selecting_flow::compute_graph::fully_connected_layer::{ApplyFullyConnectedLayer, FullyConnectedLayer};
use selecting_flow::compute_graph::input_box::InputBox;
use selecting_flow::compute_graph::{ExactDimensionComputeGraphNode, GraphNode};
use selecting_flow::data_types::{Sparse, TensorEitherOwned};
use selecting_flow::hasher::sim_hash::SimHash;
use selecting_flow::hasher::FullyConnectedHasher;
use selecting_flow::optimizer::adam::Adam;
fn main() {
let arg = App::new("shield_example")
.arg(Arg::with_name("label").help("path to trn_lbl_mat.txt").long("label").short("l").takes_value(true).required(true))
.arg(Arg::with_name("feature").help("path to trn_ft_mat.txt").long("feature").short("f").takes_value(true).required(true))
.get_matches();
let labels = arg.value_of("label").unwrap();
let features = arg.value_of("feature").unwrap();
eprintln!("boot");
let train_data = read_train_data(labels, features);
eprintln!("load train_data");
train(train_data);
}
fn train(train_data: TrainData) {
let TrainData {
input_size,
output_size,
mut data_pair,
} = train_data;
const NUM_ITERATION: usize = 5;
const MINI_BATCH_SIZE: usize = 256;
const REBUILD_DELTA_INC: f64 = 1.05;
const TRAIN_DATA_RATIO: f64 = 0.95;
let (data_pair_train, data_pair_test) = {
let mid = (data_pair.len() as f64 * TRAIN_DATA_RATIO) as usize;
data_pair.shuffle(&mut thread_rng());
data_pair.split_at_mut(mid)
};
let time = Instant::now();
let mut layer1 = FullyConnectedLayer::new_random_param(input_size, 128, SimHash::new(50, 6, 128, 1, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer1 in {}ms", time.elapsed().as_millis());
let time = Instant::now();
let mut layer2 = FullyConnectedLayer::new_random_param(128, output_size, SimHash::new(50, 8, 4096, 3, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer2 in {}ms", time.elapsed().as_millis());
let mut next_rebuild = 49;
let mut rebuild_delta = 50;
let parallel_num = num_cpus::get();
dbg!(parallel_num);
eprintln!("start training");
let time = Instant::now();
let mini_batch_count = (data_pair_train.len() + MINI_BATCH_SIZE - 1) / MINI_BATCH_SIZE;
dbg!(data_pair_train.len());
dbg!(mini_batch_count);
println!("log_type,iteration,time_ms,accuracy,loss");
for e in 0..NUM_ITERATION {
data_pair_train.shuffle(&mut thread_rng());
for i in 0..mini_batch_count {
dbg!(e);
dbg!(i);
let batch_range = i * MINI_BATCH_SIZE..((i + 1) * MINI_BATCH_SIZE).min(data_pair_train.len());
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(&data_pair_train[batch_range.clone()], parallel_num, true, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"train_log,{},{},{},{}",
e * mini_batch_count + i,
time.elapsed().as_millis(),
sum_of_accuracy / batch_range.len() as f64,
sum_of_loss / batch_range.len() as f64,
);
layer1.update_parameter();
layer2.update_parameter();
if e * mini_batch_count + i >= next_rebuild {
layer1.rebuild_hash();
layer2.rebuild_hash();
rebuild_delta = (rebuild_delta as f64 * REBUILD_DELTA_INC) as usize;
next_rebuild += rebuild_delta;
}
}
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(data_pair_test, parallel_num, false, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_unhash_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"test_log,{},{},{},{}",
(e + 1) * mini_batch_count,
time.elapsed().as_millis(),
sum_of_accuracy / data_pair_test.len() as f64,
sum_of_loss / data_pair_test.len() as f64,
);
}
}
fn process_mini_batch<I:'static + ExactDimensionComputeGraphNode<1, Item = f32>, H: FullyConnectedHasher<f32, f32>>(
data_pair: &[TrainDataPair],
parallel_num: usize,
back_propagate: bool,
construct_layers: impl Sync + Fn() -> (GraphNode<InputBox<f32, 1>, 1>, GraphNode<ApplyFullyConnectedLayer<I, f32, H, SoftmaxWithLoss<f32>, 0>, 0>),
) -> (f64, f64) {
crossbeam::scope(|scope| {
let mut threads = Vec::with_capacity(parallel_num);
for t in 0..parallel_num {
let range = t * data_pair.len() / parallel_num..(t + 1) * data_pair.len() / parallel_num;
threads.push(scope.spawn(|_| {
let (mut input, mut output) = construct_layers();
let mut sum_of_loss = 0f64;
let mut accuracy = 0f64;
for data in &data_pair[range] {
let TrainDataPair {
input: input_value,
output: output_value,
} = &data;
assert_eq!(output_value.value_count(), 1);
input.set_value(input_value.clone().into());
output.set_expect_output(output_value.clone());
let output_loss = output.get_output_value();
let output_without_loss = output.get_output_without_loss();
accuracy += match &output_without_loss {
TensorEitherOwned::Dense(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.as_all_slice().iter().enumerate().all(|(i, v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
TensorEitherOwned::Sparse(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.iter().all(|([i], v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
};
sum_of_loss += *output_loss.get([]).unwrap() as f64;
if back_propagate {
output.clear_gradient_all();
output.back_propagate_all();
}
}
(sum_of_loss, accuracy)
}));
}
threads.into_iter().fold((0f64, 0f64), |(sum_loss, sum_accuracy), handle| {
let (loss, accuracy) = handle.join().unwrap();
(sum_loss + loss, sum_accuracy + accuracy)
})
})
.expect("failed to use thread")
}
struct TrainData {
input_size: usize,
output_size: usize,
data_pair: Vec<TrainDataPair>,
}
struct TrainDataPair {
input: Sparse<f32, 1>,
output: Sparse<f32, 1>,
}
impl TrainDataPair {
fn new(input: Sparse<f32, 1>, output: Sparse<f32, 1>) -> Self {
TrainDataPair { input, output }
}
}
fn | (labels: impl AsRef<Path>, features: impl AsRef<Path>) -> TrainData {
let (output_size, labels) = read_file_as_tensors(labels);
let (input_size, features) = read_file_as_tensors(features);
let data_pair = labels
.into_par_iter()
.zip_eq(features)
.filter(|(output, _)| output.value_count() == 1)
.map(|(output, input)| TrainDataPair::new(input, output))
.collect();
TrainData { input_size, output_size, data_pair }
}
fn read_file_as_tensors(path: impl AsRef<Path>) -> (usize, Vec<Sparse<f32, 1>>) {
let path = path.as_ref();
let failed_read = &format!("failed to read file {}", path.display());
let invalid_format = &format!("invalid file format {}", path.display());
let file = File::open(path).expect("failed to open feature file");
let file = BufReader::new(file);
let mut file = file.lines();
let [len, tensor_width]: [usize; 2] = file
.next()
.expect(failed_read)
.expect(failed_read)
.trim()
.split_whitespace()
.map(|s| s.parse().expect(invalid_format))
.collect::<Vec<_>>()
.try_into()
.expect(invalid_format);
let tensor_list = file
.map(|s| {
let s = s.expect(failed_read);
let mut input = Sparse::new([tensor_width]);
s.trim().split_whitespace().for_each(|s| {
let [index, value]: [_; 2] = s.trim().split(':').collect::<Vec<_>>().try_into().expect(invalid_format);
let index = index.parse().expect(invalid_format);
assert!(index < tensor_width, "{}", invalid_format);
input.set([index], value.parse().expect(invalid_format));
});
input
})
.collect::<Vec<_>>();
assert_eq!(tensor_list.len(), len, "{}", invalid_format);
(tensor_width, tensor_list)
}
| read_train_data | identifier_name |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::Instant;
use clap::{App, Arg};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use rayon::iter::IndexedParallelIterator;
use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;
use selecting_flow::compute_graph::activation::rectified_linear::ReLU;
use selecting_flow::compute_graph::activation::softmax_with_loss::SoftmaxWithLoss;
use selecting_flow::compute_graph::fully_connected_layer::{ApplyFullyConnectedLayer, FullyConnectedLayer};
use selecting_flow::compute_graph::input_box::InputBox;
use selecting_flow::compute_graph::{ExactDimensionComputeGraphNode, GraphNode};
use selecting_flow::data_types::{Sparse, TensorEitherOwned};
use selecting_flow::hasher::sim_hash::SimHash;
use selecting_flow::hasher::FullyConnectedHasher;
use selecting_flow::optimizer::adam::Adam;
fn main() {
let arg = App::new("shield_example")
.arg(Arg::with_name("label").help("path to trn_lbl_mat.txt").long("label").short("l").takes_value(true).required(true))
.arg(Arg::with_name("feature").help("path to trn_ft_mat.txt").long("feature").short("f").takes_value(true).required(true))
.get_matches();
let labels = arg.value_of("label").unwrap();
let features = arg.value_of("feature").unwrap();
eprintln!("boot");
let train_data = read_train_data(labels, features);
eprintln!("load train_data");
train(train_data);
}
fn train(train_data: TrainData) {
let TrainData {
input_size,
output_size,
mut data_pair,
} = train_data;
const NUM_ITERATION: usize = 5;
const MINI_BATCH_SIZE: usize = 256;
const REBUILD_DELTA_INC: f64 = 1.05;
const TRAIN_DATA_RATIO: f64 = 0.95;
let (data_pair_train, data_pair_test) = {
let mid = (data_pair.len() as f64 * TRAIN_DATA_RATIO) as usize;
data_pair.shuffle(&mut thread_rng());
data_pair.split_at_mut(mid)
};
let time = Instant::now();
let mut layer1 = FullyConnectedLayer::new_random_param(input_size, 128, SimHash::new(50, 6, 128, 1, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer1 in {}ms", time.elapsed().as_millis());
let time = Instant::now();
let mut layer2 = FullyConnectedLayer::new_random_param(128, output_size, SimHash::new(50, 8, 4096, 3, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer2 in {}ms", time.elapsed().as_millis());
let mut next_rebuild = 49;
let mut rebuild_delta = 50;
let parallel_num = num_cpus::get();
dbg!(parallel_num);
eprintln!("start training");
let time = Instant::now();
let mini_batch_count = (data_pair_train.len() + MINI_BATCH_SIZE - 1) / MINI_BATCH_SIZE;
dbg!(data_pair_train.len());
dbg!(mini_batch_count);
println!("log_type,iteration,time_ms,accuracy,loss");
for e in 0..NUM_ITERATION {
data_pair_train.shuffle(&mut thread_rng());
for i in 0..mini_batch_count {
dbg!(e);
dbg!(i);
let batch_range = i * MINI_BATCH_SIZE..((i + 1) * MINI_BATCH_SIZE).min(data_pair_train.len());
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(&data_pair_train[batch_range.clone()], parallel_num, true, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"train_log,{},{},{},{}",
e * mini_batch_count + i,
time.elapsed().as_millis(),
sum_of_accuracy / batch_range.len() as f64,
sum_of_loss / batch_range.len() as f64,
);
layer1.update_parameter();
layer2.update_parameter();
if e * mini_batch_count + i >= next_rebuild {
layer1.rebuild_hash();
layer2.rebuild_hash();
rebuild_delta = (rebuild_delta as f64 * REBUILD_DELTA_INC) as usize;
next_rebuild += rebuild_delta;
}
}
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(data_pair_test, parallel_num, false, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_unhash_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"test_log,{},{},{},{}",
(e + 1) * mini_batch_count,
time.elapsed().as_millis(),
sum_of_accuracy / data_pair_test.len() as f64,
sum_of_loss / data_pair_test.len() as f64,
);
}
}
fn process_mini_batch<I:'static + ExactDimensionComputeGraphNode<1, Item = f32>, H: FullyConnectedHasher<f32, f32>>(
data_pair: &[TrainDataPair],
parallel_num: usize,
back_propagate: bool,
construct_layers: impl Sync + Fn() -> (GraphNode<InputBox<f32, 1>, 1>, GraphNode<ApplyFullyConnectedLayer<I, f32, H, SoftmaxWithLoss<f32>, 0>, 0>),
) -> (f64, f64) {
crossbeam::scope(|scope| {
let mut threads = Vec::with_capacity(parallel_num);
for t in 0..parallel_num {
let range = t * data_pair.len() / parallel_num..(t + 1) * data_pair.len() / parallel_num;
threads.push(scope.spawn(|_| {
let (mut input, mut output) = construct_layers();
let mut sum_of_loss = 0f64; | input: input_value,
output: output_value,
} = &data;
assert_eq!(output_value.value_count(), 1);
input.set_value(input_value.clone().into());
output.set_expect_output(output_value.clone());
let output_loss = output.get_output_value();
let output_without_loss = output.get_output_without_loss();
accuracy += match &output_without_loss {
TensorEitherOwned::Dense(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.as_all_slice().iter().enumerate().all(|(i, v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
TensorEitherOwned::Sparse(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.iter().all(|([i], v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
};
sum_of_loss += *output_loss.get([]).unwrap() as f64;
if back_propagate {
output.clear_gradient_all();
output.back_propagate_all();
}
}
(sum_of_loss, accuracy)
}));
}
threads.into_iter().fold((0f64, 0f64), |(sum_loss, sum_accuracy), handle| {
let (loss, accuracy) = handle.join().unwrap();
(sum_loss + loss, sum_accuracy + accuracy)
})
})
.expect("failed to use thread")
}
struct TrainData {
input_size: usize,
output_size: usize,
data_pair: Vec<TrainDataPair>,
}
struct TrainDataPair {
input: Sparse<f32, 1>,
output: Sparse<f32, 1>,
}
impl TrainDataPair {
fn new(input: Sparse<f32, 1>, output: Sparse<f32, 1>) -> Self {
TrainDataPair { input, output }
}
}
fn read_train_data(labels: impl AsRef<Path>, features: impl AsRef<Path>) -> TrainData {
let (output_size, labels) = read_file_as_tensors(labels);
let (input_size, features) = read_file_as_tensors(features);
let data_pair = labels
.into_par_iter()
.zip_eq(features)
.filter(|(output, _)| output.value_count() == 1)
.map(|(output, input)| TrainDataPair::new(input, output))
.collect();
TrainData { input_size, output_size, data_pair }
}
fn read_file_as_tensors(path: impl AsRef<Path>) -> (usize, Vec<Sparse<f32, 1>>) {
let path = path.as_ref();
let failed_read = &format!("failed to read file {}", path.display());
let invalid_format = &format!("invalid file format {}", path.display());
let file = File::open(path).expect("failed to open feature file");
let file = BufReader::new(file);
let mut file = file.lines();
let [len, tensor_width]: [usize; 2] = file
.next()
.expect(failed_read)
.expect(failed_read)
.trim()
.split_whitespace()
.map(|s| s.parse().expect(invalid_format))
.collect::<Vec<_>>()
.try_into()
.expect(invalid_format);
let tensor_list = file
.map(|s| {
let s = s.expect(failed_read);
let mut input = Sparse::new([tensor_width]);
s.trim().split_whitespace().for_each(|s| {
let [index, value]: [_; 2] = s.trim().split(':').collect::<Vec<_>>().try_into().expect(invalid_format);
let index = index.parse().expect(invalid_format);
assert!(index < tensor_width, "{}", invalid_format);
input.set([index], value.parse().expect(invalid_format));
});
input
})
.collect::<Vec<_>>();
assert_eq!(tensor_list.len(), len, "{}", invalid_format);
(tensor_width, tensor_list)
} | let mut accuracy = 0f64;
for data in &data_pair[range] {
let TrainDataPair { | random_line_split |
main.rs | use std::convert::TryInto;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::Instant;
use clap::{App, Arg};
use rand::prelude::SliceRandom;
use rand::thread_rng;
use rayon::iter::IndexedParallelIterator;
use rayon::iter::IntoParallelIterator;
use rayon::iter::ParallelIterator;
use selecting_flow::compute_graph::activation::rectified_linear::ReLU;
use selecting_flow::compute_graph::activation::softmax_with_loss::SoftmaxWithLoss;
use selecting_flow::compute_graph::fully_connected_layer::{ApplyFullyConnectedLayer, FullyConnectedLayer};
use selecting_flow::compute_graph::input_box::InputBox;
use selecting_flow::compute_graph::{ExactDimensionComputeGraphNode, GraphNode};
use selecting_flow::data_types::{Sparse, TensorEitherOwned};
use selecting_flow::hasher::sim_hash::SimHash;
use selecting_flow::hasher::FullyConnectedHasher;
use selecting_flow::optimizer::adam::Adam;
fn main() {
let arg = App::new("shield_example")
.arg(Arg::with_name("label").help("path to trn_lbl_mat.txt").long("label").short("l").takes_value(true).required(true))
.arg(Arg::with_name("feature").help("path to trn_ft_mat.txt").long("feature").short("f").takes_value(true).required(true))
.get_matches();
let labels = arg.value_of("label").unwrap();
let features = arg.value_of("feature").unwrap();
eprintln!("boot");
let train_data = read_train_data(labels, features);
eprintln!("load train_data");
train(train_data);
}
fn train(train_data: TrainData) {
let TrainData {
input_size,
output_size,
mut data_pair,
} = train_data;
const NUM_ITERATION: usize = 5;
const MINI_BATCH_SIZE: usize = 256;
const REBUILD_DELTA_INC: f64 = 1.05;
const TRAIN_DATA_RATIO: f64 = 0.95;
let (data_pair_train, data_pair_test) = {
let mid = (data_pair.len() as f64 * TRAIN_DATA_RATIO) as usize;
data_pair.shuffle(&mut thread_rng());
data_pair.split_at_mut(mid)
};
let time = Instant::now();
let mut layer1 = FullyConnectedLayer::new_random_param(input_size, 128, SimHash::new(50, 6, 128, 1, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer1 in {}ms", time.elapsed().as_millis());
let time = Instant::now();
let mut layer2 = FullyConnectedLayer::new_random_param(128, output_size, SimHash::new(50, 8, 4096, 3, 0.333), Adam::new(0.9, 0.999, 0.001));
eprintln!("construct layer2 in {}ms", time.elapsed().as_millis());
let mut next_rebuild = 49;
let mut rebuild_delta = 50;
let parallel_num = num_cpus::get();
dbg!(parallel_num);
eprintln!("start training");
let time = Instant::now();
let mini_batch_count = (data_pair_train.len() + MINI_BATCH_SIZE - 1) / MINI_BATCH_SIZE;
dbg!(data_pair_train.len());
dbg!(mini_batch_count);
println!("log_type,iteration,time_ms,accuracy,loss");
for e in 0..NUM_ITERATION {
data_pair_train.shuffle(&mut thread_rng());
for i in 0..mini_batch_count {
dbg!(e);
dbg!(i);
let batch_range = i * MINI_BATCH_SIZE..((i + 1) * MINI_BATCH_SIZE).min(data_pair_train.len());
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(&data_pair_train[batch_range.clone()], parallel_num, true, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"train_log,{},{},{},{}",
e * mini_batch_count + i,
time.elapsed().as_millis(),
sum_of_accuracy / batch_range.len() as f64,
sum_of_loss / batch_range.len() as f64,
);
layer1.update_parameter();
layer2.update_parameter();
if e * mini_batch_count + i >= next_rebuild {
layer1.rebuild_hash();
layer2.rebuild_hash();
rebuild_delta = (rebuild_delta as f64 * REBUILD_DELTA_INC) as usize;
next_rebuild += rebuild_delta;
}
}
let (sum_of_loss, sum_of_accuracy) = process_mini_batch(data_pair_test, parallel_num, false, || {
let input = InputBox::new([input_size]);
let mid = layer1.apply_to(input.clone(), ReLU::new());
let output = layer2.apply_unhash_to(mid, SoftmaxWithLoss::new());
(input, output)
});
println!(
"test_log,{},{},{},{}",
(e + 1) * mini_batch_count,
time.elapsed().as_millis(),
sum_of_accuracy / data_pair_test.len() as f64,
sum_of_loss / data_pair_test.len() as f64,
);
}
}
fn process_mini_batch<I:'static + ExactDimensionComputeGraphNode<1, Item = f32>, H: FullyConnectedHasher<f32, f32>>(
data_pair: &[TrainDataPair],
parallel_num: usize,
back_propagate: bool,
construct_layers: impl Sync + Fn() -> (GraphNode<InputBox<f32, 1>, 1>, GraphNode<ApplyFullyConnectedLayer<I, f32, H, SoftmaxWithLoss<f32>, 0>, 0>),
) -> (f64, f64) {
crossbeam::scope(|scope| {
let mut threads = Vec::with_capacity(parallel_num);
for t in 0..parallel_num {
let range = t * data_pair.len() / parallel_num..(t + 1) * data_pair.len() / parallel_num;
threads.push(scope.spawn(|_| {
let (mut input, mut output) = construct_layers();
let mut sum_of_loss = 0f64;
let mut accuracy = 0f64;
for data in &data_pair[range] {
let TrainDataPair {
input: input_value,
output: output_value,
} = &data;
assert_eq!(output_value.value_count(), 1);
input.set_value(input_value.clone().into());
output.set_expect_output(output_value.clone());
let output_loss = output.get_output_value();
let output_without_loss = output.get_output_without_loss();
accuracy += match &output_without_loss {
TensorEitherOwned::Dense(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.as_all_slice().iter().enumerate().all(|(i, v)| i == correct_index || *v < correct) {
1.
} else {
0.
}
}
TensorEitherOwned::Sparse(tensor) => {
let ([correct_index], _) = output_value.iter().next().unwrap();
let correct = *tensor.get([correct_index]).unwrap();
if tensor.iter().all(|([i], v)| i == correct_index || *v < correct) | else {
0.
}
}
};
sum_of_loss += *output_loss.get([]).unwrap() as f64;
if back_propagate {
output.clear_gradient_all();
output.back_propagate_all();
}
}
(sum_of_loss, accuracy)
}));
}
threads.into_iter().fold((0f64, 0f64), |(sum_loss, sum_accuracy), handle| {
let (loss, accuracy) = handle.join().unwrap();
(sum_loss + loss, sum_accuracy + accuracy)
})
})
.expect("failed to use thread")
}
struct TrainData {
input_size: usize,
output_size: usize,
data_pair: Vec<TrainDataPair>,
}
struct TrainDataPair {
input: Sparse<f32, 1>,
output: Sparse<f32, 1>,
}
impl TrainDataPair {
fn new(input: Sparse<f32, 1>, output: Sparse<f32, 1>) -> Self {
TrainDataPair { input, output }
}
}
fn read_train_data(labels: impl AsRef<Path>, features: impl AsRef<Path>) -> TrainData {
let (output_size, labels) = read_file_as_tensors(labels);
let (input_size, features) = read_file_as_tensors(features);
let data_pair = labels
.into_par_iter()
.zip_eq(features)
.filter(|(output, _)| output.value_count() == 1)
.map(|(output, input)| TrainDataPair::new(input, output))
.collect();
TrainData { input_size, output_size, data_pair }
}
fn read_file_as_tensors(path: impl AsRef<Path>) -> (usize, Vec<Sparse<f32, 1>>) {
let path = path.as_ref();
let failed_read = &format!("failed to read file {}", path.display());
let invalid_format = &format!("invalid file format {}", path.display());
let file = File::open(path).expect("failed to open feature file");
let file = BufReader::new(file);
let mut file = file.lines();
let [len, tensor_width]: [usize; 2] = file
.next()
.expect(failed_read)
.expect(failed_read)
.trim()
.split_whitespace()
.map(|s| s.parse().expect(invalid_format))
.collect::<Vec<_>>()
.try_into()
.expect(invalid_format);
let tensor_list = file
.map(|s| {
let s = s.expect(failed_read);
let mut input = Sparse::new([tensor_width]);
s.trim().split_whitespace().for_each(|s| {
let [index, value]: [_; 2] = s.trim().split(':').collect::<Vec<_>>().try_into().expect(invalid_format);
let index = index.parse().expect(invalid_format);
assert!(index < tensor_width, "{}", invalid_format);
input.set([index], value.parse().expect(invalid_format));
});
input
})
.collect::<Vec<_>>();
assert_eq!(tensor_list.len(), len, "{}", invalid_format);
(tensor_width, tensor_list)
}
| {
1.
} | conditional_block |
shader.rs | #![allow(dead_code)]
use std;
use math::*;
use rendering::gl;
use std::fmt::Write;
#[derive(Copy, Clone, Debug)]
pub struct Shader {
pub gl_handle: u32,
pub proj_loc: i32,
pub view_loc: i32,
}
impl Shader {
pub fn new(vertex_shader_src: &str, fragment_shader_src: &str) -> Result<Shader, String> {
use std::ffi::{CStr, CString};
unsafe {
let (vs,fs) = (gl::CreateShader(gl::VERTEX_SHADER), gl::CreateShader(gl::FRAGMENT_SHADER));
let program = gl::CreateProgram();
for &(sh, src) in [(vs, vertex_shader_src), (fs, fragment_shader_src)].iter() {
let src = CString::new(src).unwrap();
gl::ShaderSource(sh, 1, &src.as_ptr(), std::ptr::null());
gl::CompileShader(sh);
let mut status = 0i32;
gl::GetShaderiv(sh, gl::COMPILE_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetShaderInfoLog(sh, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::AttachShader(program, sh);
}
// TODO: Automate the everloving shit out of this please
gl::BindAttribLocation(program, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(program);
let mut status = 0i32;
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetProgramInfoLog(program, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::DeleteShader(vs);
gl::DeleteShader(fs);
Ok(Shader {
gl_handle: program,
proj_loc: gl::GetUniformLocation(program, b"u_proj\0".as_ptr() as _),
view_loc: gl::GetUniformLocation(program, b"u_view\0".as_ptr() as _),
})
}
}
pub const fn invalid() -> Shader {
Shader {
gl_handle: 0,
proj_loc: 0,
view_loc: 0,
}
}
fn get_currently_bound_raw() -> u32 {
unsafe {
let mut handle = 0;
gl::GetIntegerv(gl::CURRENT_PROGRAM, &mut handle);
handle as u32
}
}
pub fn use_program(&self) {
unsafe {
gl::UseProgram(self.gl_handle);
}
}
pub fn is_bound(&self) -> bool {
self.gl_handle == Shader::get_currently_bound_raw()
}
pub fn get_uniform_loc(&self, uniform: &str) -> i32 {
use std::ffi::CString;
unsafe {
let cstr = CString::new(uniform).unwrap();
gl::GetUniformLocation(self.gl_handle, cstr.as_ptr())
}
}
pub fn set_uniform_vec2(&self, uniform: &str, v: Vec2) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform2f(self.get_uniform_loc(&uniform), v.x, v.y);
}
}
pub fn set_uniform_vec3<V>(&self, uniform: &str, v: V) where V: Into<Vec3> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform3f(self.get_uniform_loc(&uniform), v.x, v.y, v.z);
}
}
pub fn set_uniform_vec4<V>(&self, uniform: &str, v: V) where V: Into<Vec4> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform4f(self.get_uniform_loc(&uniform), v.x, v.y, v.z, v.w);
}
}
pub fn set_uniform_i32(&self, uniform: &str, v: i32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1i(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_f32(&self, uniform: &str, v: f32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1f(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_mat_raw(&self, uniform: i32, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform on unbound shader");
unsafe {
gl::UniformMatrix4fv(uniform, 1, 0, mat.transpose().rows.as_ptr() as *const f32);
}
}
pub fn set_uniform_mat(&self, uniform: &str, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
self.set_uniform_mat_raw(self.get_uniform_loc(&uniform), &mat);
}
pub fn set_proj(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_proj' on unbound shader");
self.set_uniform_mat_raw(self.proj_loc, &mat);
}
pub fn set_view(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_view' on unbound shader");
self.set_uniform_mat_raw(self.view_loc, &mat);
}
}
pub struct ShaderBuilder {
attributes: Vec<String>,
varyings: Vec<String>,
uniforms: Vec<String>,
vertex_body: String,
fragment_body: String,
use_3d: bool,
use_proj: bool,
use_view: bool,
use_highp: bool,
}
impl ShaderBuilder {
pub fn new() -> Self {
ShaderBuilder {
attributes: Vec::new(),
varyings: Vec::new(),
uniforms: Vec::new(),
vertex_body: String::new(),
fragment_body: String::new(),
use_3d: false,
use_proj: false,
use_view: false,
use_highp: false,
}
}
pub fn use_3d(mut self) -> Self { self.use_3d = true; self }
pub fn use_proj(mut self) -> Self { self.use_proj = true; self.uniform("proj", "mat4") }
pub fn use_view(mut self) -> Self { self.use_view = true; self.uniform("view", "mat4") }
pub fn use_highp(mut self) -> Self { self.use_highp = true; self }
pub fn vertex(mut self, data: &str) -> Self {
write!(&mut self.vertex_body, "{};\n", data).unwrap(); self
}
pub fn fragment(mut self, data: &str) -> Self {
write!(&mut self.fragment_body, "{};\n", data).unwrap(); self
}
pub fn uniform(mut self, name: &str, ty: &str) -> Self {
self.uniforms.push(format!("{} u_{}", ty, name)); self
}
pub fn attribute(mut self, name: &str, ty: &str) -> Self {
if name == "position" {
println!("Tried to overwrite 'position' attribute while building shader - ignoring");
return self
}
self.attributes.push(format!("{} {}", ty, name)); self
}
pub fn varying(mut self, name: &str, ty: &str) -> Self {
self.varyings.push(format!("{} v_{}", ty, name)); self
}
pub fn frag_attribute(mut self, name: &str, ty: &str) -> Self {
self.attributes.push(format!("{} {}", ty, name));
self.varyings.push(format!("{} v_{}", ty, name));
write!(&mut self.vertex_body, "v_{} = {};\n", name, name).unwrap();
self
}
pub fn output(mut self, expr: &str) -> Self {
write!(&mut self.fragment_body, "gl_FragColor = {};\n", expr).unwrap();
self
}
pub fn finalize_source(mut self) -> (String, String) {
let mut varyings_and_uniforms = String::new();
for v in self.varyings.iter() { write!(&mut varyings_and_uniforms, "varying {};\n", v).unwrap(); }
for u in self.uniforms.iter() { write!(&mut varyings_and_uniforms, "uniform {};\n", u).unwrap(); }
let mut vert_src = String::new();
let mut frag_src = String::new();
let precision = if self.use_highp { "precision highp float;" } else { "precision mediump float;" };
write!(&mut vert_src, "{}\n", precision).unwrap();
write!(&mut frag_src, "{}\n", precision).unwrap();
let position_attr_ty = if self.use_3d { "vec3" } else { "vec2" };
write!(&mut vert_src, "attribute {} position;\n", position_attr_ty).unwrap();
for a in self.attributes.iter() { write!(&mut vert_src, "attribute {};\n", a).unwrap(); }
let mut gl_position = String::from("gl_Position = ");
if self.use_proj { gl_position.push_str("u_proj * "); }
if self.use_view { gl_position.push_str("u_view * "); }
if self.use_3d {
gl_position.push_str("vec4(position, 1.0);\n");
} else {
gl_position.push_str("vec4(position, 0.0, 1.0);\n");
}
self.vertex_body = format!("{}{}", gl_position, self.vertex_body);
let mut bodies = [&mut self.vertex_body, &mut self.fragment_body];
for (sh, body) in [&mut vert_src, &mut frag_src].iter_mut().zip(bodies.iter_mut()) {
write!(sh, "\n{}\n", varyings_and_uniforms).unwrap();
let mut position = 0;
while let Some(start) = body[position..].find("func ") {
let length = body[start..].chars()
.scan((false, 0), |acc, c| {
let (body, nesting) = *acc;
*acc = match (body, nesting, c) {
(false, _, '}') => return None,
(true, 1, '}') => return None,
(false, 0, '{') => (true, 1),
(true, x, '{') => (true, x+1),
(true, x, '}') => (true, x-1),
_ => *acc,
};
Some(*acc)
})
.count();
let start = start + position;
let end = start + length + 1;
write!(sh, "{}\n", &body[start+5..end]).unwrap();
body.replace_range(start..end, "");
position = start;
}
write!(sh, "void main() {{\n{}}}\n", body).unwrap();
}
(vert_src, frag_src)
}
pub fn finalize(self) -> Result<Shader, String> {
use std::ffi::CString;
let attributes = self.attributes.iter()
.map(|a| CString::new(a.split(' ').nth(1).unwrap()).unwrap()) | let (v,f) = self.finalize_source();
let mut s = Shader::new(&v, &f)?;
for (idx, attrib_name) in attributes.iter().enumerate() {
unsafe {
gl::BindAttribLocation(s.gl_handle, 1 + idx as u32, attrib_name.as_ptr());
}
}
unsafe {
gl::BindAttribLocation(s.gl_handle, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(s.gl_handle);
s.proj_loc = gl::GetUniformLocation(s.gl_handle, b"u_proj\0".as_ptr() as _);
s.view_loc = gl::GetUniformLocation(s.gl_handle, b"u_view\0".as_ptr() as _);
}
Ok(s)
}
}
#[cfg(test)] mod tests {
#[test]
fn shader_builder() {
let (vsh, fsh) = ::ShaderBuilder::new()
.uniform("tex", "sampler2D")
.attribute("some_random_attribute", "vec4")
.frag_attribute("color", "vec3")
.frag_attribute("uv", "vec2")
.use_proj().use_view()
.fragment("
func vec3 function_test(vec3 c) {
return vec3(1.0) - c;
}
func vec3 function_test_2(float c) {
if (c < 0.5) {
return vec3(c);
} else {
return vec3(1.0 - c);
}
}
vec3 color = function_test(v_color);
color.g = texture2D(u_tex, v_uv).r")
.output("vec4(color, 1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
let (vsh, fsh) = ::ShaderBuilder::new()
.use_3d()
.output("vec4(1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
}
} | .collect::<Vec<_>>();
| random_line_split |
shader.rs | #![allow(dead_code)]
use std;
use math::*;
use rendering::gl;
use std::fmt::Write;
#[derive(Copy, Clone, Debug)]
pub struct Shader {
pub gl_handle: u32,
pub proj_loc: i32,
pub view_loc: i32,
}
impl Shader {
pub fn new(vertex_shader_src: &str, fragment_shader_src: &str) -> Result<Shader, String> {
use std::ffi::{CStr, CString};
unsafe {
let (vs,fs) = (gl::CreateShader(gl::VERTEX_SHADER), gl::CreateShader(gl::FRAGMENT_SHADER));
let program = gl::CreateProgram();
for &(sh, src) in [(vs, vertex_shader_src), (fs, fragment_shader_src)].iter() {
let src = CString::new(src).unwrap();
gl::ShaderSource(sh, 1, &src.as_ptr(), std::ptr::null());
gl::CompileShader(sh);
let mut status = 0i32;
gl::GetShaderiv(sh, gl::COMPILE_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetShaderInfoLog(sh, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::AttachShader(program, sh);
}
// TODO: Automate the everloving shit out of this please
gl::BindAttribLocation(program, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(program);
let mut status = 0i32;
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetProgramInfoLog(program, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::DeleteShader(vs);
gl::DeleteShader(fs);
Ok(Shader {
gl_handle: program,
proj_loc: gl::GetUniformLocation(program, b"u_proj\0".as_ptr() as _),
view_loc: gl::GetUniformLocation(program, b"u_view\0".as_ptr() as _),
})
}
}
pub const fn invalid() -> Shader {
Shader {
gl_handle: 0,
proj_loc: 0,
view_loc: 0,
}
}
fn get_currently_bound_raw() -> u32 {
unsafe {
let mut handle = 0;
gl::GetIntegerv(gl::CURRENT_PROGRAM, &mut handle);
handle as u32
}
}
pub fn use_program(&self) {
unsafe {
gl::UseProgram(self.gl_handle);
}
}
pub fn is_bound(&self) -> bool {
self.gl_handle == Shader::get_currently_bound_raw()
}
pub fn get_uniform_loc(&self, uniform: &str) -> i32 {
use std::ffi::CString;
unsafe {
let cstr = CString::new(uniform).unwrap();
gl::GetUniformLocation(self.gl_handle, cstr.as_ptr())
}
}
pub fn set_uniform_vec2(&self, uniform: &str, v: Vec2) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform2f(self.get_uniform_loc(&uniform), v.x, v.y);
}
}
pub fn set_uniform_vec3<V>(&self, uniform: &str, v: V) where V: Into<Vec3> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform3f(self.get_uniform_loc(&uniform), v.x, v.y, v.z);
}
}
pub fn set_uniform_vec4<V>(&self, uniform: &str, v: V) where V: Into<Vec4> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform4f(self.get_uniform_loc(&uniform), v.x, v.y, v.z, v.w);
}
}
pub fn set_uniform_i32(&self, uniform: &str, v: i32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1i(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_f32(&self, uniform: &str, v: f32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1f(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_mat_raw(&self, uniform: i32, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform on unbound shader");
unsafe {
gl::UniformMatrix4fv(uniform, 1, 0, mat.transpose().rows.as_ptr() as *const f32);
}
}
pub fn set_uniform_mat(&self, uniform: &str, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
self.set_uniform_mat_raw(self.get_uniform_loc(&uniform), &mat);
}
pub fn set_proj(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_proj' on unbound shader");
self.set_uniform_mat_raw(self.proj_loc, &mat);
}
pub fn set_view(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_view' on unbound shader");
self.set_uniform_mat_raw(self.view_loc, &mat);
}
}
pub struct ShaderBuilder {
attributes: Vec<String>,
varyings: Vec<String>,
uniforms: Vec<String>,
vertex_body: String,
fragment_body: String,
use_3d: bool,
use_proj: bool,
use_view: bool,
use_highp: bool,
}
impl ShaderBuilder {
pub fn new() -> Self {
ShaderBuilder {
attributes: Vec::new(),
varyings: Vec::new(),
uniforms: Vec::new(),
vertex_body: String::new(),
fragment_body: String::new(),
use_3d: false,
use_proj: false,
use_view: false,
use_highp: false,
}
}
pub fn use_3d(mut self) -> Self { self.use_3d = true; self }
pub fn use_proj(mut self) -> Self { self.use_proj = true; self.uniform("proj", "mat4") }
pub fn use_view(mut self) -> Self { self.use_view = true; self.uniform("view", "mat4") }
pub fn use_highp(mut self) -> Self { self.use_highp = true; self }
pub fn vertex(mut self, data: &str) -> Self {
write!(&mut self.vertex_body, "{};\n", data).unwrap(); self
}
pub fn fragment(mut self, data: &str) -> Self {
write!(&mut self.fragment_body, "{};\n", data).unwrap(); self
}
pub fn uniform(mut self, name: &str, ty: &str) -> Self |
pub fn attribute(mut self, name: &str, ty: &str) -> Self {
if name == "position" {
println!("Tried to overwrite 'position' attribute while building shader - ignoring");
return self
}
self.attributes.push(format!("{} {}", ty, name)); self
}
pub fn varying(mut self, name: &str, ty: &str) -> Self {
self.varyings.push(format!("{} v_{}", ty, name)); self
}
pub fn frag_attribute(mut self, name: &str, ty: &str) -> Self {
self.attributes.push(format!("{} {}", ty, name));
self.varyings.push(format!("{} v_{}", ty, name));
write!(&mut self.vertex_body, "v_{} = {};\n", name, name).unwrap();
self
}
pub fn output(mut self, expr: &str) -> Self {
write!(&mut self.fragment_body, "gl_FragColor = {};\n", expr).unwrap();
self
}
pub fn finalize_source(mut self) -> (String, String) {
let mut varyings_and_uniforms = String::new();
for v in self.varyings.iter() { write!(&mut varyings_and_uniforms, "varying {};\n", v).unwrap(); }
for u in self.uniforms.iter() { write!(&mut varyings_and_uniforms, "uniform {};\n", u).unwrap(); }
let mut vert_src = String::new();
let mut frag_src = String::new();
let precision = if self.use_highp { "precision highp float;" } else { "precision mediump float;" };
write!(&mut vert_src, "{}\n", precision).unwrap();
write!(&mut frag_src, "{}\n", precision).unwrap();
let position_attr_ty = if self.use_3d { "vec3" } else { "vec2" };
write!(&mut vert_src, "attribute {} position;\n", position_attr_ty).unwrap();
for a in self.attributes.iter() { write!(&mut vert_src, "attribute {};\n", a).unwrap(); }
let mut gl_position = String::from("gl_Position = ");
if self.use_proj { gl_position.push_str("u_proj * "); }
if self.use_view { gl_position.push_str("u_view * "); }
if self.use_3d {
gl_position.push_str("vec4(position, 1.0);\n");
} else {
gl_position.push_str("vec4(position, 0.0, 1.0);\n");
}
self.vertex_body = format!("{}{}", gl_position, self.vertex_body);
let mut bodies = [&mut self.vertex_body, &mut self.fragment_body];
for (sh, body) in [&mut vert_src, &mut frag_src].iter_mut().zip(bodies.iter_mut()) {
write!(sh, "\n{}\n", varyings_and_uniforms).unwrap();
let mut position = 0;
while let Some(start) = body[position..].find("func ") {
let length = body[start..].chars()
.scan((false, 0), |acc, c| {
let (body, nesting) = *acc;
*acc = match (body, nesting, c) {
(false, _, '}') => return None,
(true, 1, '}') => return None,
(false, 0, '{') => (true, 1),
(true, x, '{') => (true, x+1),
(true, x, '}') => (true, x-1),
_ => *acc,
};
Some(*acc)
})
.count();
let start = start + position;
let end = start + length + 1;
write!(sh, "{}\n", &body[start+5..end]).unwrap();
body.replace_range(start..end, "");
position = start;
}
write!(sh, "void main() {{\n{}}}\n", body).unwrap();
}
(vert_src, frag_src)
}
pub fn finalize(self) -> Result<Shader, String> {
use std::ffi::CString;
let attributes = self.attributes.iter()
.map(|a| CString::new(a.split(' ').nth(1).unwrap()).unwrap())
.collect::<Vec<_>>();
let (v,f) = self.finalize_source();
let mut s = Shader::new(&v, &f)?;
for (idx, attrib_name) in attributes.iter().enumerate() {
unsafe {
gl::BindAttribLocation(s.gl_handle, 1 + idx as u32, attrib_name.as_ptr());
}
}
unsafe {
gl::BindAttribLocation(s.gl_handle, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(s.gl_handle);
s.proj_loc = gl::GetUniformLocation(s.gl_handle, b"u_proj\0".as_ptr() as _);
s.view_loc = gl::GetUniformLocation(s.gl_handle, b"u_view\0".as_ptr() as _);
}
Ok(s)
}
}
#[cfg(test)] mod tests {
#[test]
fn shader_builder() {
let (vsh, fsh) = ::ShaderBuilder::new()
.uniform("tex", "sampler2D")
.attribute("some_random_attribute", "vec4")
.frag_attribute("color", "vec3")
.frag_attribute("uv", "vec2")
.use_proj().use_view()
.fragment("
func vec3 function_test(vec3 c) {
return vec3(1.0) - c;
}
func vec3 function_test_2(float c) {
if (c < 0.5) {
return vec3(c);
} else {
return vec3(1.0 - c);
}
}
vec3 color = function_test(v_color);
color.g = texture2D(u_tex, v_uv).r")
.output("vec4(color, 1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
let (vsh, fsh) = ::ShaderBuilder::new()
.use_3d()
.output("vec4(1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
}
} | {
self.uniforms.push(format!("{} u_{}", ty, name)); self
} | identifier_body |
shader.rs | #![allow(dead_code)]
use std;
use math::*;
use rendering::gl;
use std::fmt::Write;
#[derive(Copy, Clone, Debug)]
pub struct Shader {
pub gl_handle: u32,
pub proj_loc: i32,
pub view_loc: i32,
}
impl Shader {
pub fn new(vertex_shader_src: &str, fragment_shader_src: &str) -> Result<Shader, String> {
use std::ffi::{CStr, CString};
unsafe {
let (vs,fs) = (gl::CreateShader(gl::VERTEX_SHADER), gl::CreateShader(gl::FRAGMENT_SHADER));
let program = gl::CreateProgram();
for &(sh, src) in [(vs, vertex_shader_src), (fs, fragment_shader_src)].iter() {
let src = CString::new(src).unwrap();
gl::ShaderSource(sh, 1, &src.as_ptr(), std::ptr::null());
gl::CompileShader(sh);
let mut status = 0i32;
gl::GetShaderiv(sh, gl::COMPILE_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetShaderInfoLog(sh, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::AttachShader(program, sh);
}
// TODO: Automate the everloving shit out of this please
gl::BindAttribLocation(program, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(program);
let mut status = 0i32;
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetProgramInfoLog(program, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::DeleteShader(vs);
gl::DeleteShader(fs);
Ok(Shader {
gl_handle: program,
proj_loc: gl::GetUniformLocation(program, b"u_proj\0".as_ptr() as _),
view_loc: gl::GetUniformLocation(program, b"u_view\0".as_ptr() as _),
})
}
}
pub const fn invalid() -> Shader {
Shader {
gl_handle: 0,
proj_loc: 0,
view_loc: 0,
}
}
fn get_currently_bound_raw() -> u32 {
unsafe {
let mut handle = 0;
gl::GetIntegerv(gl::CURRENT_PROGRAM, &mut handle);
handle as u32
}
}
pub fn use_program(&self) {
unsafe {
gl::UseProgram(self.gl_handle);
}
}
pub fn is_bound(&self) -> bool {
self.gl_handle == Shader::get_currently_bound_raw()
}
pub fn get_uniform_loc(&self, uniform: &str) -> i32 {
use std::ffi::CString;
unsafe {
let cstr = CString::new(uniform).unwrap();
gl::GetUniformLocation(self.gl_handle, cstr.as_ptr())
}
}
pub fn set_uniform_vec2(&self, uniform: &str, v: Vec2) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform2f(self.get_uniform_loc(&uniform), v.x, v.y);
}
}
pub fn | <V>(&self, uniform: &str, v: V) where V: Into<Vec3> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform3f(self.get_uniform_loc(&uniform), v.x, v.y, v.z);
}
}
pub fn set_uniform_vec4<V>(&self, uniform: &str, v: V) where V: Into<Vec4> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform4f(self.get_uniform_loc(&uniform), v.x, v.y, v.z, v.w);
}
}
pub fn set_uniform_i32(&self, uniform: &str, v: i32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1i(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_f32(&self, uniform: &str, v: f32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1f(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_mat_raw(&self, uniform: i32, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform on unbound shader");
unsafe {
gl::UniformMatrix4fv(uniform, 1, 0, mat.transpose().rows.as_ptr() as *const f32);
}
}
pub fn set_uniform_mat(&self, uniform: &str, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
self.set_uniform_mat_raw(self.get_uniform_loc(&uniform), &mat);
}
pub fn set_proj(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_proj' on unbound shader");
self.set_uniform_mat_raw(self.proj_loc, &mat);
}
pub fn set_view(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_view' on unbound shader");
self.set_uniform_mat_raw(self.view_loc, &mat);
}
}
pub struct ShaderBuilder {
attributes: Vec<String>,
varyings: Vec<String>,
uniforms: Vec<String>,
vertex_body: String,
fragment_body: String,
use_3d: bool,
use_proj: bool,
use_view: bool,
use_highp: bool,
}
impl ShaderBuilder {
pub fn new() -> Self {
ShaderBuilder {
attributes: Vec::new(),
varyings: Vec::new(),
uniforms: Vec::new(),
vertex_body: String::new(),
fragment_body: String::new(),
use_3d: false,
use_proj: false,
use_view: false,
use_highp: false,
}
}
pub fn use_3d(mut self) -> Self { self.use_3d = true; self }
pub fn use_proj(mut self) -> Self { self.use_proj = true; self.uniform("proj", "mat4") }
pub fn use_view(mut self) -> Self { self.use_view = true; self.uniform("view", "mat4") }
pub fn use_highp(mut self) -> Self { self.use_highp = true; self }
pub fn vertex(mut self, data: &str) -> Self {
write!(&mut self.vertex_body, "{};\n", data).unwrap(); self
}
pub fn fragment(mut self, data: &str) -> Self {
write!(&mut self.fragment_body, "{};\n", data).unwrap(); self
}
pub fn uniform(mut self, name: &str, ty: &str) -> Self {
self.uniforms.push(format!("{} u_{}", ty, name)); self
}
pub fn attribute(mut self, name: &str, ty: &str) -> Self {
if name == "position" {
println!("Tried to overwrite 'position' attribute while building shader - ignoring");
return self
}
self.attributes.push(format!("{} {}", ty, name)); self
}
pub fn varying(mut self, name: &str, ty: &str) -> Self {
self.varyings.push(format!("{} v_{}", ty, name)); self
}
pub fn frag_attribute(mut self, name: &str, ty: &str) -> Self {
self.attributes.push(format!("{} {}", ty, name));
self.varyings.push(format!("{} v_{}", ty, name));
write!(&mut self.vertex_body, "v_{} = {};\n", name, name).unwrap();
self
}
pub fn output(mut self, expr: &str) -> Self {
write!(&mut self.fragment_body, "gl_FragColor = {};\n", expr).unwrap();
self
}
pub fn finalize_source(mut self) -> (String, String) {
let mut varyings_and_uniforms = String::new();
for v in self.varyings.iter() { write!(&mut varyings_and_uniforms, "varying {};\n", v).unwrap(); }
for u in self.uniforms.iter() { write!(&mut varyings_and_uniforms, "uniform {};\n", u).unwrap(); }
let mut vert_src = String::new();
let mut frag_src = String::new();
let precision = if self.use_highp { "precision highp float;" } else { "precision mediump float;" };
write!(&mut vert_src, "{}\n", precision).unwrap();
write!(&mut frag_src, "{}\n", precision).unwrap();
let position_attr_ty = if self.use_3d { "vec3" } else { "vec2" };
write!(&mut vert_src, "attribute {} position;\n", position_attr_ty).unwrap();
for a in self.attributes.iter() { write!(&mut vert_src, "attribute {};\n", a).unwrap(); }
let mut gl_position = String::from("gl_Position = ");
if self.use_proj { gl_position.push_str("u_proj * "); }
if self.use_view { gl_position.push_str("u_view * "); }
if self.use_3d {
gl_position.push_str("vec4(position, 1.0);\n");
} else {
gl_position.push_str("vec4(position, 0.0, 1.0);\n");
}
self.vertex_body = format!("{}{}", gl_position, self.vertex_body);
let mut bodies = [&mut self.vertex_body, &mut self.fragment_body];
for (sh, body) in [&mut vert_src, &mut frag_src].iter_mut().zip(bodies.iter_mut()) {
write!(sh, "\n{}\n", varyings_and_uniforms).unwrap();
let mut position = 0;
while let Some(start) = body[position..].find("func ") {
let length = body[start..].chars()
.scan((false, 0), |acc, c| {
let (body, nesting) = *acc;
*acc = match (body, nesting, c) {
(false, _, '}') => return None,
(true, 1, '}') => return None,
(false, 0, '{') => (true, 1),
(true, x, '{') => (true, x+1),
(true, x, '}') => (true, x-1),
_ => *acc,
};
Some(*acc)
})
.count();
let start = start + position;
let end = start + length + 1;
write!(sh, "{}\n", &body[start+5..end]).unwrap();
body.replace_range(start..end, "");
position = start;
}
write!(sh, "void main() {{\n{}}}\n", body).unwrap();
}
(vert_src, frag_src)
}
pub fn finalize(self) -> Result<Shader, String> {
use std::ffi::CString;
let attributes = self.attributes.iter()
.map(|a| CString::new(a.split(' ').nth(1).unwrap()).unwrap())
.collect::<Vec<_>>();
let (v,f) = self.finalize_source();
let mut s = Shader::new(&v, &f)?;
for (idx, attrib_name) in attributes.iter().enumerate() {
unsafe {
gl::BindAttribLocation(s.gl_handle, 1 + idx as u32, attrib_name.as_ptr());
}
}
unsafe {
gl::BindAttribLocation(s.gl_handle, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(s.gl_handle);
s.proj_loc = gl::GetUniformLocation(s.gl_handle, b"u_proj\0".as_ptr() as _);
s.view_loc = gl::GetUniformLocation(s.gl_handle, b"u_view\0".as_ptr() as _);
}
Ok(s)
}
}
#[cfg(test)] mod tests {
#[test]
fn shader_builder() {
let (vsh, fsh) = ::ShaderBuilder::new()
.uniform("tex", "sampler2D")
.attribute("some_random_attribute", "vec4")
.frag_attribute("color", "vec3")
.frag_attribute("uv", "vec2")
.use_proj().use_view()
.fragment("
func vec3 function_test(vec3 c) {
return vec3(1.0) - c;
}
func vec3 function_test_2(float c) {
if (c < 0.5) {
return vec3(c);
} else {
return vec3(1.0 - c);
}
}
vec3 color = function_test(v_color);
color.g = texture2D(u_tex, v_uv).r")
.output("vec4(color, 1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
let (vsh, fsh) = ::ShaderBuilder::new()
.use_3d()
.output("vec4(1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
}
} | set_uniform_vec3 | identifier_name |
shader.rs | #![allow(dead_code)]
use std;
use math::*;
use rendering::gl;
use std::fmt::Write;
#[derive(Copy, Clone, Debug)]
pub struct Shader {
pub gl_handle: u32,
pub proj_loc: i32,
pub view_loc: i32,
}
impl Shader {
pub fn new(vertex_shader_src: &str, fragment_shader_src: &str) -> Result<Shader, String> {
use std::ffi::{CStr, CString};
unsafe {
let (vs,fs) = (gl::CreateShader(gl::VERTEX_SHADER), gl::CreateShader(gl::FRAGMENT_SHADER));
let program = gl::CreateProgram();
for &(sh, src) in [(vs, vertex_shader_src), (fs, fragment_shader_src)].iter() {
let src = CString::new(src).unwrap();
gl::ShaderSource(sh, 1, &src.as_ptr(), std::ptr::null());
gl::CompileShader(sh);
let mut status = 0i32;
gl::GetShaderiv(sh, gl::COMPILE_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetShaderInfoLog(sh, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::AttachShader(program, sh);
}
// TODO: Automate the everloving shit out of this please
gl::BindAttribLocation(program, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(program);
let mut status = 0i32;
gl::GetProgramiv(program, gl::LINK_STATUS, &mut status);
if status == 0 {
let mut buf = [0u8; 1024];
let mut len = 0;
gl::GetProgramInfoLog(program, buf.len() as _, &mut len, buf.as_mut_ptr() as _);
return Err(CStr::from_bytes_with_nul_unchecked(&buf[..len as usize]).to_string_lossy().into());
}
gl::DeleteShader(vs);
gl::DeleteShader(fs);
Ok(Shader {
gl_handle: program,
proj_loc: gl::GetUniformLocation(program, b"u_proj\0".as_ptr() as _),
view_loc: gl::GetUniformLocation(program, b"u_view\0".as_ptr() as _),
})
}
}
pub const fn invalid() -> Shader {
Shader {
gl_handle: 0,
proj_loc: 0,
view_loc: 0,
}
}
fn get_currently_bound_raw() -> u32 {
unsafe {
let mut handle = 0;
gl::GetIntegerv(gl::CURRENT_PROGRAM, &mut handle);
handle as u32
}
}
pub fn use_program(&self) {
unsafe {
gl::UseProgram(self.gl_handle);
}
}
pub fn is_bound(&self) -> bool {
self.gl_handle == Shader::get_currently_bound_raw()
}
pub fn get_uniform_loc(&self, uniform: &str) -> i32 {
use std::ffi::CString;
unsafe {
let cstr = CString::new(uniform).unwrap();
gl::GetUniformLocation(self.gl_handle, cstr.as_ptr())
}
}
pub fn set_uniform_vec2(&self, uniform: &str, v: Vec2) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform2f(self.get_uniform_loc(&uniform), v.x, v.y);
}
}
pub fn set_uniform_vec3<V>(&self, uniform: &str, v: V) where V: Into<Vec3> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform3f(self.get_uniform_loc(&uniform), v.x, v.y, v.z);
}
}
pub fn set_uniform_vec4<V>(&self, uniform: &str, v: V) where V: Into<Vec4> {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
let v = v.into();
gl::Uniform4f(self.get_uniform_loc(&uniform), v.x, v.y, v.z, v.w);
}
}
pub fn set_uniform_i32(&self, uniform: &str, v: i32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1i(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_f32(&self, uniform: &str, v: f32) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
unsafe {
gl::Uniform1f(self.get_uniform_loc(&uniform), v);
}
}
pub fn set_uniform_mat_raw(&self, uniform: i32, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform on unbound shader");
unsafe {
gl::UniformMatrix4fv(uniform, 1, 0, mat.transpose().rows.as_ptr() as *const f32);
}
}
pub fn set_uniform_mat(&self, uniform: &str, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform '{}' on unbound shader", uniform);
self.set_uniform_mat_raw(self.get_uniform_loc(&uniform), &mat);
}
pub fn set_proj(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_proj' on unbound shader");
self.set_uniform_mat_raw(self.proj_loc, &mat);
}
pub fn set_view(&self, mat: &Mat4) {
assert!(self.is_bound(), "Tried to set uniform 'u_view' on unbound shader");
self.set_uniform_mat_raw(self.view_loc, &mat);
}
}
pub struct ShaderBuilder {
attributes: Vec<String>,
varyings: Vec<String>,
uniforms: Vec<String>,
vertex_body: String,
fragment_body: String,
use_3d: bool,
use_proj: bool,
use_view: bool,
use_highp: bool,
}
impl ShaderBuilder {
pub fn new() -> Self {
ShaderBuilder {
attributes: Vec::new(),
varyings: Vec::new(),
uniforms: Vec::new(),
vertex_body: String::new(),
fragment_body: String::new(),
use_3d: false,
use_proj: false,
use_view: false,
use_highp: false,
}
}
pub fn use_3d(mut self) -> Self { self.use_3d = true; self }
pub fn use_proj(mut self) -> Self { self.use_proj = true; self.uniform("proj", "mat4") }
pub fn use_view(mut self) -> Self { self.use_view = true; self.uniform("view", "mat4") }
pub fn use_highp(mut self) -> Self { self.use_highp = true; self }
pub fn vertex(mut self, data: &str) -> Self {
write!(&mut self.vertex_body, "{};\n", data).unwrap(); self
}
pub fn fragment(mut self, data: &str) -> Self {
write!(&mut self.fragment_body, "{};\n", data).unwrap(); self
}
pub fn uniform(mut self, name: &str, ty: &str) -> Self {
self.uniforms.push(format!("{} u_{}", ty, name)); self
}
pub fn attribute(mut self, name: &str, ty: &str) -> Self {
if name == "position" {
println!("Tried to overwrite 'position' attribute while building shader - ignoring");
return self
}
self.attributes.push(format!("{} {}", ty, name)); self
}
pub fn varying(mut self, name: &str, ty: &str) -> Self {
self.varyings.push(format!("{} v_{}", ty, name)); self
}
pub fn frag_attribute(mut self, name: &str, ty: &str) -> Self {
self.attributes.push(format!("{} {}", ty, name));
self.varyings.push(format!("{} v_{}", ty, name));
write!(&mut self.vertex_body, "v_{} = {};\n", name, name).unwrap();
self
}
pub fn output(mut self, expr: &str) -> Self {
write!(&mut self.fragment_body, "gl_FragColor = {};\n", expr).unwrap();
self
}
pub fn finalize_source(mut self) -> (String, String) {
let mut varyings_and_uniforms = String::new();
for v in self.varyings.iter() { write!(&mut varyings_and_uniforms, "varying {};\n", v).unwrap(); }
for u in self.uniforms.iter() { write!(&mut varyings_and_uniforms, "uniform {};\n", u).unwrap(); }
let mut vert_src = String::new();
let mut frag_src = String::new();
let precision = if self.use_highp { "precision highp float;" } else { "precision mediump float;" };
write!(&mut vert_src, "{}\n", precision).unwrap();
write!(&mut frag_src, "{}\n", precision).unwrap();
let position_attr_ty = if self.use_3d { "vec3" } else { "vec2" };
write!(&mut vert_src, "attribute {} position;\n", position_attr_ty).unwrap();
for a in self.attributes.iter() { write!(&mut vert_src, "attribute {};\n", a).unwrap(); }
let mut gl_position = String::from("gl_Position = ");
if self.use_proj { gl_position.push_str("u_proj * "); }
if self.use_view { gl_position.push_str("u_view * "); }
if self.use_3d | else {
gl_position.push_str("vec4(position, 0.0, 1.0);\n");
}
self.vertex_body = format!("{}{}", gl_position, self.vertex_body);
let mut bodies = [&mut self.vertex_body, &mut self.fragment_body];
for (sh, body) in [&mut vert_src, &mut frag_src].iter_mut().zip(bodies.iter_mut()) {
write!(sh, "\n{}\n", varyings_and_uniforms).unwrap();
let mut position = 0;
while let Some(start) = body[position..].find("func ") {
let length = body[start..].chars()
.scan((false, 0), |acc, c| {
let (body, nesting) = *acc;
*acc = match (body, nesting, c) {
(false, _, '}') => return None,
(true, 1, '}') => return None,
(false, 0, '{') => (true, 1),
(true, x, '{') => (true, x+1),
(true, x, '}') => (true, x-1),
_ => *acc,
};
Some(*acc)
})
.count();
let start = start + position;
let end = start + length + 1;
write!(sh, "{}\n", &body[start+5..end]).unwrap();
body.replace_range(start..end, "");
position = start;
}
write!(sh, "void main() {{\n{}}}\n", body).unwrap();
}
(vert_src, frag_src)
}
pub fn finalize(self) -> Result<Shader, String> {
use std::ffi::CString;
let attributes = self.attributes.iter()
.map(|a| CString::new(a.split(' ').nth(1).unwrap()).unwrap())
.collect::<Vec<_>>();
let (v,f) = self.finalize_source();
let mut s = Shader::new(&v, &f)?;
for (idx, attrib_name) in attributes.iter().enumerate() {
unsafe {
gl::BindAttribLocation(s.gl_handle, 1 + idx as u32, attrib_name.as_ptr());
}
}
unsafe {
gl::BindAttribLocation(s.gl_handle, 0, b"position\0".as_ptr() as _);
gl::LinkProgram(s.gl_handle);
s.proj_loc = gl::GetUniformLocation(s.gl_handle, b"u_proj\0".as_ptr() as _);
s.view_loc = gl::GetUniformLocation(s.gl_handle, b"u_view\0".as_ptr() as _);
}
Ok(s)
}
}
#[cfg(test)] mod tests {
#[test]
fn shader_builder() {
let (vsh, fsh) = ::ShaderBuilder::new()
.uniform("tex", "sampler2D")
.attribute("some_random_attribute", "vec4")
.frag_attribute("color", "vec3")
.frag_attribute("uv", "vec2")
.use_proj().use_view()
.fragment("
func vec3 function_test(vec3 c) {
return vec3(1.0) - c;
}
func vec3 function_test_2(float c) {
if (c < 0.5) {
return vec3(c);
} else {
return vec3(1.0 - c);
}
}
vec3 color = function_test(v_color);
color.g = texture2D(u_tex, v_uv).r")
.output("vec4(color, 1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
let (vsh, fsh) = ::ShaderBuilder::new()
.use_3d()
.output("vec4(1.0)")
.finalize_source();
println!("vert source\n==========\n{}\n", vsh);
println!("frag source\n==========\n{}", fsh);
}
} | {
gl_position.push_str("vec4(position, 1.0);\n");
} | conditional_block |
mod.rs | x1 && v.1 >= y0 && v.1 < y1
}
fn restore_view_plugins(docks: &[DockHandle],
view_plugins: &mut ViewPlugins,
info: &mut HashMap<u64, PluginInstanceInfo>) {
for dock in docks.iter() {
if!view_plugins.get_view(ViewHandle(dock.0)).is_some() {
let info = match info.remove(&dock.0) {
None => panic!("Could not restore view: no info in `removed_instances` found"),
Some(info) => info,
};
if info.restore(view_plugins).is_none() {
panic!("Could not restore view");
}
}
}
}
pub struct Window {
/// minifb window
pub win: minifb::Window,
pub menu: Menu,
pub ws: Workspace,
// TODO: should we serialize Workspace if this is stored in memory only?
ws_states: VecDeque<String>,
cur_state_index: usize,
removed_instances: HashMap<u64, PluginInstanceInfo>,
pub mouse_state: MouseState,
pub menu_id_offset: u32,
pub overlay: Option<(DockHandle, Rect)>,
pub context_menu_data: Option<(DockHandle, (f32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if!views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0!= handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 |
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close:!open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some(instance) = view_plugins.get_view(*view) {
self.removed_instances.insert(view.0, PluginInstanceInfo::new(instance));
}
view_plugins.destroy_instance(*view);
self.ws.delete_dock_by_handle(DockHandle(view.0));
}
}
fn update_backend_configure(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins) {
if self.config_backend == None {
return;
}
let backend = backend_plugins.get_backend(self.config_backend).unwrap();
unsafe {
let plugin_funcs = backend.plugin_type.plugin_funcs as *mut CBackendCallbacks;
if let Some(show_config) = (*plugin_funcs).show_config {
let ui = Imgui::get_ui();
ui.open_popup("config");
if ui.begin_popup_modal("config") {
show_config(backend.plugin_data, Imgui::get_ui_funs() as *mut c_void);
let ok_size = Some(Vec2 { x: 120.0, y: 0.0 });
let cancel_size = Some(Vec2 { x: 120.0, y: 0.0 });
if ui.button("Ok", ok_size) {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
ui.close_current_popup();
}
ui.same_line(0, -1);
if ui.button("Cancel", cancel_size) {
self.config_backend = None;
ui.close_current_popup();
}
ui.end_popup();
}
} else {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
}
}
}
fn has_source_code_view(&self, view_plugins: &mut ViewPlugins) -> bool {
// TODO: Use setting for this name
for handle in self.ws.get_docks() {
if let Some(plugin) = view_plugins.get_view(ViewHandle(handle.0)) {
if plugin.name == "Source Code View" {
return true;
}
}
}
false
}
fn open_source_file(&mut self,
filename: &str,
view_plugins: &mut ViewPlugins,
session: &mut Session) {
// check if we already have a source view open and just post the message.
if!self.has_source_code_view(view_plugins) {
let mouse = self.get_mouse_pos();
// This is somewhat hacky to set a "correct" split view for
self.context_menu_data = self.ws
.get_dock_handle_at_pos(mouse)
.map(|handle| (handle, mouse));
self.split_view(&"Source Code View".to_owned(),
view_plugins,
Direction::Vertical);
}
let writer = session.get_current_writer();
writer.event_begin(events::EVENT_SET_SOURCE_CODE_FILE as u16);
writer.write_string("filename", filename);
writer.event_end();
}
fn initialize_workspace_state(&mut self) {
self.ws_states.clear();
self.ws_states.push_back(self.ws.save_state());
}
fn restore_workspace_state(&mut self, view_plugins: &mut ViewPlugins) {
// workspace will recalculate dock areas on the next update
let docks_before_restore = self.ws.get_docks();
self.ws = Workspace::from_state(&self.ws_states[self.cur_state_index]).unwrap();
let docks = self.ws.get_docks();
let views_to_delete: Vec<ViewHandle> = docks_before_restore.iter()
.filter(|&dock_before|!docks.iter().any(|dock| dock_before == dock))
.map(|dock_before| ViewHandle(dock_before.0))
.collect();
Self::remove_views(self, view_plugins, &views_to_delete);
restore_view_plugins(&docks, view_plugins, &mut self.removed_instances);
}
fn undo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index > 0 {
self.cur_state_index -= 1;
self.restore_workspace_state(view_plugins);
}
}
fn redo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index < self.ws_states.len() - 1 {
self.cur_state_index += 1;
self.restore_workspace_state(view_plugins);
}
}
fn save_cur_workspace_state(&mut self) {
let state = self.ws.save_state();
self.save_workspace_state(state);
}
fn save_workspace_state(&mut self, state: String) {
self.ws_states.drain(self.cur_state_index + 1..);
if self.cur_state_index == WORKSPACE_UNDO_LIMIT - 1 {
self.ws_states.pop_front();
self.cur_state_index -= 1;
}
self.ws_states.push_back(state);
self.cur_state_index += 1;
}
fn split_view(&mut self,
plugin_name: &str,
view_plugins: &mut ViewPlugins,
direction: Direction) {
let ui = Imgui::create_ui_instance();
if let Some(handle) =
view_plugins.create_instance(ui, plugin_name, None, None, SessionHandle(0), None) {
let new_dock = DockHandle(handle.0);
if let Some((dock_handle, pos)) = self.context_menu_data {
| {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
} | conditional_block |
mod.rs | < x1 && v.1 >= y0 && v.1 < y1
}
fn restore_view_plugins(docks: &[DockHandle],
view_plugins: &mut ViewPlugins,
info: &mut HashMap<u64, PluginInstanceInfo>) {
for dock in docks.iter() {
if!view_plugins.get_view(ViewHandle(dock.0)).is_some() {
let info = match info.remove(&dock.0) {
None => panic!("Could not restore view: no info in `removed_instances` found"),
Some(info) => info,
};
if info.restore(view_plugins).is_none() {
panic!("Could not restore view");
}
}
}
}
pub struct Window {
/// minifb window
pub win: minifb::Window,
pub menu: Menu,
pub ws: Workspace,
// TODO: should we serialize Workspace if this is stored in memory only?
ws_states: VecDeque<String>,
cur_state_index: usize,
removed_instances: HashMap<u64, PluginInstanceInfo>,
pub mouse_state: MouseState,
pub menu_id_offset: u32,
pub overlay: Option<(DockHandle, Rect)>,
pub context_menu_data: Option<(DockHandle, (f32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if!views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(()) | session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0!= handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close:!open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some(instance) = view_plugins.get_view(*view) {
self.removed_instances.insert(view.0, PluginInstanceInfo::new(instance));
}
view_plugins.destroy_instance(*view);
self.ws.delete_dock_by_handle(DockHandle(view.0));
}
}
fn update_backend_configure(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins) {
if self.config_backend == None {
return;
}
let backend = backend_plugins.get_backend(self.config_backend).unwrap();
unsafe {
let plugin_funcs = backend.plugin_type.plugin_funcs as *mut CBackendCallbacks;
if let Some(show_config) = (*plugin_funcs).show_config {
let ui = Imgui::get_ui();
ui.open_popup("config");
if ui.begin_popup_modal("config") {
show_config(backend.plugin_data, Imgui::get_ui_funs() as *mut c_void);
let ok_size = Some(Vec2 { x: 120.0, y: 0.0 });
let cancel_size = Some(Vec2 { x: 120.0, y: 0.0 });
if ui.button("Ok", ok_size) {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
ui.close_current_popup();
}
ui.same_line(0, -1);
if ui.button("Cancel", cancel_size) {
self.config_backend = None;
ui.close_current_popup();
}
ui.end_popup();
}
} else {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
}
}
}
fn has_source_code_view(&self, view_plugins: &mut ViewPlugins) -> bool {
// TODO: Use setting for this name
for handle in self.ws.get_docks() {
if let Some(plugin) = view_plugins.get_view(ViewHandle(handle.0)) {
if plugin.name == "Source Code View" {
return true;
}
}
}
false
}
fn open_source_file(&mut self,
filename: &str,
view_plugins: &mut ViewPlugins,
session: &mut Session) {
// check if we already have a source view open and just post the message.
if!self.has_source_code_view(view_plugins) {
let mouse = self.get_mouse_pos();
// This is somewhat hacky to set a "correct" split view for
self.context_menu_data = self.ws
.get_dock_handle_at_pos(mouse)
.map(|handle| (handle, mouse));
self.split_view(&"Source Code View".to_owned(),
view_plugins,
Direction::Vertical);
}
let writer = session.get_current_writer();
writer.event_begin(events::EVENT_SET_SOURCE_CODE_FILE as u16);
writer.write_string("filename", filename);
writer.event_end();
}
fn initialize_workspace_state(&mut self) {
self.ws_states.clear();
self.ws_states.push_back(self.ws.save_state());
}
fn restore_workspace_state(&mut self, view_plugins: &mut ViewPlugins) {
// workspace will recalculate dock areas on the next update
let docks_before_restore = self.ws.get_docks();
self.ws = Workspace::from_state(&self.ws_states[self.cur_state_index]).unwrap();
let docks = self.ws.get_docks();
let views_to_delete: Vec<ViewHandle> = docks_before_restore.iter()
.filter(|&dock_before|!docks.iter().any(|dock| dock_before == dock))
.map(|dock_before| ViewHandle(dock_before.0))
.collect();
Self::remove_views(self, view_plugins, &views_to_delete);
restore_view_plugins(&docks, view_plugins, &mut self.removed_instances);
}
fn undo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index > 0 {
self.cur_state_index -= 1;
self.restore_workspace_state(view_plugins);
}
}
fn redo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index < self.ws_states.len() - 1 {
self.cur_state_index += 1;
self.restore_workspace_state(view_plugins);
}
}
fn save_cur_workspace_state(&mut self) {
let state = self.ws.save_state();
self.save_workspace_state(state);
}
fn save_workspace_state(&mut self, state: String) {
self.ws_states.drain(self.cur_state_index + 1..);
if self.cur_state_index == WORKSPACE_UNDO_LIMIT - 1 {
self.ws_states.pop_front();
self.cur_state_index -= 1;
}
self.ws_states.push_back(state);
self.cur_state_index += 1;
}
fn split_view(&mut self,
plugin_name: &str,
view_plugins: &mut ViewPlugins,
direction: Direction) {
let ui = Imgui::create_ui_instance();
if let Some(handle) =
view_plugins.create_instance(ui, plugin_name, None, None, SessionHandle(0), None) {
let new_dock = DockHandle(handle.0);
if let Some((dock_handle, pos)) = self.context_menu_data {
| }
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle, | random_line_split |
mod.rs | x1 && v.1 >= y0 && v.1 < y1
}
fn restore_view_plugins(docks: &[DockHandle],
view_plugins: &mut ViewPlugins,
info: &mut HashMap<u64, PluginInstanceInfo>) {
for dock in docks.iter() {
if!view_plugins.get_view(ViewHandle(dock.0)).is_some() {
let info = match info.remove(&dock.0) {
None => panic!("Could not restore view: no info in `removed_instances` found"),
Some(info) => info,
};
if info.restore(view_plugins).is_none() {
panic!("Could not restore view");
}
}
}
}
pub struct Window {
/// minifb window
pub win: minifb::Window,
pub menu: Menu,
pub ws: Workspace,
// TODO: should we serialize Workspace if this is stored in memory only?
ws_states: VecDeque<String>,
cur_state_index: usize,
removed_instances: HashMap<u64, PluginInstanceInfo>,
pub mouse_state: MouseState,
pub menu_id_offset: u32,
pub overlay: Option<(DockHandle, Rect)>,
pub context_menu_data: Option<(DockHandle, (f32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None,
context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if!views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0!= handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close:!open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some(instance) = view_plugins.get_view(*view) {
self.removed_instances.insert(view.0, PluginInstanceInfo::new(instance));
}
view_plugins.destroy_instance(*view);
self.ws.delete_dock_by_handle(DockHandle(view.0));
}
}
fn update_backend_configure(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins) {
if self.config_backend == None {
return;
}
let backend = backend_plugins.get_backend(self.config_backend).unwrap();
unsafe {
let plugin_funcs = backend.plugin_type.plugin_funcs as *mut CBackendCallbacks;
if let Some(show_config) = (*plugin_funcs).show_config {
let ui = Imgui::get_ui();
ui.open_popup("config");
if ui.begin_popup_modal("config") {
show_config(backend.plugin_data, Imgui::get_ui_funs() as *mut c_void);
let ok_size = Some(Vec2 { x: 120.0, y: 0.0 });
let cancel_size = Some(Vec2 { x: 120.0, y: 0.0 });
if ui.button("Ok", ok_size) {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
ui.close_current_popup();
}
ui.same_line(0, -1);
if ui.button("Cancel", cancel_size) {
self.config_backend = None;
ui.close_current_popup();
}
ui.end_popup();
}
} else {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
}
}
}
fn | (&self, view_plugins: &mut ViewPlugins) -> bool {
// TODO: Use setting for this name
for handle in self.ws.get_docks() {
if let Some(plugin) = view_plugins.get_view(ViewHandle(handle.0)) {
if plugin.name == "Source Code View" {
return true;
}
}
}
false
}
fn open_source_file(&mut self,
filename: &str,
view_plugins: &mut ViewPlugins,
session: &mut Session) {
// check if we already have a source view open and just post the message.
if!self.has_source_code_view(view_plugins) {
let mouse = self.get_mouse_pos();
// This is somewhat hacky to set a "correct" split view for
self.context_menu_data = self.ws
.get_dock_handle_at_pos(mouse)
.map(|handle| (handle, mouse));
self.split_view(&"Source Code View".to_owned(),
view_plugins,
Direction::Vertical);
}
let writer = session.get_current_writer();
writer.event_begin(events::EVENT_SET_SOURCE_CODE_FILE as u16);
writer.write_string("filename", filename);
writer.event_end();
}
fn initialize_workspace_state(&mut self) {
self.ws_states.clear();
self.ws_states.push_back(self.ws.save_state());
}
fn restore_workspace_state(&mut self, view_plugins: &mut ViewPlugins) {
// workspace will recalculate dock areas on the next update
let docks_before_restore = self.ws.get_docks();
self.ws = Workspace::from_state(&self.ws_states[self.cur_state_index]).unwrap();
let docks = self.ws.get_docks();
let views_to_delete: Vec<ViewHandle> = docks_before_restore.iter()
.filter(|&dock_before|!docks.iter().any(|dock| dock_before == dock))
.map(|dock_before| ViewHandle(dock_before.0))
.collect();
Self::remove_views(self, view_plugins, &views_to_delete);
restore_view_plugins(&docks, view_plugins, &mut self.removed_instances);
}
fn undo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index > 0 {
self.cur_state_index -= 1;
self.restore_workspace_state(view_plugins);
}
}
fn redo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index < self.ws_states.len() - 1 {
self.cur_state_index += 1;
self.restore_workspace_state(view_plugins);
}
}
fn save_cur_workspace_state(&mut self) {
let state = self.ws.save_state();
self.save_workspace_state(state);
}
fn save_workspace_state(&mut self, state: String) {
self.ws_states.drain(self.cur_state_index + 1..);
if self.cur_state_index == WORKSPACE_UNDO_LIMIT - 1 {
self.ws_states.pop_front();
self.cur_state_index -= 1;
}
self.ws_states.push_back(state);
self.cur_state_index += 1;
}
fn split_view(&mut self,
plugin_name: &str,
view_plugins: &mut ViewPlugins,
direction: Direction) {
let ui = Imgui::create_ui_instance();
if let Some(handle) =
view_plugins.create_instance(ui, plugin_name, None, None, SessionHandle(0), None) {
let new_dock = DockHandle(handle.0);
if let Some((dock_handle, pos)) = self.context_menu_data {
| has_source_code_view | identifier_name |
mod.rs | x1 && v.1 >= y0 && v.1 < y1
}
fn restore_view_plugins(docks: &[DockHandle],
view_plugins: &mut ViewPlugins,
info: &mut HashMap<u64, PluginInstanceInfo>) {
for dock in docks.iter() {
if!view_plugins.get_view(ViewHandle(dock.0)).is_some() {
let info = match info.remove(&dock.0) {
None => panic!("Could not restore view: no info in `removed_instances` found"),
Some(info) => info,
};
if info.restore(view_plugins).is_none() {
panic!("Could not restore view");
}
}
}
}
pub struct Window {
/// minifb window
pub win: minifb::Window,
pub menu: Menu,
pub ws: Workspace,
// TODO: should we serialize Workspace if this is stored in memory only?
ws_states: VecDeque<String>,
cur_state_index: usize,
removed_instances: HashMap<u64, PluginInstanceInfo>,
pub mouse_state: MouseState,
pub menu_id_offset: u32,
pub overlay: Option<(DockHandle, Rect)>,
pub context_menu_data: Option<(DockHandle, (f32, f32))>,
pub statusbar: Statusbar,
pub custom_menu_height: f32,
/// Backend that is currently being configured.
pub config_backend: Option<BackendHandle>,
/// View currently being renamed
view_rename_state: ViewRenameState,
}
impl Window {
pub fn new(width: usize, height: usize) -> minifb::Result<Window> | context_menu_data: None,
statusbar: Statusbar::new(),
custom_menu_height: 0.0,
config_backend: None,
view_rename_state: ViewRenameState::None,
};
res.initialize_workspace_state();
Ok(res)
}
pub fn pre_update(&mut self) {
self.update_imgui_mouse();
self.update_imgui_keys();
}
pub fn update(&mut self,
sessions: &mut Sessions,
view_plugins: &mut ViewPlugins,
backend_plugins: &mut BackendPlugins) {
// Update minifb window to get current window size
self.win.update();
// Update menus first to find out size of self-drawn menus (if any)
self.update_menus(view_plugins, sessions, backend_plugins);
// Status bar needs full size of window
let win_size = self.win.get_size();
let width = win_size.0 as f32;
let height = (win_size.1 as f32) - self.statusbar.get_size() - self.custom_menu_height;
// Workspace needs area without menus and status bar
self.ws.update_rect(Rect::new(0.0, self.custom_menu_height, width, height));
let mut views_to_delete = Vec::new();
let mut has_shown_menu = 0u32;
let show_context_menu = self.update_mouse_state();
let mouse = self.get_mouse_pos();
let docks = self.ws.get_docks();
for dock in docks {
let view_handle = ViewHandle(dock.0);
let session = match view_plugins.get_view(view_handle)
.and_then(|v| sessions.get_session(v.session_handle)) {
None => continue,
Some(s) => s,
};
let state = Self::update_view(&mut self.ws,
view_plugins,
view_handle,
session,
show_context_menu,
mouse,
&self.overlay);
if state.should_close {
views_to_delete.push(view_handle);
}
has_shown_menu |= state.showed_popup;
}
if!views_to_delete.is_empty() {
Self::remove_views(self, view_plugins, &views_to_delete);
self.save_cur_workspace_state();
}
self.update_statusbar(sessions, backend_plugins, win_size);
self.process_key_presses(view_plugins);
// if now plugin has showed a menu we do it here
// TODO: Handle diffrent cases when attach menu on to plugin menu or not
self.render_popup(show_context_menu && has_shown_menu == 0, view_plugins);
// If we have a backend configuration running
self.update_backend_configure(sessions, backend_plugins);
}
/// Updates the statusbar at the bottom of the window to show which state the debugger currently is in
fn update_statusbar(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins,
size: (usize, usize)) {
let session = sessions.get_current();
ReaderWrapper::reset_reader(&mut session.reader);
for event in session.reader.get_event() {
match event {
EVENT_SET_STATUS => {
if let Ok(status) = session.reader.find_string("status") {
self.statusbar.status = status.to_owned();
}
}
_ => (),
}
}
if let Some(ref backend) = backend_plugins.get_backend(session.backend) {
let name = &backend.plugin_type.name;
self.statusbar.update(&name, size);
} else {
self.statusbar.update("", size);
}
}
pub fn layout_to_string(&mut self, view_plugins: &mut ViewPlugins) -> String {
let layout = WindowLayout::from_current_state(self.ws.clone(), view_plugins);
layout.to_string()
}
pub fn init_layout(&mut self,
layout_data: &str,
view_plugins: &mut ViewPlugins)
-> io::Result<()> {
let layout = match WindowLayout::from_string(layout_data) {
Ok(layout) => layout,
Err(error) => return Result::Err(io::Error::new(io::ErrorKind::InvalidData, error)),
};
self.ws = layout.workspace;
// TODO: should we check here that handles stored in Workspace and handles restored in
// ViewPlugins are the same?
WindowLayout::restore_view_plugins(view_plugins, &layout.infos);
self.initialize_workspace_state();
Ok(())
}
fn update_view(ws: &mut Workspace,
view_plugins: &mut ViewPlugins,
handle: ViewHandle,
session: &mut Session,
show_context_menu: bool,
mouse: (f32, f32),
overlay: &Option<(DockHandle, Rect)>)
-> WindowState {
let ws_container = match ws.root_area
.as_mut()
.and_then(|root| root.get_container_by_dock_handle_mut(DockHandle(handle.0))) {
None => {
panic!("Tried to update view {} but it is not in workspace",
handle.0)
}
Some(container) => container,
};
if ws_container.docks[ws_container.active_dock].0!= handle.0 {
// This view is in hidden tab
return WindowState {
showed_popup: 0,
should_close: false,
};
}
let tab_names: Vec<String> = ws_container.docks
.iter()
.map(|dock_handle| {
view_plugins.get_view(ViewHandle(dock_handle.0))
.map(|plugin| plugin.name.clone())
.unwrap_or("Not loaded".to_string())
})
.collect();
let instance = match view_plugins.get_view(handle) {
None => {
return WindowState {
showed_popup: 0,
should_close: false,
}
}
Some(instance) => instance,
};
Imgui::set_window_pos(ws_container.rect.x, ws_container.rect.y);
Imgui::set_window_size(ws_container.rect.width, ws_container.rect.height);
// TODO: should we avoid repeating window names? Add handle or something like this.
let open = Imgui::begin_window(&instance.name, true);
if tab_names.len() > 1 {
Imgui::begin_window_child("tabs", 20.0);
let mut borders = Vec::with_capacity(tab_names.len());
// TODO: should repeated window names be avoided?
for (i, name) in tab_names.iter().enumerate() {
if Imgui::tab(name,
i == ws_container.active_dock,
i == tab_names.len() - 1) {
ws_container.active_dock = i;
}
borders.push(Imgui::tab_pos());
}
ws_container.update_tab_borders(&borders);
Imgui::end_window_child();
Imgui::separator();
Imgui::begin_window_child("body", 0.0);
}
let ui = &instance.ui;
Imgui::init_state(ui.api);
let pos = ui.get_window_pos();
let size = ui.get_window_size();
Imgui::mark_show_popup(ui.api, is_inside(mouse, pos, size) && show_context_menu);
// Draw drag zone
if let &Some((handle, rect)) = overlay {
if handle.0 == handle.0 {
Imgui::render_frame(rect.x, rect.y, rect.width, rect.height, OVERLAY_COLOR);
}
}
// Make sure we move the cursor to the start of the stream here
ReaderWrapper::reset_reader(&mut session.reader);
unsafe {
let plugin_funcs = instance.plugin_type.plugin_funcs as *mut CViewCallbacks;
((*plugin_funcs).update.unwrap())(instance.plugin_data,
ui.api as *mut c_void,
session.reader.api as *mut c_void,
session.get_current_writer().api as *mut c_void);
}
let has_shown_menu = Imgui::has_showed_popup(ui.api);
if tab_names.len() > 1 {
Imgui::end_window_child();
}
Imgui::end_window();
WindowState {
showed_popup: has_shown_menu,
should_close:!open,
}
}
pub fn remove_views(&mut self, view_plugins: &mut ViewPlugins, views: &Vec<ViewHandle>) {
for view in views {
if let Some(instance) = view_plugins.get_view(*view) {
self.removed_instances.insert(view.0, PluginInstanceInfo::new(instance));
}
view_plugins.destroy_instance(*view);
self.ws.delete_dock_by_handle(DockHandle(view.0));
}
}
fn update_backend_configure(&mut self,
sessions: &mut Sessions,
backend_plugins: &mut BackendPlugins) {
if self.config_backend == None {
return;
}
let backend = backend_plugins.get_backend(self.config_backend).unwrap();
unsafe {
let plugin_funcs = backend.plugin_type.plugin_funcs as *mut CBackendCallbacks;
if let Some(show_config) = (*plugin_funcs).show_config {
let ui = Imgui::get_ui();
ui.open_popup("config");
if ui.begin_popup_modal("config") {
show_config(backend.plugin_data, Imgui::get_ui_funs() as *mut c_void);
let ok_size = Some(Vec2 { x: 120.0, y: 0.0 });
let cancel_size = Some(Vec2 { x: 120.0, y: 0.0 });
if ui.button("Ok", ok_size) {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
ui.close_current_popup();
}
ui.same_line(0, -1);
if ui.button("Cancel", cancel_size) {
self.config_backend = None;
ui.close_current_popup();
}
ui.end_popup();
}
} else {
sessions.get_current().set_backend(self.config_backend);
self.config_backend = None;
}
}
}
fn has_source_code_view(&self, view_plugins: &mut ViewPlugins) -> bool {
// TODO: Use setting for this name
for handle in self.ws.get_docks() {
if let Some(plugin) = view_plugins.get_view(ViewHandle(handle.0)) {
if plugin.name == "Source Code View" {
return true;
}
}
}
false
}
fn open_source_file(&mut self,
filename: &str,
view_plugins: &mut ViewPlugins,
session: &mut Session) {
// check if we already have a source view open and just post the message.
if!self.has_source_code_view(view_plugins) {
let mouse = self.get_mouse_pos();
// This is somewhat hacky to set a "correct" split view for
self.context_menu_data = self.ws
.get_dock_handle_at_pos(mouse)
.map(|handle| (handle, mouse));
self.split_view(&"Source Code View".to_owned(),
view_plugins,
Direction::Vertical);
}
let writer = session.get_current_writer();
writer.event_begin(events::EVENT_SET_SOURCE_CODE_FILE as u16);
writer.write_string("filename", filename);
writer.event_end();
}
fn initialize_workspace_state(&mut self) {
self.ws_states.clear();
self.ws_states.push_back(self.ws.save_state());
}
fn restore_workspace_state(&mut self, view_plugins: &mut ViewPlugins) {
// workspace will recalculate dock areas on the next update
let docks_before_restore = self.ws.get_docks();
self.ws = Workspace::from_state(&self.ws_states[self.cur_state_index]).unwrap();
let docks = self.ws.get_docks();
let views_to_delete: Vec<ViewHandle> = docks_before_restore.iter()
.filter(|&dock_before|!docks.iter().any(|dock| dock_before == dock))
.map(|dock_before| ViewHandle(dock_before.0))
.collect();
Self::remove_views(self, view_plugins, &views_to_delete);
restore_view_plugins(&docks, view_plugins, &mut self.removed_instances);
}
fn undo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index > 0 {
self.cur_state_index -= 1;
self.restore_workspace_state(view_plugins);
}
}
fn redo_workspace_change(&mut self, view_plugins: &mut ViewPlugins) {
if self.cur_state_index < self.ws_states.len() - 1 {
self.cur_state_index += 1;
self.restore_workspace_state(view_plugins);
}
}
fn save_cur_workspace_state(&mut self) {
let state = self.ws.save_state();
self.save_workspace_state(state);
}
    /// Pushes `state` onto the undo history, discarding any redo entries and
    /// enforcing `WORKSPACE_UNDO_LIMIT` by dropping the oldest entry.
    fn save_workspace_state(&mut self, state: String) {
        // Discard everything after the current state (pending "redo" entries).
        self.ws_states.drain(self.cur_state_index + 1..);
        // History is full: forget the oldest entry and shift the index back
        // so the push/increment below keeps us at the limit.
        if self.cur_state_index == WORKSPACE_UNDO_LIMIT - 1 {
            self.ws_states.pop_front();
            self.cur_state_index -= 1;
        }
        self.ws_states.push_back(state);
        self.cur_state_index += 1;
    }
fn split_view(&mut self,
plugin_name: &str,
view_plugins: &mut ViewPlugins,
direction: Direction) {
let ui = Imgui::create_ui_instance();
if let Some(handle) =
view_plugins.create_instance(ui, plugin_name, None, None, SessionHandle(0), None) {
let new_dock = DockHandle(handle.0);
if let Some((dock_handle, pos)) = self.context_menu_data {
| {
let options = WindowOptions {
resize: true,
scale: Scale::X1,
..WindowOptions::default()
};
let win = try!(minifb::Window::new("ProDBG", width, height, options));
let ws = Workspace::new(Rect::new(0.0, 0.0, width as f32, (height - 20) as f32));
let ws_states = VecDeque::with_capacity(WORKSPACE_UNDO_LIMIT);
let mut res = Window {
win: win,
menu: Menu::new(),
menu_id_offset: 1000,
mouse_state: MouseState::Default,
ws: ws,
ws_states: ws_states,
cur_state_index: 0usize,
removed_instances: HashMap::new(),
overlay: None, | identifier_body |
stack.rs | // Copyright 2017-2018 the authors. See the 'Copyright and license' section of the
// README.md file at the top-level directory of this repository.
//
// Licensed under the Apache License, Version 2.0 (the LICENSE-APACHE file) or
// the MIT license (the LICENSE-MIT file) at your option. This file may not be
// copied, modified, or distributed except according to those terms.
// So clippy doesn't complain that IllumOS isn't in tick marks
#![cfg_attr(feature = "cargo-clippy", allow(doc_markdown))]
//! A generic stack-based slab.
//!
//! This module implements a generic slab which uses an inline header and a stack of object
//! pointers in place of a free list. It differs from both the large and small slab algorithms
//! introduced by Bonwick. Bonwick's large slabs used a separately-allocated header and a free list
//! of separately-allocated control objects (`kmem_bufctl`s in the IllumOS implementation).
//! Bonwick's small slabs used an inline header like the present implementation, but used a free
//! list constructed from headers on each object instead of a stack of pointers.
//!
//! This implementation is generic in that it does not prescribe a method for mapping objects to
//! their containing slabs, but instead requires that an implementation of this functionality be
//! provided (see the `ConfigData` trait). The `aligned` module implements this functionality by
//! ensuring that slabs have an alignment equal to their size, and using this to compute a bit mask
//! for objects in the slab. The `large` module implements this functionality by storing object
//! pointers or page addresses in an allocator-global hash table.
//!
//! # Layout
//! The layout of stack-based slabs is somewhat confusing, and not readily obvious from the code.
//! This is due largely to the fact that slab size cannot be known at compile time, and must
//! instead be computed at runtime. Why this is a problem will become apparent shortly.
//!
//! The layout in memory of stack-based slabs is as follows:
//!
//! ```text
//! <header> <pre-stack padding> <stack> <post-stack padding> <array of objects>
//! ```
//!
//! The following requirements must be met with respect to memory layout:
//!
//! * The stack - which is an array of `usize` - must be aligned according to the alignment
//! required by `usize`
//! * The array of objects must be aligned according to the alignment requested by the user.
//!
//! The first requirement implies that there may need to be some padding between the header and the
//! stack. The second requirement implies that there may need to be some padding between the stack
//! and the array of objects.
//!
//! If the number of objects in a slab could be known statically, the stack could simply be an
//! array in the header. Instead, its size has to be computed dynamically, and thus cannot be a
//! field in the header (it could technically be `[*mut T]`, but this would make querying the
//! header's size more difficult).
//!
//! Instead, we use the `util::stack` module to implement a dynamically-sized stack, and to
//! dynamically compute the proper pre-stack padding required in order to give the stack the proper
//! alignment. We do the same for the post-stack padding in order to give the array of objects the
//! proper alignment.
use alloc::alloc;
use core::ptr::NonNull;
use core::{mem, ptr};
use init::InitSystem;
use object_alloc::UntypedObjectAlloc;
use util::color::{Color, ColorSettings};
use util::list::*;
use util::stack::Stack;
use SlabSystem;
/// Configuration to customize a stack-based slab implementation.
///
/// `ConfigData` completes the stack-based slab implementation by providing post-alloc and
/// pre-dealloc hooks and by providing a mechanism to look up an object's containing slab.
pub trait ConfigData
where
Self: Sized,
{
/// Perform per-slab post-allocation work.
///
/// `post_alloc` is called after a newly-allocated slab has been initialized. It is optional,
/// and defaults to a no-op.
#[allow(unused)]
fn post_alloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Perform per-slab pre-deallocation work.
///
/// `pre_dealloc` is called before a slab is uninitialized and deallocated. It is optional, and
/// defaults to a no-op.
#[allow(unused)]
fn pre_dealloc(&mut self, layout: &Layout, slab_size: usize, slab: NonNull<SlabHeader>) {}
/// Look up an object's slab.
///
/// Given an object, `ptr_to_slab` locates the slab containing that object.
fn ptr_to_slab(&self, slab_size: usize, ptr: NonNull<u8>) -> NonNull<SlabHeader>;
}
/// A stack-based slab system, parameterized over a backing slab allocator `A`
/// and the slab-lookup/hook configuration `C`.
pub struct System<A: UntypedObjectAlloc, C: ConfigData> {
    /// User-supplied configuration (hooks + object-to-slab lookup).
    pub data: C,
    // computed per-slab layout (stack/array offsets, object count, coloring)
    layout: Layout,
    // allocator that provides the raw slab memory
    alloc: A,
}
impl<A: UntypedObjectAlloc, C: ConfigData> System<A, C> {
    /// Constructs a `System` from its configuration data, precomputed object
    /// layout, and backing slab allocator.
    pub fn from_config_data(data: C, layout: Layout, alloc: A) -> System<A, C> {
        System { data, layout, alloc }
    }
}
impl<I: InitSystem, A: UntypedObjectAlloc, C: ConfigData> SlabSystem<I> for System<A, C> {
    type Slab = SlabHeader;
    /// Allocates and initializes a new slab: writes the header, pushes every
    /// object pointer (marked uninitialized) onto the slab's stack, and runs
    /// the `post_alloc` hook. Returns `None` if the backing allocator fails.
    fn alloc_slab(&mut self) -> Option<NonNull<SlabHeader>> {
        unsafe {
            let color = self
                .layout
                .color_settings
                .next_color(self.layout.layout.align());
            let slab = self.alloc.alloc()?.cast();
            // Initialize the inline header at the base of the slab.
            ptr::write(
                slab.as_ptr(),
                SlabHeader {
                    stack: Stack::new(),
                    color: color,
                    next: None,
                    prev: None,
                },
            );
            let stack_data_ptr = self.layout.stack_begin(slab);
            // Seed the stack with every object in the slab, all uninitialized.
            for i in 0..self.layout.num_obj {
                let ptr = self.layout.nth_obj(slab, color, i);
                (*slab.as_ptr())
                    .stack
                    .push(stack_data_ptr, I::pack(ptr, I::status_uninitialized()));
            }
            self.data
                .post_alloc(&self.layout, self.alloc.layout().size(), slab);
            Some(slab)
        }
    }
    /// Tears down a full slab: runs the `pre_dealloc` hook, drops every
    /// remaining object according to its init status, and frees the memory.
    fn dealloc_slab(&mut self, slab: NonNull<SlabHeader>) {
        unsafe {
            // Only full slabs (no objects outstanding) may be deallocated.
            debug_assert_eq!((*slab.as_ptr()).stack.size(), self.layout.num_obj);
            self.data
                .pre_dealloc(&self.layout, self.alloc.layout().size(), slab);
            let stack_data_ptr = self.layout.stack_begin(slab);
            for _ in 0..self.layout.num_obj {
                let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
                I::drop(I::unpack_ptr(packed), I::unpack_status(packed));
            }
            self.alloc.dealloc(slab.cast());
        }
    }
    /// A slab is "full" when every object is back on its stack (none allocated).
    fn is_full(&self, slab: NonNull<SlabHeader>) -> bool {
        unsafe { (*slab.as_ptr()).stack.size() == self.layout.num_obj }
    }
    /// A slab is "empty" when its stack is exhausted (all objects allocated).
    fn is_empty(&self, slab: NonNull<SlabHeader>) -> bool {
        unsafe { (*slab.as_ptr()).stack.size() == 0 }
    }
    /// Pops one object (and its init status) off the slab's stack.
    fn alloc(&self, slab: NonNull<SlabHeader>) -> (NonNull<u8>, I::Status) {
        unsafe {
            let stack_data_ptr = self.layout.stack_begin(slab);
            let packed = (*slab.as_ptr()).stack.pop(stack_data_ptr);
            (I::unpack_ptr(packed), I::unpack_status(packed))
        }
    }
    /// Returns an object to its slab (located via `ptr_to_slab`); the bool
    /// reports whether the slab was empty before this push.
    fn dealloc(&self, obj: NonNull<u8>, init_status: I::Status) -> (NonNull<SlabHeader>, bool) {
        unsafe {
            let slab = self.data.ptr_to_slab(self.alloc.layout().size(), obj);
            let was_empty = (*slab.as_ptr()).stack.size() == 0;
            let stack_data_ptr = self.layout.stack_begin(slab);
            (*slab.as_ptr())
                .stack
                .push(stack_data_ptr, I::pack(obj, init_status));
            (slab, was_empty)
        }
    }
}
/// Inline slab header stored at the base of every slab; it is followed in
/// memory by the pointer stack, padding, and the object array (see module docs).
pub struct SlabHeader {
    stack: Stack<usize>, // note: this is only the metadata; the real stack comes after this header
    color: Color, // extra padding added before array beginning
    next: Option<NonNull<SlabHeader>>,
    prev: Option<NonNull<SlabHeader>>,
}
impl Linkable for SlabHeader {
fn | (&self) -> Option<NonNull<SlabHeader>> {
self.next
}
fn prev(&self) -> Option<NonNull<SlabHeader>> {
self.prev
}
fn set_next(&mut self, next: Option<NonNull<SlabHeader>>) {
self.next = next;
}
fn set_prev(&mut self, prev: Option<NonNull<SlabHeader>>) {
self.prev = prev;
}
}
impl SlabHeader {
    /// Returns the cache color (extra pre-array padding) assigned to this slab.
    pub fn get_color(&self) -> Color {
        self.color
    }
}
/// Computed memory layout of a slab: object count, offsets of the pointer
/// stack and the object array, and cache-coloring settings.
#[derive(Clone)]
pub struct Layout {
    /// Number of objects that fit in one slab.
    pub num_obj: usize,
    /// Size/alignment of a single object.
    pub layout: alloc::Layout,
    /// Offset from the slab base to the pointer stack.
    pub stack_begin_offset: usize,
    /// Offset from the slab base to the object array (before coloring).
    pub array_begin_offset: usize,
    /// Cache-coloring configuration derived from the slab's leftover space.
    pub color_settings: ColorSettings,
}
impl Layout {
    /// Determines whether an allocator can be constructed for T using the given slab size. If so,
    /// it returns a constructed Layout for T using that slab size and the amount of unused space
    /// left at the end of the slab (when no coloring is used).
    pub fn for_slab_size(layout: alloc::Layout, slab_size: usize) -> Option<(Layout, usize)> {
        let obj_size = layout.size();
        let obj_align = layout.align();
        let hdr_size = mem::size_of::<SlabHeader>();
        // padding between the SlabHeader and the base of the pointer stack
        let pre_stack_padding = Stack::<usize>::padding_after(hdr_size);
        let stack_begin_offset = hdr_size + pre_stack_padding;
        // Find the largest number of objects we can fit in the slab. array_begin_offset is the
        // offset from the beginning of the slab of the array of objects.
        let (mut num_obj, mut array_begin_offset) = (0, 0);
        loop {
            // Tentatively grow the object count by one; the stack grows with it.
            let candidate = num_obj + 1;
            // total_hdr_size = size of header, post-header padding, and stack
            let total_hdr_size = stack_begin_offset + Stack::<usize>::bytes_for(candidate);
            // Padding between the pointer stack and the array of objects. NOTE:
            // The Layout alignment isn't used here, so we use 1 because it's
            // guaranteed not to cause from_size_align to return None.
            let post_stack_padding = alloc::Layout::from_size_align(total_hdr_size, 1)
                .unwrap()
                .padding_needed_for(obj_align);
            if total_hdr_size + post_stack_padding + (candidate * obj_size) <= slab_size {
                num_obj = candidate;
                array_begin_offset = total_hdr_size + post_stack_padding;
            } else {
                break;
            }
        }
        // Not even a single object fits: this slab size is unusable.
        if num_obj == 0 {
            return None;
        }
        assert!(array_begin_offset > 0);
        // Space left over after the header/stack/objects; feeds cache coloring.
        let unused_space = slab_size - array_begin_offset - (num_obj * obj_size);
        let l = Layout {
            num_obj: num_obj,
            layout: layout,
            stack_begin_offset: stack_begin_offset,
            array_begin_offset: array_begin_offset,
            color_settings: ColorSettings::new(obj_align, unused_space),
        };
        // assert that the objects fit within the slab
        assert!(
            slab_size
                >= l.array_begin_offset
                    + l.color_settings.max_color().as_usize()
                    + (l.num_obj * obj_size)
        );
        Some((l, unused_space))
    }
    /// Returns a pointer to the first object in `slab`, shifted by `color`.
    fn array_begin(&self, slab: NonNull<SlabHeader>, color: Color) -> NonNull<u8> {
        debug_assert!(color.as_usize() <= self.color_settings.max_color().as_usize());
        // SAFETY-NOTE(review): the offset stays inside the slab by the
        // construction checks in for_slab_size, so the result is non-null.
        unsafe {
            NonNull::new_unchecked(
                ((slab.as_ptr() as usize) + self.array_begin_offset + color.as_usize()) as *mut u8,
            )
        }
    }
    /// Returns a pointer to the base of `slab`'s pointer stack.
    fn stack_begin(&self, slab: NonNull<SlabHeader>) -> NonNull<usize> {
        unsafe {
            NonNull::new_unchecked(
                ((slab.as_ptr() as usize) + self.stack_begin_offset) as *mut usize,
            )
        }
    }
    /// Returns a pointer to the `n`th object in `slab` for the given color.
    pub fn nth_obj(&self, slab: NonNull<SlabHeader>, color: Color, n: usize) -> NonNull<u8> {
        debug_assert!((n as usize) < self.num_obj);
        unsafe {
            NonNull::new_unchecked(
                (self.array_begin(slab, color).as_ptr() as usize + n * self.layout.size())
                    as *mut u8,
            )
        }
    }
}
| next | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.