file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes)
---|---|---|---|---
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
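// Illustrative sketch, not part of the original source: how a source-level
// breakpoint covering a whole file might be recorded with the types above.
// The `Source` value is taken as a parameter because its fields come from the
// DAP client; `address` is assumed to be the resolved instruction address.
#[allow(dead_code)]
fn example_source_breakpoint(address: u64, source: Source) -> ActiveBreakpoint {
    ActiveBreakpoint {
        breakpoint_type: BreakpointType::SourceBreakpoint {
            source,
            // `All` applies the request to every location in the Source.
            location: SourceLocationScope::All,
        },
        address,
    }
}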
/// SessionData is designed to be similar to [probe_rs::Session], in as much as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 |
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' (just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection, and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone", we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
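// Illustrative sketch, an assumption not present in the original source: the
// kind of polling loop a caller might run around `poll_cores`, napping only
// when the last pass suggested a delay. `POLL_NAP` is a hypothetical constant.
//
// const POLL_NAP: std::time::Duration = std::time::Duration::from_millis(50);
// loop {
//     let (_core_statuses, suggest_delay) =
//         session_data.poll_cores(&session_config, &mut debug_adapter)?;
//     if suggest_delay {
//         std::thread::sleep(POLL_NAP);
//     }
// }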
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
}
| {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
} | conditional_block |
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct ActiveBreakpoint {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' (just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection, and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone", we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> | {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
} | identifier_body |
|
session_data.rs | use super::{
configuration::{self, CoreConfig, SessionConfig},
core_data::{CoreData, CoreHandle},
};
use crate::cmd::dap_server::{
debug_adapter::{
dap::adapter::DebugAdapter, dap::dap_types::Source, protocol::ProtocolAdapter,
},
DebuggerError,
};
use anyhow::{anyhow, Result};
use probe_rs::{
config::TargetSelector,
debug::{debug_info::DebugInfo, SourceLocation},
CoreStatus, DebugProbeError, Permissions, Probe, ProbeCreationError, Session,
};
use std::env::set_current_dir;
use time::UtcOffset;
/// The supported breakpoint types
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum BreakpointType {
/// A breakpoint was requested using an instruction address, and usually a result of a user requesting a
/// breakpoint while in a 'disassembly' view.
InstructionBreakpoint,
/// A breakpoint that has a Source, and usually a result of a user requesting a breakpoint while in a 'source' view.
SourceBreakpoint {
source: Source,
location: SourceLocationScope,
},
}
/// Breakpoint requests will either refer to a specific SourceLocation, or be unspecified, in which case they will refer to
/// all breakpoints for the Source.
#[derive(Clone, Debug, PartialEq)]
pub(crate) enum SourceLocationScope {
All,
Specific(SourceLocation),
}
/// Provide the storage and methods to handle various [`BreakpointType`]
#[derive(Clone, Debug)]
pub(crate) struct | {
pub(crate) breakpoint_type: BreakpointType,
pub(crate) address: u64,
}
/// SessionData is designed to be similar to [probe_rs::Session], in as much as it provides handles to the [CoreHandle] instances for each of the available [probe_rs::Core] involved in the debug session.
/// To get access to the [CoreHandle] for a specific [probe_rs::Core], use the [SessionData::attach_core] method.
/// TODO: Adjust [SessionConfig] to allow multiple cores (and if appropriate, their binaries) to be specified.
pub(crate) struct SessionData {
pub(crate) session: Session,
/// [SessionData] will manage one [CoreData] per target core that is also present in [SessionConfig::core_configs]
pub(crate) core_data: Vec<CoreData>,
/// Offset used for RTC timestamps
///
/// Getting the offset can fail, so it's better to store it.
timestamp_offset: UtcOffset,
}
impl SessionData {
pub(crate) fn new(
config: &mut configuration::SessionConfig,
timestamp_offset: UtcOffset,
) -> Result<Self, DebuggerError> {
// `SessionConfig` Probe/Session level configurations initialization.
let mut target_probe = match config.probe_selector.clone() {
Some(selector) => Probe::open(selector.clone()).map_err(|e| match e {
DebugProbeError::ProbeCouldNotBeCreated(ProbeCreationError::NotFound) => {
DebuggerError::Other(anyhow!(
"Could not find the probe_selector specified as {:04x}:{:04x}:{:?}",
selector.vendor_id,
selector.product_id,
selector.serial_number
))
}
other_error => DebuggerError::DebugProbe(other_error),
}),
None => {
// Only automatically select a probe if there is only a single probe detected.
let list = Probe::list_all();
if list.len() > 1 {
return Err(DebuggerError::Other(anyhow!(
"Found multiple ({}) probes",
list.len()
)));
}
if let Some(info) = list.first() {
Probe::open(info).map_err(DebuggerError::DebugProbe)
} else {
return Err(DebuggerError::Other(anyhow!(
"No probes found. Please check your USB connections."
)));
}
}
}?;
let target_selector = match &config.chip {
Some(identifier) => identifier.into(),
None => TargetSelector::Auto,
};
// Set the protocol, if the user explicitly selected a protocol. Otherwise, use the default protocol of the probe.
if let Some(wire_protocol) = config.wire_protocol {
target_probe.select_protocol(wire_protocol)?;
}
// Set the speed.
if let Some(speed) = config.speed {
let actual_speed = target_probe.set_speed(speed)?;
if actual_speed != speed {
tracing::warn!(
"Protocol speed {} kHz not supported, actual speed is {} kHz",
speed,
actual_speed
);
}
}
let mut permissions = Permissions::new();
if config.allow_erase_all {
permissions = permissions.allow_erase_all();
}
// Attach to the probe.
let target_session = if config.connect_under_reset {
target_probe.attach_under_reset(target_selector, permissions)?
} else {
target_probe
.attach(target_selector, permissions)
.map_err(|err| anyhow!("Error attaching to the probe: {:?}.", err))?
};
// Change the current working directory if `config.cwd` is `Some(T)`.
if let Some(new_cwd) = config.cwd.clone() {
set_current_dir(new_cwd.as_path()).map_err(|err| {
anyhow!(
"Failed to set current working directory to: {:?}, {:?}",
new_cwd,
err
)
})?;
};
// `FlashingConfig` probe level initialization.
// `CoreConfig` probe level initialization.
if config.core_configs.len() != 1 {
// TODO: For multi-core, allow > 1.
return Err(DebuggerError::Other(anyhow!("probe-rs-debugger requires that one, and only one, core be configured for debugging.")));
}
// Filter `CoreConfig` entries based on those that match an actual core on the target probe.
let valid_core_configs = config
.core_configs
.iter()
.filter(|&core_config| {
target_session
.list_cores()
.iter()
.any(|(target_core_index, _)| *target_core_index == core_config.core_index)
})
.cloned()
.collect::<Vec<CoreConfig>>();
let mut core_data_vec = vec![];
for core_configuration in &valid_core_configs {
core_data_vec.push(CoreData {
core_index: core_configuration.core_index,
last_known_status: CoreStatus::Unknown,
target_name: format!(
"{}-{}",
core_configuration.core_index,
target_session.target().name
),
debug_info: debug_info_from_binary(core_configuration)?,
core_peripherals: None,
stack_frames: Vec::<probe_rs::debug::stack_frame::StackFrame>::new(),
breakpoints: Vec::<ActiveBreakpoint>::new(),
rtt_connection: None,
})
}
Ok(SessionData {
session: target_session,
core_data: core_data_vec,
timestamp_offset,
})
}
/// Reload a specific core's debug info from the binary file.
pub(crate) fn load_debug_info_for_core(
&mut self,
core_configuration: &CoreConfig,
) -> Result<(), DebuggerError> {
if let Some(core_data) = self
.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_configuration.core_index)
{
core_data.debug_info = debug_info_from_binary(core_configuration)?;
Ok(())
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// Do a 'lightweight' (just get references to existing data structures) attach to the core and return relevant debug data.
pub(crate) fn attach_core(&mut self, core_index: usize) -> Result<CoreHandle, DebuggerError> {
if let (Ok(target_core), Some(core_data)) = (
self.session.core(core_index),
self.core_data
.iter_mut()
.find(|core_data| core_data.core_index == core_index),
) {
Ok(CoreHandle {
core: target_core,
core_data,
})
} else {
Err(DebuggerError::UnableToOpenProbe(Some(
"No core at the specified index.",
)))
}
}
/// The target has no way of notifying the debug adapter when things change, so we have to constantly poll it to determine:
/// - Whether the target cores are running, and what their actual status is.
/// - Whether the target cores have data in their RTT buffers that we need to read and pass to the client.
///
/// To optimize this polling process while also optimizing the reading of RTT data, we apply a couple of principles:
/// 1. Sleep (nap for a short duration) between polling each target core, but:
/// - Only sleep IF the core's status hasn't changed AND there was no RTT data in the last poll.
/// - Otherwise move on without delay, to keep things flowing as fast as possible.
/// - The justification is that any client side CPU used to keep polling is a small price to pay for maximum throughput of debug requests and RTT from the probe.
/// 2. Check all target cores to ensure they have a configured and initialized RTT connection, and if they do, process the RTT data.
/// - To keep things efficient, the polling of RTT data is done only when we expect there to be data available.
/// - We check for RTT only when the core has an RTT connection configured, and one of the following is true:
/// - While the core is NOT halted, because core processing can generate new data at any time.
/// - The first time we have entered halted status, to ensure the buffers are drained. After that, for as long as we remain in halted state, we don't need to check RTT again.
///
/// Return a Vec of [`CoreStatus`] (one entry per core) after this process has completed, as well as a boolean indicating whether we should consider a short delay before the next poll.
pub(crate) fn poll_cores<P: ProtocolAdapter>(
&mut self,
session_config: &SessionConfig,
debug_adapter: &mut DebugAdapter<P>,
) -> Result<(Vec<CoreStatus>, bool), DebuggerError> {
// By default, we will have a small delay between polls, and will disable it if we know the last poll returned data, on the assumption that there might be at least one more batch of data.
let mut suggest_delay_required = true;
let mut status_of_cores: Vec<CoreStatus> = vec![];
let target_memory_map = &self.session.target().memory_map.clone();
let timestamp_offset = self.timestamp_offset;
// Always set `all_cores_halted` to true, until one core is found to be running.
debug_adapter.all_cores_halted = true;
for core_config in session_config.core_configs.iter() {
let Ok(mut target_core) = self.attach_core(core_config.core_index) else {
tracing::debug!(
"Failed to attach to target core #{}. Cannot poll for RTT data.",
core_config.core_index
);
continue;
};
// We need to poll the core to determine its status.
let current_core_status = target_core.poll_core(debug_adapter).map_err(|error| {
let error = DebuggerError::ProbeRs(error);
let _ = debug_adapter.show_error_message(&error);
error
})?;
// If appropriate, check for RTT data.
if core_config.rtt_config.enabled {
if let Some(core_rtt) = &mut target_core.core_data.rtt_connection {
// We should poll the target for rtt data, and if any RTT data was processed, we clear the flag.
if core_rtt.process_rtt_data(debug_adapter, &mut target_core.core) {
suggest_delay_required = false;
}
} else if debug_adapter.configuration_is_done() {
// We have not yet reached the point in the target application where the RTT buffers are initialized,
// so, provided we have processed the MSDAP request for "configurationDone", we should check again.
{
#[allow(clippy::unwrap_used)]
match target_core.attach_to_rtt(
debug_adapter,
target_memory_map,
core_config.program_binary.as_ref().unwrap(),
&core_config.rtt_config,
timestamp_offset,
) {
Ok(_) => {
// Nothing else to do.
}
Err(error) => {
debug_adapter
.show_error_message(&DebuggerError::Other(error))
.ok();
}
}
}
}
}
// If the core is running, we set the flag to indicate that at least one core is not halted.
// By setting it here, we ensure that RTT will be checked at least once after the core has halted.
if !current_core_status.is_halted() {
debug_adapter.all_cores_halted = false;
}
status_of_cores.push(current_core_status);
}
Ok((status_of_cores, suggest_delay_required))
}
}
pub(crate) fn debug_info_from_binary(
core_configuration: &CoreConfig,
) -> Result<DebugInfo, DebuggerError> {
let debug_info = if let Some(binary_path) = &core_configuration.program_binary {
DebugInfo::from_file(binary_path).map_err(|error| DebuggerError::Other(anyhow!(error)))?
} else {
return Err(anyhow!(
"Please provide a valid `program_binary` for debug core: {:?}",
core_configuration.core_index
)
.into());
};
Ok(debug_info)
}
| ActiveBreakpoint | identifier_name |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because the only error condition occurs with templates, and the default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashmap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns an array of values into a comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) | .iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
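// Illustrative sketch, not part of the original source: exercising the two
// helpers registered above on a bare `Handlebars` instance. The template text
// and data values are made up for this example.
#[test]
fn base_helpers_example() {
    let mut hb = Handlebars::new();
    add_base_helpers(&mut hb);
    hb.register_template_string("t", r#"{{join-csv tags}} {{format-date date "%Y"}}"#)
        .expect("register template");
    let data = serde_json::json!({ "tags": ["a", "b"], "date": "2020-01-02T03:04:05Z" });
    // 'join-csv' joins the array; 'format-date' reformats the RFC3339 date.
    assert_eq!(hb.render("t", &data).expect("render"), "a,b 2020");
}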
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">...</span>.
/// add_style, e.g., "bg-green-100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
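// Illustrative sketch, not part of the original source: the word inserted in
// the second string should come back wrapped in the `ins_start`/`ins_end`
// markers of the default `DiffStyle`.
#[test]
fn generate_diff_example() {
    let style = DiffStyle::default();
    let html = generate_diff("hello world", "hello brave world", &style).expect("diff");
    assert!(html.contains("brave"));
    assert!(html.contains(style.ins_start.as_str()));
    assert!(html.contains(style.ins_end.as_str()));
}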
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
/// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// had to remove newlines - there's an added \n after
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
}
| {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))? | identifier_body |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn default() -> Self {
// unwrap ok because the only error condition occurs with templates, and the default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns an array of values into a comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">...</span>.
/// add_style, e.g., "bg-green-100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
| /// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// had to remove newlines - there's an added \n after
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
} | let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
| random_line_split |
render.rs | //! HTML generation
//!
use crate::{Result, TomlMap};
use chrono::DateTime;
use handlebars::Handlebars;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use toml::value::Value as TomlValue;
/// Html to insert before and after diff chunks
pub struct DiffStyle {
/// Html to insert before a span of inserted content
/// `<span class="...">`
pub ins_start: String,
/// Html to insert after a span of inserted content
/// `</span>`
pub ins_end: String,
/// Html to insert before a span of deleted content
/// `<span class="...">`
pub del_start: String,
/// Html to insert after a span of deleted content
/// `</span>`
pub del_end: String,
}
impl Default for DiffStyle {
fn default() -> DiffStyle {
DiffStyle {
ins_start: r#"<span class="bg-green-100 text-gray-600">"#.to_string(),
ins_end: r#"</span>"#.to_string(),
del_start: r#"<span class="bg-red-100 text-gray-600 line-through">"#.to_string(),
del_end: r#"</span>"#.to_string(),
}
}
}
// these defaults can be overridden by the config file
/// Pairing of template name and contents
///
pub type Template<'template> = (&'template str, &'template str);
#[derive(Debug)]
pub struct RenderConfig<'render> {
/// Templates to be loaded for renderer. List of template name, data
pub templates: Vec<Template<'render>>,
/// Whether parser is in strict mode (e.g. if true, a variable used in template
/// that is undefined would raise an error; if false, it would evaluate to 'falsey'
pub strict_mode: bool,
}
impl<'render> Default for RenderConfig<'render> {
fn default() -> Self {
Self {
templates: Vec::new(),
strict_mode: false,
}
}
}
/// HBTemplate processor for HTML generation
pub struct Renderer<'gen> {
/// Handlebars processor
hb: Handlebars<'gen>,
/// Additional dictionary that supplements data passed to render() method
vars: TomlMap,
}
impl<'gen> Default for Renderer<'gen> {
fn | () -> Self {
// unwrap ok because the only error condition occurs with templates, and the default has none.
Self::init(&RenderConfig::default()).unwrap()
}
}
impl<'gen> Renderer<'gen> {
/// Initialize handlebars template processor.
pub fn init(config: &RenderConfig) -> Result<Self> {
let mut hb = Handlebars::new();
// don't use strict mode because docs may have different frontmatter vars
// and it's easier in templates to use if we allow undefined ~= false-y
hb.set_strict_mode(config.strict_mode);
hb.register_escape_fn(handlebars::no_escape); // html escaping is the default and causes issues
add_base_helpers(&mut hb);
for t in &config.templates {
hb.register_template_string(t.0, t.1)?;
}
let renderer = Self {
hb,
vars: TomlMap::new(),
};
Ok(renderer)
}
/// Replace renderer dict.
/// Values in the renderer dict override any values passed to render()
pub fn set_vars(&mut self, vars: TomlMap) {
self.vars = vars
}
/// Sets all the vars from the hashap into the render dict
pub fn set_from<T: Into<toml::Value>>(&mut self, vars: HashMap<String, T>) {
for (k, v) in vars.into_iter() {
self.set(k, v);
}
}
/// Set a value in the renderer dict. If the key was previously set, it is replaced.
/// Values in the renderer dict override any values passed to render()
pub fn set<T: Into<TomlValue>>(&mut self, key: String, val: T) {
self.vars.insert(key, val.into());
}
/// Remove key if it was present
pub fn remove(&mut self, key: &str) {
self.vars.remove(key);
}
/// Adds template to internal dictionary
pub fn add_template(&mut self, template: Template) -> Result<()> {
self.hb.register_template_string(template.0, template.1)?;
Ok(())
}
/// Render a template with data.
pub fn render<W>(&self, template_name: &str, mut data: TomlMap, writer: &mut W) -> Result<()>
where
W: std::io::Write,
{
// add variables that extend/override passed data
data.extend(self.vars.clone().into_iter());
self.hb.render_to_write(template_name, &data, writer)?;
Ok(())
}
/// Convert markdown to html and generate html page,
/// using 'map' data as render vars
pub fn write_page_html<W: std::io::Write>(
&self,
mut map: TomlMap,
markdown: &str,
template_name: &str,
mut writer: &mut W,
) -> Result<()> {
let html = crate::md_parser::markdown_to_html(markdown)?;
map.insert("content".into(), TomlValue::from(html.content));
if let Some(toc) = html.toc {
map.insert("toc".into(), TomlValue::from(toc));
}
self.render(template_name, map, &mut writer)?;
Ok(())
}
}
/// Convert Value to string without adding quotes around strings
fn json_value_to_string(v: &JsonValue) -> String {
match v {
JsonValue::String(s) => s.clone(),
_ => v.to_string(),
}
}
/// Add template helpers functions
/// 'join-csv' turns an array of values into a comma-separated list
/// 'format-date' rewrites an ISO8601-formatted date into another format
fn add_base_helpers(hb: &mut Handlebars) {
use handlebars::{Context, Helper, HelperResult, Output, RenderContext, RenderError};
// "join-csv" turns array of values into comma-separated list
// Converts each value using to_string()
hb.register_helper(
"join-csv",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
let csv = h
.param(0)
.ok_or_else(|| RenderError::new("param not found"))?
.value()
.as_array()
.ok_or_else(|| RenderError::new("expected array"))?
.iter()
.map(json_value_to_string)
.collect::<Vec<String>>()
.join(",");
out.write(&csv)?;
Ok(())
},
),
);
//
// format-date: strftime-like function to reformat date
hb.register_helper(
"format-date",
Box::new(
|h: &Helper,
_r: &Handlebars,
_: &Context,
_rc: &mut RenderContext,
out: &mut dyn Output|
-> HelperResult {
// get first arg as string, an ISO8601-formatted date
let date = h
.param(0)
.ok_or_else(|| RenderError::new("expect first param as date"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// parse into DateTime
let date = DateTime::parse_from_rfc3339(date)
.map_err(|e| RenderError::from_error("date parse", e))?;
// get second arg - the format string
let format = h
.param(1)
.ok_or_else(|| RenderError::new("expect second param as format"))?
.value()
.as_str()
.ok_or_else(|| RenderError::new("expect strings"))?;
// print date in specified format
let formatted = date.format(format).to_string();
out.write(&formatted)?;
Ok(())
},
),
);
}
/// Generate diff between two text segments.
/// Enclose additions with <span class="add_style">...</span>
/// and deletions with <span class="del_style">...</span>.
/// add_style, e.g., "bg-green-100 text-gray-500"
///
pub fn generate_diff(first: &str, second: &str, style: &DiffStyle) -> Result<String> {
use dissimilar::Chunk;
let chunks = dissimilar::diff(&first, &second);
// "<span class=\"bg-red-100 text-gray-600 line-through\">");
// <span class=\"bg-green-100 text-gray-600\">");
let mut diff_content = String::with_capacity(second.len() + 1048 + 30 * chunks.len());
for chunk in chunks.iter() {
match chunk {
Chunk::Equal(s) => {
diff_content.push_str(s);
}
Chunk::Delete(s) => {
diff_content.push_str(&style.del_start);
diff_content.push_str(s);
diff_content.push_str(&style.del_end);
}
Chunk::Insert(s) => {
diff_content.push_str(&style.ins_start);
diff_content.push_str(s);
diff_content.push_str(&style.ins_end);
}
}
}
Ok(diff_content)
}
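// A hedged usage sketch for `generate_diff`. It assumes `DiffStyle` is a plain struct
// whose only fields are the four `String` markers read above (`ins_start`, `ins_end`,
// `del_start`, `del_end`); adjust the construction if the real type differs.
#[test]
fn generate_diff_wraps_changes() {
    let style = DiffStyle {
        ins_start: "<ins>".into(),
        ins_end: "</ins>".into(),
        del_start: "<del>".into(),
        del_end: "</del>".into(),
    };
    let out = generate_diff("aaa bbb", "aaa ccc", &style).expect("diff");
    // dissimilar keeps the common prefix intact and wraps only the changed words
    assert!(out.starts_with("aaa "));
    assert!(out.contains("<del>bbb</del>"));
    assert!(out.contains("<ins>ccc</ins>"));
}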
#[test]
fn initializers() {
let mut r1 = Renderer::default();
r1.set("x".into(), toml::Value::from("xyz"));
assert!(true);
let mut r2 = Renderer::init(&RenderConfig::default()).expect("ok");
r2.set("x".into(), toml::Value::from("xyz"));
assert!(true);
}
/// Test template processor
#[test]
fn test_html_page() {
use crate::render::Renderer;
const TEST_TEMPLATE: &str = "<html><body><h1>{{title}}</h1>{{content}}</body></html>";
// simulate processing
let expected = TEST_TEMPLATE
.replace("{{content}}", "<p>hello</p>")
.replace("{{title}}", "Abc");
let mut map = TomlMap::new();
map.insert("title".into(), "Abc".into());
let mut gen = Renderer::default();
gen.add_template(("test_template", TEST_TEMPLATE))
.expect("add test template");
let mut buf: Vec<u8> = Vec::new();
let result = gen.write_page_html(map, "hello", "test_template", &mut buf);
assert!(result.is_ok());
// strip newlines: the rendered markdown content carries a trailing \n
let output = String::from_utf8_lossy(&buf).replace("\n", "");
assert_eq!(expected, output);
}
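// A minimal round-trip of `set_from` + `render`, assuming (as in `test_html_page` above)
// that `add_template` accepts a ("name", "body") tuple; the template name
// "greet_template" is made up for this test.
#[test]
fn test_set_from_and_render() {
    use std::collections::HashMap;
    let mut r = Renderer::default();
    // vars set on the renderer extend/override whatever map is passed to render()
    let mut vars: HashMap<String, String> = HashMap::new();
    vars.insert("name".into(), "World".into());
    r.set_from(vars);
    r.add_template(("greet_template", "Hello {{name}}!"))
        .expect("add template");
    let mut buf: Vec<u8> = Vec::new();
    r.render("greet_template", TomlMap::new(), &mut buf).expect("render");
    assert_eq!("Hello World!", String::from_utf8_lossy(&buf).trim_end());
}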
output.rs
//
//! Copyright 2020 Alibaba Group Holding Limited.
//!
//! Licensed under the Apache License, Version 2.0 (the "License");
//! you may not use this file except in compliance with the License.
//! You may obtain a copy of the License at
//!
//! http://www.apache.org/licenses/LICENSE-2.0
//!
//! Unless required by applicable law or agreed to in writing, software
//! distributed under the License is distributed on an "AS IS" BASIS,
//! WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//! See the License for the specific language governing permissions and
//! limitations under the License.
use super::*;
use std::rc::Rc;
use std::cell::RefCell;
use crate::channel::eventio::EventsBuffer;
use crate::channel::tee::{WrappedTee, Tee};
use crate::common::Port;
/// Describing how dataset tags will be changed when output from an output port.
///
/// # Please note!
/// Since we have just a few built-in operators manipulating dataset tags, for simplicity
/// reasons, `OutputDelta` is defined in our system from the perspective of each output
/// and applies against all inputs.
/// For example, when generating an output dataset, if a binary operator advances the
/// dataset tag of one input, it must do the same to the other input.
#[derive(Clone, Copy, Debug)]
pub enum OutputDelta {
/// Dataset tag won't be changed.
None,
/// Advance the current counter of tag, usually the loop body output
/// of LoopController.
Advance,
/// Add a new dimension for tag, usually the EnterScope operator.
ToChild,
/// Remove current dimension of tag, usually the LeaveScope operator.
ToParent,
}
impl OutputDelta {
pub fn matcher_of(&self, tag: &Tag) -> TagMatcher {
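// Note: these matchers run in the opposite direction of `OutputHandle::match_output`
// below: e.g. `Advance` produces output tags via `advance`, so inputs are matched back
// via `retreat`.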
match self {
OutputDelta::None => TagMatcher::Equals(tag.clone()),
OutputDelta::Advance => TagMatcher::Equals(tag.retreat()),
OutputDelta::ToChild => TagMatcher::Equals(tag.to_parent()),
OutputDelta::ToParent => TagMatcher::Prefix(tag.clone()),
}
}
}
pub struct OutputBuilder<D> {
pub batch_size : usize,
pub worker: WorkerId,
pub port: Port,
shared: Rc<RefCell<Vec<(Box<dyn Push<DataSet<D>>>, ChannelId, bool)>>>,
events_buf: EventsBuffer
}
impl<D: Data> OutputBuilder<D> {
pub fn new(batch: usize, worker: WorkerId, port: Port, events_buf: &EventsBuffer) -> Self {
OutputBuilder {
batch_size: batch,
worker,
port,
shared: Rc::new(RefCell::new(Vec::new())),
events_buf: events_buf.clone()
}
}
pub fn add_push<P>(&self, ch_id: ChannelId, local: bool, push: P) where P: Push<DataSet<D>> + 'static {
self.shared.borrow_mut().push((Box::new(push), ch_id, local));
}
pub fn build_tee(self) -> WrappedTee<DataSet<D>> {
let mut pushes = Vec::new();
let mut ch_ids = Vec::new();
{
let mut shared = self.shared.borrow_mut();
for (p, c, l) in shared.drain(..) {
pushes.push(p);
ch_ids.push((c, l));
}
}
let tee = Tee::<DataSet<D>>::from(pushes);
WrappedTee::new(self.worker, tee, ch_ids, &self.events_buf)
}
}
impl<D: Data> Clone for OutputBuilder<D> {
fn clone(&self) -> Self {
OutputBuilder {
batch_size: self.batch_size,
worker: self.worker,
port: self.port,
shared: self.shared.clone(),
events_buf: self.events_buf.clone()
}
}
}
pub trait TaggedOutput: AsAny + Send {
fn set_output_capacity(&mut self, capacity: usize);
fn has_capacity(&self) -> bool;
fn clear_capacity(&mut self);
fn transmit_end(&mut self, tag: Tag) -> IOResult<()>;
fn delta(&self) -> &OutputDelta;
fn close(&mut self) -> IOResult<()>;
fn is_closed(&self) -> bool;
}
pub trait TaggedOutputBuilder {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput>;
}
pub struct OutputHandle<D: Data> {
pub port: Port,
pub delta: OutputDelta,
inner: WrappedTee<DataSet<D>>,
capacity: Option<usize>,
batch_size: usize,
poisoned: bool
}
impl<D: Data> AsAny for OutputHandle<D> {
fn as_any_mut(&mut self) -> &mut dyn Any {
self
}
fn as_any_ref(&self) -> &dyn Any {
self
}
}
impl<D: Data> TaggedOutput for OutputHandle<D> {
#[inline]
fn set_output_capacity(&mut self, capacity: usize) {
self.capacity.replace(capacity);
}
#[inline]
fn has_capacity(&self) -> bool {
if let Some(left) = self.capacity.as_ref() {
*left > 0
} else {
true
}
}
#[inline]
fn clear_capacity(&mut self) {
self.capacity = None;
}
#[inline]
fn transmit_end(&mut self, tag: Tag) -> IOResult<()> {
//let matched = self.match_output(&tag).unwrap_or(tag);
self.inner.transmit_end(tag)
}
#[inline]
fn delta(&self) -> &OutputDelta {
&self.delta
}
#[inline]
fn close(&mut self) -> IOResult<()> {
if !self.poisoned {
trace!("Worker[{}], output[{:?}] is closing...", self.inner.worker, self.port);
self.poisoned = true;
self.inner.close()?;
}
Ok(())
}
#[inline]
fn is_closed(&self) -> bool {
self.poisoned
}
}
impl<D: Data> OutputHandle<D> {
pub fn new(output: WrappedTee<DataSet<D>>, batch: usize, port: Port, delta: OutputDelta) -> Self {
OutputHandle {
port,
delta,
inner: output,
capacity: None,
batch_size: batch,
poisoned: false
}
}
pub fn downcast(origin: &mut Box<dyn TaggedOutput>) -> &mut Self {
// TODO: handle downcast failure
origin.as_any_mut().downcast_mut::<Self>().expect("Downcast to OutputHandle failure")
}
#[inline]
fn match_output(&self, tag: &Tag) -> Option<Tag> {
match self.delta {
OutputDelta::None => None,
OutputDelta::Advance => Some(tag.advance()),
OutputDelta::ToParent => Some(tag.to_parent()),
OutputDelta::ToChild => Some(Tag::from(tag, 0))
}
}
}
impl<D: Data> TaggedOutputBuilder for OutputBuilder<D> {
fn build_output(self: Box<Self>, delta: OutputDelta) -> Box<dyn TaggedOutput> {
let batch_size = self.batch_size;
let port = self.port;
let tee = self.build_tee();
let output = OutputHandle::new(tee, batch_size, port, delta);
Box::new(output) as Box<dyn TaggedOutput>
}
}
pub struct Session<'a, D: Data> {
output: &'a mut WrappedTee<DataSet<D>>,
capacity: Option<&'a mut usize>,
batch_size: usize,
tag: Tag,
buffer: Vec<D>
}
impl<'a, D: Data> Session<'a, D> {
pub fn new(output: &'a mut WrappedTee<DataSet<D>>, tag: Tag, batch: usize, capacity: Option<&'a mut usize>) -> Self {
Session {
output,
capacity,
batch_size: batch,
tag,
buffer: Vec::with_capacity(batch),
}
}
/// Output one message; on success, the returned boolean reports whether output capacity is still available.
pub fn give(&mut self, msg: D) -> IOResult<bool> {
self.push(msg)?;
Ok(self.update_capacity(1))
}
pub fn give_iterator<I: Iterator<Item = D>>(&mut self, iter: &mut I) -> IOResult<bool> {
if let Some(capacity) = self.capacity.as_ref().map(|c| **c) {
let mut count = 0;
while count < capacity {
if let Some(item) = iter.next() {
self.push(item)?;
} else {
break
}
count += 1;
}
Ok(self.update_capacity(count))
} else {
for item in iter {
self.push(item)?;
}
Ok(true)
}
}
pub fn give_entire_iterator<I: IntoIterator<Item = D>>(&mut self, iter: I) -> IOResult<bool> {
let mut count = 0;
for datum in iter.into_iter() {
count += 1;
self.push(datum)?;
}
Ok(self.update_capacity(count))
}
///
pub fn give_batch(&mut self, batch: Vec<D>) -> IOResult<bool> {
self.flush()?;
let size = batch.len();
self.output.push(DataSet::new(self.tag.clone(), batch))?;
self.output.flush()?;
Ok(self.update_capacity(size))
}
#[inline]
pub fn transmit_end(mut self) -> IOResult<()> {
self.flush()?;
self.output.transmit_end(self.tag.clone())?;
Ok(())
}
#[inline]
pub fn has_capacity(&self) -> bool {
self.check_capacity()
}
pub fn flush(&mut self) -> IOResult<()> {
if !self.buffer.is_empty() {
let size = self.buffer.len();
let msgs = ::std::mem::replace(&mut self.buffer,
Vec::with_capacity(size));
self.output.push(DataSet::new(self.tag.clone(), msgs))?;
self.output.flush()?;
}
Ok(())
}
#[inline]
fn push(&mut self, msg: D) -> IOResult<()> {
self.buffer.push(msg);
if self.buffer.len() == self.batch_size {
self.flush()?;
}
Ok(())
}
/// Decrease the channel's remaining output capacity by `decr`; returns `true` if capacity remains.
#[inline]
fn update_capacity(&mut self, decr: usize) -> bool {
if let Some(ref mut ca) = self.capacity {
if **ca <= decr {
**ca = 0;
false
} else {
**ca -= decr;
true
}
} else {
true
}
}
/// Return `true` if there is capacity left;
#[inline]
fn check_capacity(&self) -> bool {
self.capacity.as_ref().map(|ca| **ca > 0).unwrap_or(true)
}
}
impl<'a, D: Data> Drop for Session<'a, D> {
fn drop(&mut self) {
match self.flush() {
Ok(_) => (),
Err(e) => {
error!("Session flush failed, caused by {:?}", e);
}
}
}
}
impl<D: Data> OutputHandle<D> {
#[inline]
pub fn session(&mut self, tag: &Tag) -> Session<D> {
let matched = self.match_output(tag).unwrap_or(tag.clone());
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
#[inline]
pub fn session_of<T: Into<Tag>>(&mut self, tag: T) -> Session<D> {
let t = tag.into();
let matched = self.match_output(&t).unwrap_or(t);
let ca = self.capacity.as_mut();
Session::new(&mut self.inner, matched, self.batch_size, ca)
}
}
main.rs
use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
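// Sanity check for the capsule SDF above: a point on the segment axis is inside
// (negative distance), a point off the axis is outside by the expected margin.
#[test]
fn sd_capsule_signs() {
    let a = Vector3::new(0.0, 0.0, 0.0);
    let b = Vector3::new(1.0, 0.0, 0.0);
    // on the axis midpoint the distance is -r
    let inside = sd_capsule(Vector3::new(0.5, 0.0, 0.0), a, b, 0.25);
    assert!((inside + 0.25).abs() < 1e-6);
    // 1.0 above the axis the distance is 1.0 - r = 0.75
    let outside = sd_capsule(Vector3::new(0.5, 1.0, 0.0), a, b, 0.25);
    assert!((outside - 0.75).abs() < 1e-6);
}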
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
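// When the two distances differ by much more than `k`, the clamped blend factor `h`
// saturates and the smooth operators reduce to the hard min/max; smoothing only
// matters near the blend region.
#[test]
fn smooth_ops_match_hard_ops_far_from_blend() {
    let k = 0.1_f32;
    assert!((0.0_f32.smooth_union(k, 1.0) - 0.0_f32.min(1.0)).abs() < 1e-6);
    assert!((0.0_f32.smooth_intersect(k, 1.0) - 0.0_f32.max(1.0)).abs() < 1e-6);
}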
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// Quick hack: the triangles are small enough that we can ignore partial overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
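// Ray-casting sanity check on a unit square: a point inside is detected, a point past
// the right edge is not.
#[test]
fn point_in_polygon_unit_square() {
    let square = vec![(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)];
    assert!(is_inside_a_polygon((0.5, 0.5), &square));
    assert!(!is_inside_a_polygon((1.5, 0.5), &square));
}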
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
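// `Passage` is a coarse grid of visit counters used above to cap drawing density;
// repeated hits on the same point land in the same cell and keep incrementing it.
#[test]
fn passage_counts_repeat_visits() {
    let mut passage = Passage::new(0.5, 10.0, 10.0);
    assert_eq!(1, passage.count((2.0, 2.0)));
    assert_eq!(2, passage.count((2.0, 2.0)));
}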
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
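// The spiral below starts on the outer radius at angle 0 and is explicitly closed on
// the center point, so both endpoints are predictable (assuming gre's `round_point`
// only snaps coordinates to 0.01 precision).
#[test]
fn spiral_endpoints() {
    let route = spiral_optimized(0.0, 0.0, 10.0, 0.5);
    let first = route[0];
    assert!((first.0 - 10.0).abs() < 0.02 && first.1.abs() < 0.02);
    assert_eq!((0.0, 0.0), *route.last().unwrap());
}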
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the more angle iterations we need
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
} | }
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
| random_line_split |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use sign distance function paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack. triangles are small enough to ignore cases where it partially overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0, 0.1));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1)!= (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside =!inside;
}
j = i;
}
inside
}
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // bigger radius is more we have to do angle iterations
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx |
}
route.push((x, y));
route
}
| {
break;
} | conditional_block |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
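// Quick numeric check (added sketch): the polynomial smooth minimum dips
// below the hard minimum near the junction of two close values, which is
// what merges the capsules into blobs.
#[cfg(test)]
mod smooth_ops_tests {
use super::*;
#[test]
fn smooth_union_blends_below_hard_min() {
let (a, b) = (1.0f32, 1.1f32);
assert!(a.smooth_union(0.5, b) < a.union(b));
}
}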
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
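// Minimal projection check (added sketch): a vertex on the optical axis at
// z = -1 should land on the NDC origin in x/y.
#[cfg(test)]
mod camera_tests {
use super::*;
#[test]
fn centered_vertex_projects_to_ndc_origin() {
let cam = Camera::new(1.0, 2.2, 0.0, 8.0);
let tri = Tri::new(
Point3::new(0.0, 0.0, -1.0),
Point3::new(0.1, 0.0, -1.0),
Point3::new(0.0, 0.1, -1.0),
);
let p = cam.project(&tri);
assert!(p.v1.x.abs() < 1e-6 && p.v1.y.abs() < 1e-6);
}
}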
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough that we can ignore partial overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool |
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn new(precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the smaller the angle step, hence more iterations per turn
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
}
| {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
} | identifier_body |
main.rs | use clap::*;
use gre::*;
use isosurface::{marching_cubes::MarchingCubes, source::Source};
use kiss3d::nalgebra::{Perspective3, Point3, Rotation3, Vector3};
use rand::prelude::*;
use std::f32::consts::PI;
use std::ops::{Add, Mul, Sub};
use svg::node::element::path::Data;
use svg::node::element::*;
#[derive(Parser)]
#[clap()]
pub struct Opts {
#[clap(short, long, default_value = "image.svg")]
file: String,
#[clap(short, long, default_value = "210.0")]
pub width: f64,
#[clap(short, long, default_value = "297.0")]
pub height: f64,
#[clap(short, long, default_value = "20.0")]
pub pad: f64,
#[clap(short, long, default_value = "0.0")]
pub seed: f64,
}
// We use the signed distance function (SDF) paradigm here:
fn sd_capsule(
p: Vector3<f32>,
a: Vector3<f32>,
b: Vector3<f32>,
r: f32,
) -> f32 {
let pa = p - a;
let ba = b - a;
let h = (pa.dot(&ba) / ba.dot(&ba)).max(0.0).min(1.0);
(pa - ba * h).norm() - r
}
trait BinaryOps<T> {
fn intersect(&self, other: T) -> T;
fn difference(&self, other: T) -> T;
fn union(&self, other: T) -> T;
fn smooth_intersect(&self, k: T, other: T) -> T;
fn smooth_difference(&self, k: T, other: T) -> T;
fn smooth_union(&self, k: T, other: T) -> T;
}
impl BinaryOps<f32> for f32 {
fn intersect(&self, other: f32) -> f32 {
self.max(other)
}
fn difference(&self, other: f32) -> f32 {
self.max(-other)
}
fn union(&self, other: f32) -> f32 {
self.min(other)
}
fn smooth_intersect(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) + k * h * (1.0 - h)
}
fn smooth_difference(&self, k: f32, other: f32) -> f32 {
let h = (0.5 - 0.5 * (other + self) / k).max(0.0).min(1.0);
mix(*self, -other, h) + k * h * (1.0 - h)
}
fn smooth_union(&self, k: f32, other: f32) -> f32 {
let h = (0.5 + 0.5 * (self - other) / k).max(0.0).min(1.0);
mix(*self, other, h) - k * h * (1.0 - h)
}
}
fn mix(a: f32, b: f32, x: f32) -> f32 {
(1. - x) * a + x * b
}
struct Shape {
seed: f64,
}
impl Source for Shape {
fn sample(&self, x: f32, y: f32, z: f32) -> f32 {
let p = Vector3::new(x, y, z);
let mut s = 999.;
let mut rng = rng_from_seed(self.seed);
let count = rng.gen_range(5, 80);
let max_size = 0.2;
let v = rng.gen_range(0.1, 0.9);
for _i in 0..count {
let a = Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
);
let b = if rng.gen_bool(v) {
a
} else {
Vector3::new(
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
rng.gen_range(0.2, 0.8),
)
};
s = s.smooth_union(
rng.gen_range(0.0, 0.1),
sd_capsule(p, a, b, max_size * rng.gen_range(0.2, 1.0)),
);
}
s
}
}
fn make_triangles_from_vertices_indices(
vert: &Vec<f32>,
idx: &Vec<u32>,
) -> Vec<Tri> {
let mut triangles = vec![];
for face in idx.chunks(3) {
let i1 = face[0] as usize;
let i2 = face[1] as usize;
let i3 = face[2] as usize;
let v1 = Point3::new(vert[i1 * 3], vert[i1 * 3 + 1], vert[i1 * 3 + 2]);
let v2 = Point3::new(vert[i2 * 3], vert[i2 * 3 + 1], vert[i2 * 3 + 2]);
let v3 = Point3::new(vert[i3 * 3], vert[i3 * 3 + 1], vert[i3 * 3 + 2]);
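// vertices are pushed in reversed order (v3, v2, v1), presumably to flip the winding emitted by the marching cubes extractor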
triangles.push(Tri::new(v3, v2, v1));
}
triangles
}
#[derive(Debug, Clone)]
struct Tri {
v1: Point3<f32>,
v2: Point3<f32>,
v3: Point3<f32>,
}
impl Sub<Vector3<f32>> for Tri {
type Output = Tri;
fn sub(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 - v,
v2: self.v2 - v,
v3: self.v3 - v,
}
}
}
impl Add<Vector3<f32>> for Tri {
type Output = Tri;
fn add(self, v: Vector3<f32>) -> Self::Output {
Tri {
v1: self.v1 + v,
v2: self.v2 + v,
v3: self.v3 + v,
}
}
}
impl Mul<Tri> for f32 {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Mul<Tri> for Rotation3<f32> {
type Output = Tri;
fn mul(self, tri: Tri) -> Self::Output {
Tri {
v1: self * tri.v1,
v2: self * tri.v2,
v3: self * tri.v3,
}
}
}
impl Tri {
fn new(v1: Point3<f32>, v2: Point3<f32>, v3: Point3<f32>) -> Self {
Tri { v1, v2, v3 }
}
}
struct Camera {
aspect: f32,
fovy: f32,
znear: f32,
zfar: f32,
}
impl Camera {
fn new(aspect: f32, fovy: f32, znear: f32, zfar: f32) -> Self {
Camera {
aspect,
fovy,
znear,
zfar,
}
}
fn project(&self, tri: &Tri) -> Tri {
let proj = Perspective3::new(self.aspect, self.fovy, self.znear, self.zfar);
Tri {
v1: proj.project_point(&tri.v1),
v2: proj.project_point(&tri.v2),
v3: proj.project_point(&tri.v3),
}
}
}
fn art(opts: &Opts) -> Vec<Group> {
let width = opts.width;
let height = opts.height;
let mut rng = rng_from_seed(opts.seed);
let grid_size = rng.gen_range(8, 32);
let mut vertices = vec![];
let mut indices = vec![];
let source = Shape { seed: opts.seed };
let mut marching = MarchingCubes::new(grid_size);
marching.extract(&source, &mut vertices, &mut indices);
let triangles = make_triangles_from_vertices_indices(&vertices, &indices);
// project triangles to 2D with a camera
let dist = 1.0;
let cam = Camera::new((width / height) as f32, 2.2, 0.0, 8.0);
let rot =
Rotation3::from_axis_angle(&Vector3::z_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::y_axis(), rng.gen_range(-PI, PI))
* Rotation3::from_axis_angle(&Vector3::x_axis(), rng.gen_range(-PI, PI));
let mut projected = triangles
.iter()
.map(|tri| {
let t = tri.clone() + Vector3::new(-0.5, -0.5, -0.5);
let t = rot * t;
let t = t + Vector3::new(0., 0., -dist);
cam.project(&t)
})
.collect::<Vec<_>>();
// sort by z-index
let mut data = projected
.iter()
.map(|tri| {
let z = tri.v1.z + tri.v2.z + tri.v3.z;
(tri.clone(), z)
})
.collect::<Vec<(Tri, f32)>>();
data.sort_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap());
projected = data.iter().map(|(tri, _)| tri.clone()).collect::<Vec<_>>();
let mut passage = Passage::new(0.5, width, height);
let mut routes: Vec<Vec<(f64, f64)>> = vec![];
let mut polygons = vec![];
let translate = 0.5;
for tri in projected {
let points: Vec<(f64, f64)> = vec![tri.v1, tri.v2, tri.v3]
.iter()
.map(|p| {
(
(p.x as f64 + translate) * width,
(p.y as f64 + translate) * height,
)
})
.collect();
// quick hack: triangles are small enough that we can ignore partial overlaps
let center = centroid(&points);
let hidden = is_inside_polygons(center, &polygons);
if hidden {
continue;
}
if passage.count(center) > 10 {
continue;
}
let dr = rng.gen_range(0.3, 0.4);
// stretch the spiral based on the polygon shape
routes.push(spiral_optimized_in_triangle(&points, dr));
// routes.push(points.clone());
polygons.push(points);
}
// TESTS
/*
routes = vec![];
for x in 0..3 {
for y in 0..5 {
// rng in cell
let points = vec![
((x as f64+rng.gen_range(0.0, 0.8)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.2, 1.0)) * height / 5.0),
((x as f64+rng.gen_range(0.2, 1.0)) * width / 3.0, (y as f64 + rng.gen_range(0.0, 0.8)) * height / 5.0),
];
routes.push(
vec![points.clone(), vec![points[0]]].concat());
routes.push(
spiral_optimized_in_triangle(&points, 1.0));
}
}
*/
vec![("black", routes)]
.iter()
.enumerate()
.map(|(i, (color, routes))| {
let mut data = Data::new();
for route in routes.clone() {
data = render_route(data, route);
}
let mut l = layer(format!("{} {}", i, String::from(*color)).as_str());
l = l.add(base_path(color, 0.3, data));
l
})
.collect()
}
fn is_inside_a_polygon(p: (f64, f64), polygon: &Vec<(f64, f64)>) -> bool {
let mut inside = false;
let mut j = polygon.len() - 1;
for i in 0..polygon.len() {
let pi = polygon[i];
let pj = polygon[j];
if (pi.1 > p.1) != (pj.1 > p.1)
&& p.0 < (pj.0 - pi.0) * (p.1 - pi.1) / (pj.1 - pi.1) + pi.0
{
inside = !inside;
}
j = i;
}
inside
}
fn is_inside_polygons(p: (f64, f64), polygons: &Vec<Vec<(f64, f64)>>) -> bool {
for polygon in polygons {
if is_inside_a_polygon(p, polygon) {
return true;
}
}
false
}
fn main() {
let opts: Opts = Opts::parse();
let groups = art(&opts);
let mut document = base_document("yellow", opts.width, opts.height);
for g in groups {
document = document.add(g);
}
svg::save(opts.file, &document).unwrap();
}
fn centroid(points: &Vec<(f64, f64)>) -> (f64, f64) {
let mut x = 0.0;
let mut y = 0.0;
for (x_, y_) in points {
x += x_;
y += y_;
}
(x / points.len() as f64, y / points.len() as f64)
}
#[derive(Clone)]
struct Passage {
precision: f64,
width: f64,
height: f64,
counters: Vec<usize>,
}
impl Passage {
pub fn | (precision: f64, width: f64, height: f64) -> Self {
let wi = (width / precision).ceil() as usize;
let hi = (height / precision).ceil() as usize;
let counters = vec![0; wi * hi];
Passage {
precision,
width,
height,
counters,
}
}
fn index(self: &Self, (x, y): (f64, f64)) -> usize {
let wi = (self.width / self.precision).ceil() as usize;
let hi = (self.height / self.precision).ceil() as usize;
let xi = ((x / self.precision).round() as usize).max(0).min(wi - 1);
let yi = ((y / self.precision).round() as usize).max(0).min(hi - 1);
yi * wi + xi
}
pub fn count(self: &mut Self, p: (f64, f64)) -> usize {
let i = self.index(p);
let v = self.counters[i] + 1;
self.counters[i] = v;
v
}
}
fn spiral_optimized_in_triangle(
points: &Vec<(f64, f64)>,
dr: f64,
) -> Vec<(f64, f64)> {
let mut pts = vec![];
for i in 0..3 {
let a = points[i];
let b = points[(i + 1) % 3];
pts.push(((a.0 + b.0) * 0.5, (a.1 + b.1) * 0.5));
}
let center = centroid(&pts);
let d = pts
.iter()
.map(|&p| euclidian_dist(p, center))
.min_by(|a, b| a.partial_cmp(b).unwrap())
.unwrap();
if d < 3.0 * dr {
return vec![];
}
let radius = 0.9 * d;
let (x, y) = center;
spiral_optimized(x, y, radius, dr)
}
fn spiral_optimized(x: f64, y: f64, radius: f64, dr: f64) -> Vec<(f64, f64)> {
let approx = 0.05;
let extra = 0.5;
let two_pi = 2.0 * std::f64::consts::PI;
let mut route = Vec::new();
let mut r = radius + extra;
let mut a = 0f64;
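// spiral inward: the angle step da shrinks as r grows, and r decreases by dr per full 2*pi turn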
loop {
let mr = r.min(radius);
let p = round_point((x + mr * a.cos(), y + mr * a.sin()), 0.01);
let l = route.len();
if l == 0 || euclidian_dist(route[l - 1], p) > approx {
route.push(p);
}
let da = 1.0 / (r + 8.0); // the bigger the radius, the smaller the angle step, hence more iterations per turn
a = (a + da) % two_pi;
r -= dr * da / two_pi;
if r < approx {
break;
}
}
route.push((x, y));
route
}
| new | identifier_name |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
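// converged once known peer links (plus the num self entries) exceed 90% of the full num*num mesh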
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else |
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| {
trace!("not converged {} {} {}", i, total + num, num * num);
} | conditional_block |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
done = false;
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number"); | 10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
} |
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts( | random_line_split |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
done = false;
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn | () {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
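// send a 10-byte dummy packet from c1 to its tvu peers; the two receivers should get it while the sender does not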
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
//if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| gossip_ring | identifier_name |
gossip.rs | #![allow(clippy::arithmetic_side_effects)]
#[macro_use]
extern crate log;
use {
rayon::iter::*,
solana_gossip::{
cluster_info::{ClusterInfo, Node},
contact_info::{LegacyContactInfo as ContactInfo, Protocol},
crds::Cursor,
gossip_service::GossipService,
},
solana_perf::packet::Packet,
solana_runtime::bank_forks::BankForks,
solana_sdk::{
hash::Hash,
pubkey::Pubkey,
signature::{Keypair, Signer},
timing::timestamp,
transaction::Transaction,
},
solana_streamer::{
sendmmsg::{multi_target_send, SendPktsError},
socket::SocketAddrSpace,
},
solana_vote_program::{vote_instruction, vote_state::Vote},
std::{
net::UdpSocket,
sync::{
atomic::{AtomicBool, Ordering},
Arc, RwLock,
},
thread::sleep,
time::Duration,
},
};
fn test_node(exit: Arc<AtomicBool>) -> (Arc<ClusterInfo>, GossipService, UdpSocket) {
let keypair = Arc::new(Keypair::new());
let mut test_node = Node::new_localhost_with_pubkey(&keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
None,
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service,
test_node.sockets.tvu.pop().unwrap(),
)
}
fn test_node_with_bank(
node_keypair: Arc<Keypair>,
exit: Arc<AtomicBool>,
bank_forks: Arc<RwLock<BankForks>>,
) -> (Arc<ClusterInfo>, GossipService, UdpSocket) | test_node.sockets.tvu.pop().unwrap(),
)
}
/// Test that the network converges.
/// Run until every node in the network has a full ContactInfo set.
/// Check that nodes stop sending updates after all the ContactInfo has been shared.
/// tests that actually use this function are below
fn run_gossip_topo<F>(num: usize, topo: F)
where
F: Fn(&Vec<(Arc<ClusterInfo>, GossipService, UdpSocket)>),
{
let exit = Arc::new(AtomicBool::new(false));
let listen: Vec<_> = (0..num).map(|_| test_node(exit.clone())).collect();
topo(&listen);
let mut done = true;
for i in 0..(num * 32) {
done = true;
let total: usize = listen.iter().map(|v| v.0.gossip_peers().len()).sum();
if (total + num) * 10 > num * num * 9 {
done = true;
break;
} else {
done = false;
trace!("not converged {} {} {}", i, total + num, num * num);
}
sleep(Duration::from_secs(1));
}
exit.store(true, Ordering::Relaxed);
for (_, dr, _) in listen {
dr.join().unwrap();
}
assert!(done);
}
/// retransmit messages to a list of nodes
fn retransmit_to(
peers: &[&ContactInfo],
data: &[u8],
socket: &UdpSocket,
forwarded: bool,
socket_addr_space: &SocketAddrSpace,
) {
trace!("retransmit orders {}", peers.len());
let dests: Vec<_> = if forwarded {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
} else {
peers
.iter()
.filter_map(|peer| peer.tvu(Protocol::UDP).ok())
.filter(|addr| socket_addr_space.check(addr))
.collect()
};
if let Err(SendPktsError::IoError(ioerr, num_failed)) = multi_target_send(socket, data, &dests)
{
error!(
"retransmit_to multi_target_send error: {:?}, {}/{} packets failed",
ioerr,
num_failed,
dests.len(),
);
}
}
/// ring a -> b -> c -> d -> e -> a
#[test]
fn gossip_ring() {
solana_logger::setup();
run_gossip_topo(50, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// ring a -> b -> c -> d -> e -> a
#[test]
#[ignore]
fn gossip_ring_large() {
solana_logger::setup();
run_gossip_topo(600, |listen| {
let num = listen.len();
for n in 0..num {
let y = n % listen.len();
let x = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut d = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
d.set_wallclock(timestamp());
listen[x].0.insert_legacy_info(d);
}
});
}
/// star a -> (b,c,d,e)
#[test]
fn gossip_star() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
for n in 0..(num - 1) {
let x = 0;
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
let mut yd = yv.lookup_contact_info(&yv.id(), |ci| ci.clone()).unwrap();
yd.set_wallclock(timestamp());
let xv = &listen[x].0;
xv.insert_legacy_info(yd);
trace!("star leader {}", &xv.id());
}
});
}
/// rstar a <- (b,c,d,e)
#[test]
fn gossip_rstar() {
solana_logger::setup();
run_gossip_topo(10, |listen| {
let num = listen.len();
let xd = {
let xv = &listen[0].0;
xv.lookup_contact_info(&xv.id(), |ci| ci.clone()).unwrap()
};
trace!("rstar leader {}", xd.pubkey());
for n in 0..(num - 1) {
let y = (n + 1) % listen.len();
let yv = &listen[y].0;
yv.insert_legacy_info(xd.clone());
trace!("rstar insert {} into {}", xd.pubkey(), yv.id());
}
});
}
#[test]
pub fn cluster_info_retransmit() {
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
trace!("c1:");
let (c1, dr1, tn1) = test_node(exit.clone());
trace!("c2:");
let (c2, dr2, tn2) = test_node(exit.clone());
trace!("c3:");
let (c3, dr3, tn3) = test_node(exit.clone());
let c1_contact_info = c1.my_contact_info();
c2.insert_info(c1_contact_info.clone());
c3.insert_info(c1_contact_info);
let num = 3;
//wait to converge
trace!("waiting to converge:");
let mut done = false;
for _ in 0..30 {
done = c1.gossip_peers().len() == num - 1
&& c2.gossip_peers().len() == num - 1
&& c3.gossip_peers().len() == num - 1;
if done {
break;
}
sleep(Duration::from_secs(1));
}
assert!(done);
let mut p = Packet::default();
p.meta_mut().size = 10;
let peers = c1.tvu_peers();
let retransmit_peers: Vec<_> = peers.iter().collect();
retransmit_to(
&retransmit_peers,
p.data(..).unwrap(),
&tn1,
false,
&SocketAddrSpace::Unspecified,
);
let res: Vec<_> = [tn1, tn2, tn3]
.into_par_iter()
.map(|s| {
let mut p = Packet::default();
s.set_read_timeout(Some(Duration::from_secs(1))).unwrap();
let res = s.recv_from(p.buffer_mut());
res.is_err() //true if failed to receive the retransmit packet
})
.collect();
//true if the node failed to receive the retransmit packet; r2 and r3 should succeed
//r1 was the sender, so it should fail to receive the packet
assert_eq!(res, [true, false, false]);
exit.store(true, Ordering::Relaxed);
dr1.join().unwrap();
dr2.join().unwrap();
dr3.join().unwrap();
}
#[test]
#[ignore]
pub fn cluster_info_scale() {
use {
solana_measure::measure::Measure,
solana_perf::test_tx::test_tx,
solana_runtime::{
bank::Bank,
genesis_utils::{create_genesis_config_with_vote_accounts, ValidatorVoteKeypairs},
},
};
solana_logger::setup();
let exit = Arc::new(AtomicBool::new(false));
let num_nodes: usize = std::env::var("NUM_NODES")
.unwrap_or_else(|_| "10".to_string())
.parse()
.expect("could not parse NUM_NODES as a number");
let vote_keypairs: Vec<_> = (0..num_nodes)
.map(|_| ValidatorVoteKeypairs::new_rand())
.collect();
let genesis_config_info = create_genesis_config_with_vote_accounts(
10_000,
&vote_keypairs,
vec![100; vote_keypairs.len()],
);
let bank0 = Bank::new_for_tests(&genesis_config_info.genesis_config);
let bank_forks = Arc::new(RwLock::new(BankForks::new(bank0)));
let nodes: Vec<_> = vote_keypairs
.into_iter()
.map(|keypairs| {
test_node_with_bank(
Arc::new(keypairs.node_keypair),
exit.clone(),
bank_forks.clone(),
)
})
.collect();
let ci0 = nodes[0].0.my_contact_info();
for node in &nodes[1..] {
node.0.insert_info(ci0.clone());
}
let mut time = Measure::start("time");
let mut done;
let mut success = false;
for _ in 0..30 {
done = true;
for (i, node) in nodes.iter().enumerate() {
warn!("node {} peers: {}", i, node.0.gossip_peers().len());
            if node.0.gossip_peers().len() != num_nodes - 1 {
done = false;
break;
}
}
if done {
success = true;
break;
}
sleep(Duration::from_secs(1));
}
time.stop();
warn!("found {} nodes in {} success: {}", num_nodes, time, success);
for num_votes in 1..1000 {
let mut time = Measure::start("votes");
let tx = test_tx();
warn!("tx.message.account_keys: {:?}", tx.message.account_keys);
let vote = Vote::new(
vec![1, 3, num_votes + 5], // slots
Hash::default(),
);
let ix = vote_instruction::vote(
&Pubkey::new_unique(), // vote_pubkey
&Pubkey::new_unique(), // authorized_voter_pubkey
vote,
);
let tx = Transaction::new_with_payer(
&[ix], // instructions
None, // payer
);
let tower = vec![num_votes + 5];
nodes[0].0.push_vote(&tower, tx.clone());
let mut success = false;
for _ in 0..(30 * 5) {
let mut not_done = 0;
let mut num_old = 0;
let mut num_push_total = 0;
let mut num_pushes = 0;
let mut num_pulls = 0;
for (node, _, _) in nodes.iter() {
                //if node.0.get_votes(0).1.len() != (num_nodes * num_votes) {
let has_tx = node
.get_votes(&mut Cursor::default())
.iter()
.filter(|v| v.message.account_keys == tx.message.account_keys)
.count();
num_old += node.gossip.push.num_old.load(Ordering::Relaxed);
num_push_total += node.gossip.push.num_total.load(Ordering::Relaxed);
num_pushes += node.gossip.push.num_pushes.load(Ordering::Relaxed);
num_pulls += node.gossip.pull.num_pulls.load(Ordering::Relaxed);
if has_tx == 0 {
not_done += 1;
}
}
warn!("not_done: {}/{}", not_done, nodes.len());
warn!("num_old: {}", num_old);
warn!("num_push_total: {}", num_push_total);
warn!("num_pushes: {}", num_pushes);
warn!("num_pulls: {}", num_pulls);
success = not_done < (nodes.len() / 20);
if success {
break;
}
sleep(Duration::from_millis(200));
}
time.stop();
warn!(
"propagated vote {} in {} success: {}",
num_votes, time, success
);
sleep(Duration::from_millis(200));
for (node, _, _) in nodes.iter() {
node.gossip.push.num_old.store(0, Ordering::Relaxed);
node.gossip.push.num_total.store(0, Ordering::Relaxed);
node.gossip.push.num_pushes.store(0, Ordering::Relaxed);
node.gossip.pull.num_pulls.store(0, Ordering::Relaxed);
}
}
exit.store(true, Ordering::Relaxed);
for node in nodes {
node.1.join().unwrap();
}
}
| {
let mut test_node = Node::new_localhost_with_pubkey(&node_keypair.pubkey());
let cluster_info = Arc::new(ClusterInfo::new(
test_node.info.clone(),
node_keypair,
SocketAddrSpace::Unspecified,
));
let gossip_service = GossipService::new(
&cluster_info,
Some(bank_forks),
test_node.sockets.gossip,
None,
true, // should_check_duplicate_instance
None,
exit,
);
let _ = cluster_info.my_contact_info();
(
cluster_info,
gossip_service, | identifier_body |
lib.rs | mod error;
use std::fs::{self, File, OpenOptions};
use std::io::{self, stdin, stdout, Write};
use std::ops::Deref;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use error_chain::bail;
use hex::FromHex;
use lazy_static::lazy_static;
use seckey::SecBytes;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::{
pwhash,
secretbox,
sign,
};
use termion::input::TermRead;
pub use crate::error::{ErrorKind, Error, ResultExt};
lazy_static! {
static ref HOMEDIR: PathBuf = {
dirs::home_dir()
.unwrap_or("./".into())
};
/// The default location for pkgar to look for the user's public key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.pub.toml`. If `$HOME` is
/// unset, `./.pkgar/keys/id_ed25519.pub.toml`.
pub static ref DEFAULT_PUBKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.pub.toml")
};
/// The default location for pkgar to look for the user's secret key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.toml`. If `$HOME` is unset,
/// `./.pkgar/keys/id_ed25519.toml`.
pub static ref DEFAULT_SECKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.toml")
};
}
mod ser {
use hex::FromHex;
use serde::{Deserialize, Deserializer};
use serde::de::Error;
use sodiumoxide::crypto::{pwhash, secretbox, sign};
//TODO: Macro?
pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| pwhash::Salt(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 24]>::from_hex(s)
.map(|val| secretbox::Nonce(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| sign::PublicKey(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
}
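// These helpers decode the hex-string representation used in the key files
// back into the sodiumoxide newtypes, e.g. a hypothetical key-file line:
//
//     pkey = "86fa...e2b1"   # 64 hex chars -> [u8; 32] -> sign::PublicKey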
/// Standard pkgar public key format definition. Use serde to serialize/deserialize
/// files into this struct (helper methods available).
#[derive(Deserialize, Serialize)]
pub struct PublicKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")]
pub pkey: sign::PublicKey,
}
impl PublicKeyFile {
/// Parse a `PublicKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the public key to `file`
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
File::create(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
}
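// Minimal round-trip sketch (hypothetical backup path; errors propagate
// with `?`):
//
//     let pkey_file = PublicKeyFile::open(&*DEFAULT_PUBKEY)?;
//     pkey_file.save("/tmp/id_ed25519.pub.toml")?;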
enum SKey {
Cipher([u8; 80]),
Plain(sign::SecretKey),
}
impl SKey {
fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) {
if let SKey::Plain(skey) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let mut buf = [0; 80];
buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key));
*self = SKey::Cipher(buf);
}
}
}
fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> {
if let SKey::Cipher(ciphertext) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key)
.map_err(|_| ErrorKind::PassphraseIncorrect )?;
*self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain)
.ok_or(ErrorKind::KeyInvalid)?);
} else {
*self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64])
.ok_or(ErrorKind::KeyInvalid)?);
}
}
Ok(())
}
/// Returns `None` if encrypted
fn skey(&self) -> Option<sign::SecretKey> {
match &self {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
}
impl AsRef<[u8]> for SKey {
fn as_ref(&self) -> &[u8] {
match self {
SKey::Cipher(buf) => buf.as_ref(),
SKey::Plain(skey) => skey.as_ref(),
}
}
}
impl FromHex for SKey {
type Error = hex::FromHexError;
fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> {
let bytes = hex::decode(buf)?;
        // An unencrypted secret key is 64 bytes; the sealed form is 80 (64 + 16-byte MAC)
if bytes.len() == 64 {
Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes)
.expect("Somehow not the right number of bytes")))
} else {
let mut buf = [0; 80];
buf.copy_from_slice(&bytes);
Ok(SKey::Cipher(buf))
}
}
}
/// Standard pkgar private key format definition. Use serde to
/// serialize/deserialize files into this struct (helper methods available).
/// Internally, this struct stores the private key as an enum that tracks
/// whether it is encrypted. Manipulate the state using the `encrypt()`,
/// `decrypt()` and `is_encrypted()` methods.
#[derive(Deserialize, Serialize)]
pub struct SecretKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")]
salt: pwhash::Salt,
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")]
nonce: secretbox::Nonce,
#[serde(with = "hex")]
skey: SKey,
}
impl SecretKeyFile {
    /// Generate a keypair with all the necessary info to save both keys. You
/// must call `save()` on each object to persist them to disk.
pub fn new() -> (PublicKeyFile, SecretKeyFile) {
let (pkey, skey) = sign::gen_keypair();
let pkey_file = PublicKeyFile { pkey };
let skey_file = SecretKeyFile {
salt: pwhash::gen_salt(),
nonce: secretbox::gen_nonce(),
skey: SKey::Plain(skey),
};
(pkey_file, skey_file)
}
/// Parse a `SecretKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(&self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the secret key to `file`.
///
/// Make sure to call `encrypt()` in order to encrypt
/// the private key, otherwise it will be stored as plain text.
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
OpenOptions::new()
.write(true)
.create(true)
.mode(0o600)
.open(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
/// Ensure that the internal state of this struct is encrypted.
/// Note that if passwd is empty, this function is a no-op.
pub fn encrypt(&mut self, passwd: Passwd) {
self.skey.encrypt(passwd, self.salt, self.nonce)
}
/// Ensure that the internal state of this struct is decrypted.
/// If the internal state is already decrypted, this function is a no-op.
pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> {
self.skey.decrypt(passwd, self.salt, self.nonce)
}
/// Status of the internal state.
pub fn is_encrypted(&self) -> bool {
match self.skey {
SKey::Cipher(_) => true,
SKey::Plain(_) => false,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn key(&mut self) -> Option<sign::SecretKey> {
match &self.skey {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn public_key_file(&self) -> Option<PublicKeyFile> {
Some(PublicKeyFile {
pkey: self.skey.skey()?.public_key(),
})
}
}
/// Secure in-memory representation of a password.
pub struct Passwd {
bytes: SecBytes,
}
impl Passwd {
/// Create a new `Passwd` and zero the old string.
pub fn new(passwd: &mut String) -> Passwd {
let pwd = Passwd {
            bytes: SecBytes::with(
passwd.len(),
|buf| buf.copy_from_slice(passwd.as_bytes())
),
};
unsafe {
seckey::zero(passwd.as_bytes_mut());
}
pwd
}
/// Prompt the user for a `Passwd` on stdin.
pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> {
let stdout = stdout();
let mut stdout = stdout.lock();
let stdin = stdin();
let mut stdin = stdin.lock();
stdout.write_all(prompt.as_ref().as_bytes())?;
stdout.flush()?;
let mut passwd = stdin.read_passwd(&mut stdout)?
.ok_or(ErrorKind::Io(
io::Error::new(
io::ErrorKind::UnexpectedEof,
"Invalid Password Input",
)
))?;
println!();
Ok(Passwd::new(&mut passwd))
}
/// Prompt for a password on stdin and confirm it. For configurable
/// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt).
pub fn prompt_new() -> Result<Passwd, Error> {
let passwd = Passwd::prompt(
"Please enter a new passphrase (leave empty to store the key in plaintext): "
)?;
let confirm = Passwd::prompt("Please re-enter the passphrase: ")?;
        if passwd != confirm {
bail!(ErrorKind::PassphraseMismatch);
}
Ok(passwd)
}
/// Get a key for symmetric key encryption from a password.
fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> {
if self.bytes.read().len() > 0 {
let mut key = secretbox::Key([0; secretbox::KEYBYTES]);
let secretbox::Key(ref mut binary_key) = key;
pwhash::derive_key(
binary_key,
&self.bytes.read(),
&salt,
pwhash::OPSLIMIT_INTERACTIVE,
pwhash::MEMLIMIT_INTERACTIVE,
).expect("Failed to get key from password");
Some(key)
} else {
None
}
}
}
impl PartialEq for Passwd {
fn eq(&self, other: &Passwd) -> bool {
self.bytes.read().deref() == other.bytes.read().deref()
}
}
impl Eq for Passwd {}
/// Generate a new keypair. The keys will be saved to `pkey_path` and
/// `skey_path`. The user will be prompted on stdin for a passphrase; an
/// empty passphrase causes the secret key to be stored in plain text.
/// Note that parent directories will not be created.
pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> { | skey_file.encrypt(passwd);
skey_file.save(skey_path)?;
pkey_file.save(pkey_path)?;
println!("Generated {} and {}", pkey_path.display(), skey_path.display());
Ok((pkey_file, skey_file))
}
fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error> {
let mut key_file = SecretKeyFile::open(skey_path)?;
if key_file.is_encrypted() {
let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display()))
.chain_err(|| skey_path )?;
key_file.decrypt(passwd)
.chain_err(|| skey_path )?;
}
Ok(key_file)
}
/// Get a SecretKeyFile from a path. If the file is encrypted, prompt for a password on stdin.
pub fn get_skey(skey_path: &Path) -> Result<SecretKeyFile, Error> {
prompt_skey(skey_path, "Passphrase for")
}
/// Open, decrypt, re-encrypt with a different passphrase from stdin, and save the newly encrypted
/// secret key at `skey_path`.
pub fn re_encrypt(skey_path: &Path) -> Result<(), Error> {
let mut skey_file = prompt_skey(skey_path, "Old passphrase for")?;
let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
skey_file.encrypt(passwd);
skey_file.save(skey_path)
} | let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
let (pkey_file, mut skey_file) = SecretKeyFile::new();
| random_line_split |
lib.rs | mod error;
use std::fs::{self, File, OpenOptions};
use std::io::{self, stdin, stdout, Write};
use std::ops::Deref;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use error_chain::bail;
use hex::FromHex;
use lazy_static::lazy_static;
use seckey::SecBytes;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::{
pwhash,
secretbox,
sign,
};
use termion::input::TermRead;
pub use crate::error::{ErrorKind, Error, ResultExt};
lazy_static! {
static ref HOMEDIR: PathBuf = {
dirs::home_dir()
.unwrap_or("./".into())
};
/// The default location for pkgar to look for the user's public key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.pub.toml`. If `$HOME` is
/// unset, `./.pkgar/keys/id_ed25519.pub.toml`.
pub static ref DEFAULT_PUBKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.pub.toml")
};
/// The default location for pkgar to look for the user's secret key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.toml`. If `$HOME` is unset,
/// `./.pkgar/keys/id_ed25519.toml`.
pub static ref DEFAULT_SECKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.toml")
};
}
mod ser {
use hex::FromHex;
use serde::{Deserialize, Deserializer};
use serde::de::Error;
use sodiumoxide::crypto::{pwhash, secretbox, sign};
//TODO: Macro?
pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| pwhash::Salt(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 24]>::from_hex(s)
.map(|val| secretbox::Nonce(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| sign::PublicKey(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
}
/// Standard pkgar public key format definition. Use serde to serialize/deserialize
/// files into this struct (helper methods available).
#[derive(Deserialize, Serialize)]
pub struct PublicKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")]
pub pkey: sign::PublicKey,
}
impl PublicKeyFile {
/// Parse a `PublicKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the public key to `file`
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
File::create(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
}
enum SKey {
Cipher([u8; 80]),
Plain(sign::SecretKey),
}
impl SKey {
fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) {
if let SKey::Plain(skey) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let mut buf = [0; 80];
buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key));
*self = SKey::Cipher(buf);
}
}
}
fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> {
if let SKey::Cipher(ciphertext) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key)
.map_err(|_| ErrorKind::PassphraseIncorrect )?;
*self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain)
.ok_or(ErrorKind::KeyInvalid)?);
} else {
*self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64])
.ok_or(ErrorKind::KeyInvalid)?);
}
}
Ok(())
}
/// Returns `None` if encrypted
fn skey(&self) -> Option<sign::SecretKey> {
match &self {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
}
impl AsRef<[u8]> for SKey {
fn as_ref(&self) -> &[u8] {
match self {
SKey::Cipher(buf) => buf.as_ref(),
SKey::Plain(skey) => skey.as_ref(),
}
}
}
impl FromHex for SKey {
type Error = hex::FromHexError;
fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> {
let bytes = hex::decode(buf)?;
        // An unencrypted secret key is 64 bytes; the sealed form is 80 (64 + 16-byte MAC)
if bytes.len() == 64 {
Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes)
.expect("Somehow not the right number of bytes")))
} else {
let mut buf = [0; 80];
buf.copy_from_slice(&bytes);
Ok(SKey::Cipher(buf))
}
}
}
/// Standard pkgar private key format definition. Use serde to
/// serialize/deserialize files into this struct (helper methods available).
/// Internally, this struct stores the private key as an enum that tracks
/// whether it is encrypted. Manipulate the state using the `encrypt()`,
/// `decrypt()` and `is_encrypted()` methods.
#[derive(Deserialize, Serialize)]
pub struct SecretKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")]
salt: pwhash::Salt,
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")]
nonce: secretbox::Nonce,
#[serde(with = "hex")]
skey: SKey,
}
impl SecretKeyFile {
    /// Generate a keypair with all the necessary info to save both keys. You
/// must call `save()` on each object to persist them to disk.
pub fn new() -> (PublicKeyFile, SecretKeyFile) {
let (pkey, skey) = sign::gen_keypair();
let pkey_file = PublicKeyFile { pkey };
let skey_file = SecretKeyFile {
salt: pwhash::gen_salt(),
nonce: secretbox::gen_nonce(),
skey: SKey::Plain(skey),
};
(pkey_file, skey_file)
}
/// Parse a `SecretKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(&self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the secret key to `file`.
///
/// Make sure to call `encrypt()` in order to encrypt
/// the private key, otherwise it will be stored as plain text.
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
OpenOptions::new()
.write(true)
.create(true)
.mode(0o600)
.open(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
/// Ensure that the internal state of this struct is encrypted.
/// Note that if passwd is empty, this function is a no-op.
pub fn encrypt(&mut self, passwd: Passwd) {
self.skey.encrypt(passwd, self.salt, self.nonce)
}
/// Ensure that the internal state of this struct is decrypted.
/// If the internal state is already decrypted, this function is a no-op.
pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> {
self.skey.decrypt(passwd, self.salt, self.nonce)
}
/// Status of the internal state.
pub fn is_encrypted(&self) -> bool {
match self.skey {
SKey::Cipher(_) => true,
SKey::Plain(_) => false,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn key(&mut self) -> Option<sign::SecretKey> {
match &self.skey {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn public_key_file(&self) -> Option<PublicKeyFile> {
Some(PublicKeyFile {
pkey: self.skey.skey()?.public_key(),
})
}
}
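// Sketch of loading and unlocking a stored key (hypothetical flow; the real
// prompt logic lives in `prompt_skey` below):
//
//     let mut skey_file = SecretKeyFile::open(&*DEFAULT_SECKEY)?;
//     if skey_file.is_encrypted() {
//         skey_file.decrypt(Passwd::prompt("Passphrase: ")?)?;
//     }
//     let signing_key = skey_file.key().expect("decrypted above");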
/// Secure in-memory representation of a password.
pub struct Passwd {
bytes: SecBytes,
}
impl Passwd {
/// Create a new `Passwd` and zero the old string.
pub fn new(passwd: &mut String) -> Passwd {
let pwd = Passwd {
            bytes: SecBytes::with(
passwd.len(),
|buf| buf.copy_from_slice(passwd.as_bytes())
),
};
unsafe {
seckey::zero(passwd.as_bytes_mut());
}
pwd
}
/// Prompt the user for a `Passwd` on stdin.
pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> {
let stdout = stdout();
let mut stdout = stdout.lock();
let stdin = stdin();
let mut stdin = stdin.lock();
stdout.write_all(prompt.as_ref().as_bytes())?;
stdout.flush()?;
let mut passwd = stdin.read_passwd(&mut stdout)?
.ok_or(ErrorKind::Io(
io::Error::new(
io::ErrorKind::UnexpectedEof,
"Invalid Password Input",
)
))?;
println!();
Ok(Passwd::new(&mut passwd))
}
/// Prompt for a password on stdin and confirm it. For configurable
/// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt).
pub fn prompt_new() -> Result<Passwd, Error> {
let passwd = Passwd::prompt(
"Please enter a new passphrase (leave empty to store the key in plaintext): "
)?;
let confirm = Passwd::prompt("Please re-enter the passphrase: ")?;
        if passwd != confirm {
bail!(ErrorKind::PassphraseMismatch);
}
Ok(passwd)
}
/// Get a key for symmetric key encryption from a password.
fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> {
if self.bytes.read().len() > 0 {
let mut key = secretbox::Key([0; secretbox::KEYBYTES]);
let secretbox::Key(ref mut binary_key) = key;
pwhash::derive_key(
binary_key,
&self.bytes.read(),
&salt,
pwhash::OPSLIMIT_INTERACTIVE,
pwhash::MEMLIMIT_INTERACTIVE,
).expect("Failed to get key from password");
Some(key)
} else {
None
}
}
}
impl PartialEq for Passwd {
fn eq(&self, other: &Passwd) -> bool {
self.bytes.read().deref() == other.bytes.read().deref()
}
}
impl Eq for Passwd {}
/// Generate a new keypair. The keys will be saved to `pkey_path` and
/// `skey_path`. The user will be prompted on stdin for a passphrase; an
/// empty passphrase causes the secret key to be stored in plain text.
/// Note that parent directories will not be created.
pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> {
let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
let (pkey_file, mut skey_file) = SecretKeyFile::new();
skey_file.encrypt(passwd);
skey_file.save(skey_path)?;
pkey_file.save(pkey_path)?;
println!("Generated {} and {}", pkey_path.display(), skey_path.display());
Ok((pkey_file, skey_file))
}
fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error> |
/// Get a SecretKeyFile from a path. If the file is encrypted, prompt for a password on stdin.
pub fn get_skey(skey_path: &Path) -> Result<SecretKeyFile, Error> {
prompt_skey(skey_path, "Passphrase for")
}
/// Open, decrypt, re-encrypt with a different passphrase from stdin, and save the newly encrypted
/// secret key at `skey_path`.
pub fn re_encrypt(skey_path: &Path) -> Result<(), Error> {
let mut skey_file = prompt_skey(skey_path, "Old passphrase for")?;
let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
skey_file.encrypt(passwd);
skey_file.save(skey_path)
}
| {
let mut key_file = SecretKeyFile::open(skey_path)?;
if key_file.is_encrypted() {
let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display()))
.chain_err(|| skey_path )?;
key_file.decrypt(passwd)
.chain_err(|| skey_path )?;
}
Ok(key_file)
} | identifier_body |
lib.rs | mod error;
use std::fs::{self, File, OpenOptions};
use std::io::{self, stdin, stdout, Write};
use std::ops::Deref;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use error_chain::bail;
use hex::FromHex;
use lazy_static::lazy_static;
use seckey::SecBytes;
use serde::{Deserialize, Serialize};
use sodiumoxide::crypto::{
pwhash,
secretbox,
sign,
};
use termion::input::TermRead;
pub use crate::error::{ErrorKind, Error, ResultExt};
lazy_static! {
static ref HOMEDIR: PathBuf = {
dirs::home_dir()
.unwrap_or("./".into())
};
/// The default location for pkgar to look for the user's public key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.pub.toml`. If `$HOME` is
/// unset, `./.pkgar/keys/id_ed25519.pub.toml`.
pub static ref DEFAULT_PUBKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.pub.toml")
};
/// The default location for pkgar to look for the user's secret key.
///
/// Defaults to `$HOME/.pkgar/keys/id_ed25519.toml`. If `$HOME` is unset,
/// `./.pkgar/keys/id_ed25519.toml`.
pub static ref DEFAULT_SECKEY: PathBuf = {
Path::join(&HOMEDIR, ".pkgar/keys/id_ed25519.toml")
};
}
mod ser {
use hex::FromHex;
use serde::{Deserialize, Deserializer};
use serde::de::Error;
use sodiumoxide::crypto::{pwhash, secretbox, sign};
//TODO: Macro?
pub(crate) fn to_salt<'d, D: Deserializer<'d>>(deser: D) -> Result<pwhash::Salt, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| pwhash::Salt(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_nonce<'d, D: Deserializer<'d>>(deser: D) -> Result<secretbox::Nonce, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 24]>::from_hex(s)
.map(|val| secretbox::Nonce(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
pub(crate) fn to_pubkey<'d, D: Deserializer<'d>>(deser: D) -> Result<sign::PublicKey, D::Error> {
String::deserialize(deser)
.and_then(|s| <[u8; 32]>::from_hex(s)
.map(|val| sign::PublicKey(val) )
.map_err(|err| Error::custom(err.to_string()) ) )
}
}
/// Standard pkgar public key format definition. Use serde to serialize/deserialize
/// files into this struct (helper methods available).
#[derive(Deserialize, Serialize)]
pub struct PublicKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_pubkey")]
pub pkey: sign::PublicKey,
}
impl PublicKeyFile {
/// Parse a `PublicKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<PublicKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the public key to `file`
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
File::create(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
}
enum SKey {
Cipher([u8; 80]),
Plain(sign::SecretKey),
}
impl SKey {
fn encrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) {
if let SKey::Plain(skey) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let mut buf = [0; 80];
buf.copy_from_slice(&secretbox::seal(skey.as_ref(), &nonce, &passwd_key));
*self = SKey::Cipher(buf);
}
}
}
fn decrypt(&mut self, passwd: Passwd, salt: pwhash::Salt, nonce: secretbox::Nonce) -> Result<(), Error> {
if let SKey::Cipher(ciphertext) = self {
if let Some(passwd_key) = passwd.gen_key(salt) {
let skey_plain = secretbox::open(ciphertext.as_ref(), &nonce, &passwd_key)
.map_err(|_| ErrorKind::PassphraseIncorrect )?;
*self = SKey::Plain(sign::SecretKey::from_slice(&skey_plain)
.ok_or(ErrorKind::KeyInvalid)?);
} else {
*self = SKey::Plain(sign::SecretKey::from_slice(&ciphertext[..64])
.ok_or(ErrorKind::KeyInvalid)?);
}
}
Ok(())
}
/// Returns `None` if encrypted
fn skey(&self) -> Option<sign::SecretKey> {
match &self {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
}
impl AsRef<[u8]> for SKey {
fn as_ref(&self) -> &[u8] {
match self {
SKey::Cipher(buf) => buf.as_ref(),
SKey::Plain(skey) => skey.as_ref(),
}
}
}
impl FromHex for SKey {
type Error = hex::FromHexError;
fn from_hex<T: AsRef<[u8]>>(buf: T) -> Result<SKey, hex::FromHexError> {
let bytes = hex::decode(buf)?;
        // An unencrypted secret key is 64 bytes; the sealed form is 80 (64 + 16-byte MAC)
if bytes.len() == 64 {
Ok(SKey::Plain(sign::SecretKey::from_slice(&bytes)
.expect("Somehow not the right number of bytes")))
} else {
let mut buf = [0; 80];
buf.copy_from_slice(&bytes);
Ok(SKey::Cipher(buf))
}
}
}
/// Standard pkgar private key format definition. Use serde to
/// serialize/deserialize files into this struct (helper methods available).
/// Internally, this struct stores the private key as an enum that tracks
/// whether it is encrypted. Manipulate the state using the `encrypt()`,
/// `decrypt()` and `is_encrypted()` methods.
#[derive(Deserialize, Serialize)]
pub struct SecretKeyFile {
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_salt")]
salt: pwhash::Salt,
#[serde(serialize_with = "hex::serialize", deserialize_with = "ser::to_nonce")]
nonce: secretbox::Nonce,
#[serde(with = "hex")]
skey: SKey,
}
impl SecretKeyFile {
    /// Generate a keypair with all the necessary info to save both keys. You
/// must call `save()` on each object to persist them to disk.
pub fn new() -> (PublicKeyFile, SecretKeyFile) {
let (pkey, skey) = sign::gen_keypair();
let pkey_file = PublicKeyFile { pkey };
let skey_file = SecretKeyFile {
salt: pwhash::gen_salt(),
nonce: secretbox::gen_nonce(),
skey: SKey::Plain(skey),
};
(pkey_file, skey_file)
}
/// Parse a `SecretKeyFile` from `file` (in toml format).
pub fn open(file: impl AsRef<Path>) -> Result<SecretKeyFile, Error> {
let content = fs::read_to_string(&file)
.chain_err(|| file.as_ref() )?;
toml::from_str(&content)
.chain_err(|| file.as_ref() )
}
/// Write `self` serialized as toml to `w`.
pub fn write(&self, mut w: impl Write) -> Result<(), Error> {
w.write_all(toml::to_string(&self)?.as_bytes())?;
Ok(())
}
/// Shortcut to write the secret key to `file`.
///
/// Make sure to call `encrypt()` in order to encrypt
/// the private key, otherwise it will be stored as plain text.
pub fn save(&self, file: impl AsRef<Path>) -> Result<(), Error> {
self.write(
OpenOptions::new()
.write(true)
.create(true)
.mode(0o600)
.open(&file)
.chain_err(|| file.as_ref() )?
).chain_err(|| file.as_ref() )
}
/// Ensure that the internal state of this struct is encrypted.
/// Note that if passwd is empty, this function is a no-op.
pub fn encrypt(&mut self, passwd: Passwd) {
self.skey.encrypt(passwd, self.salt, self.nonce)
}
/// Ensure that the internal state of this struct is decrypted.
/// If the internal state is already decrypted, this function is a no-op.
pub fn decrypt(&mut self, passwd: Passwd) -> Result<(), Error> {
self.skey.decrypt(passwd, self.salt, self.nonce)
}
/// Status of the internal state.
pub fn is_encrypted(&self) -> bool {
match self.skey {
SKey::Cipher(_) => true,
SKey::Plain(_) => false,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn | (&mut self) -> Option<sign::SecretKey> {
match &self.skey {
SKey::Plain(skey) => Some(skey.clone()),
SKey::Cipher(_) => None,
}
}
/// Returns `None` if the secret key is encrypted.
pub fn public_key_file(&self) -> Option<PublicKeyFile> {
Some(PublicKeyFile {
pkey: self.skey.skey()?.public_key(),
})
}
}
/// Secure in-memory representation of a password.
pub struct Passwd {
bytes: SecBytes,
}
impl Passwd {
/// Create a new `Passwd` and zero the old string.
pub fn new(passwd: &mut String) -> Passwd {
let pwd = Passwd {
            bytes: SecBytes::with(
passwd.len(),
|buf| buf.copy_from_slice(passwd.as_bytes())
),
};
unsafe {
seckey::zero(passwd.as_bytes_mut());
}
pwd
}
/// Prompt the user for a `Passwd` on stdin.
pub fn prompt(prompt: impl AsRef<str>) -> Result<Passwd, Error> {
let stdout = stdout();
let mut stdout = stdout.lock();
let stdin = stdin();
let mut stdin = stdin.lock();
stdout.write_all(prompt.as_ref().as_bytes())?;
stdout.flush()?;
let mut passwd = stdin.read_passwd(&mut stdout)?
.ok_or(ErrorKind::Io(
io::Error::new(
io::ErrorKind::UnexpectedEof,
"Invalid Password Input",
)
))?;
println!();
Ok(Passwd::new(&mut passwd))
}
/// Prompt for a password on stdin and confirm it. For configurable
/// prompts, use [`Passwd::prompt`](struct.Passwd.html#method.prompt).
pub fn prompt_new() -> Result<Passwd, Error> {
let passwd = Passwd::prompt(
"Please enter a new passphrase (leave empty to store the key in plaintext): "
)?;
let confirm = Passwd::prompt("Please re-enter the passphrase: ")?;
        if passwd != confirm {
bail!(ErrorKind::PassphraseMismatch);
}
Ok(passwd)
}
/// Get a key for symmetric key encryption from a password.
fn gen_key(&self, salt: pwhash::Salt) -> Option<secretbox::Key> {
if self.bytes.read().len() > 0 {
let mut key = secretbox::Key([0; secretbox::KEYBYTES]);
let secretbox::Key(ref mut binary_key) = key;
pwhash::derive_key(
binary_key,
&self.bytes.read(),
&salt,
pwhash::OPSLIMIT_INTERACTIVE,
pwhash::MEMLIMIT_INTERACTIVE,
).expect("Failed to get key from password");
Some(key)
} else {
None
}
}
}
impl PartialEq for Passwd {
fn eq(&self, other: &Passwd) -> bool {
self.bytes.read().deref() == other.bytes.read().deref()
}
}
impl Eq for Passwd {}
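// Sketch of the intended `Passwd` lifecycle (interactive stdin assumed;
// `skey_file` is a hypothetical SecretKeyFile):
//
//     let passwd = Passwd::prompt_new()?;   // prompt twice and compare
//     skey_file.encrypt(passwd);            // consumes the passphrase
//
// Note that `Passwd::new` zeroes the source String, so callers must not
// reuse it afterwards.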
/// Generate a new keypair. The keys will be saved to `pkey_path` and
/// `skey_path`. The user will be prompted on stdin for a passphrase; an
/// empty passphrase causes the secret key to be stored in plain text.
/// Note that parent directories will not be created.
pub fn gen_keypair(pkey_path: &Path, skey_path: &Path) -> Result<(PublicKeyFile, SecretKeyFile), Error> {
let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
let (pkey_file, mut skey_file) = SecretKeyFile::new();
skey_file.encrypt(passwd);
skey_file.save(skey_path)?;
pkey_file.save(pkey_path)?;
println!("Generated {} and {}", pkey_path.display(), skey_path.display());
Ok((pkey_file, skey_file))
}
fn prompt_skey(skey_path: &Path, prompt: impl AsRef<str>) -> Result<SecretKeyFile, Error> {
let mut key_file = SecretKeyFile::open(skey_path)?;
if key_file.is_encrypted() {
let passwd = Passwd::prompt(&format!("{} {}: ", prompt.as_ref(), skey_path.display()))
.chain_err(|| skey_path )?;
key_file.decrypt(passwd)
.chain_err(|| skey_path )?;
}
Ok(key_file)
}
/// Get a SecretKeyFile from a path. If the file is encrypted, prompt for a password on stdin.
pub fn get_skey(skey_path: &Path) -> Result<SecretKeyFile, Error> {
prompt_skey(skey_path, "Passphrase for")
}
/// Open, decrypt, re-encrypt with a different passphrase from stdin, and save the newly encrypted
/// secret key at `skey_path`.
pub fn re_encrypt(skey_path: &Path) -> Result<(), Error> {
let mut skey_file = prompt_skey(skey_path, "Old passphrase for")?;
let passwd = Passwd::prompt_new()
.chain_err(|| skey_path )?;
skey_file.encrypt(passwd);
skey_file.save(skey_path)
}
| key | identifier_name |
main.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(deprecated)]
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
extern crate mio;
use mio::tcp::TcpStream;
use std::os::unix::io::AsRawFd;
use std::ffi::CString;
use std::net::SocketAddr;
use std::str;
use std::io::{self, Write};
const BUFFER_SIZE: usize = 1024;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern {
fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize,
fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t;
fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_close(eid: sgx_enclave_id_t,
session_id: usize) -> sgx_status_t;
}
fn init_enclave() -> SgxResult<SgxEnclave> {
let mut launch_token: sgx_launch_token_t = [0; 1024];
let mut launch_token_updated: i32 = 0;
// call sgx_create_enclave to initialize an enclave instance
// Debug Support: set 2nd parameter to 1
let debug = 1;
    let mut misc_attr = sgx_misc_attribute_t { secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 }, misc_select: 0 };
SgxEnclave::create(ENCLAVE_FILE,
debug,
&mut launch_token,
&mut launch_token_updated,
&mut misc_attr)
}
const CLIENT: mio::Token = mio::Token(0);
/// This encapsulates the TCP-level connection, some connection
/// state, and the underlying TLS-level session.
struct TlsClient {
enclave_id: sgx_enclave_id_t,
socket: TcpStream,
closing: bool,
tlsclient_id: usize,
}
impl TlsClient {
fn ready(&mut self,
poll: &mut mio::Poll,
ev: &mio::Event) -> bool {
assert_eq!(ev.token(), CLIENT);
if ev.readiness().is_error() {
println!("Error");
return false;
}
if ev.readiness().is_readable() {
self.do_read();
}
if ev.readiness().is_writable() {
self.do_write();
}
if self.is_closed() {
println!("Connection closed");
return false;
}
self.reregister(poll);
true
}
}
impl TlsClient {
fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> {
println!("[+] TlsClient new {} {}", hostname, cert);
let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF;
let c_host = CString::new(hostname.to_string()).unwrap();
let c_cert = CString::new(cert.to_string()).unwrap();
let retval = unsafe {
tls_client_new(enclave_id,
&mut tlsclient_id,
sock.as_raw_fd(),
c_host.as_ptr() as *const c_char,
c_cert.as_ptr() as *const c_char)
};
        if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval);
return Option::None;
}
if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF {
println!("[-] New enclave tlsclient error");
return Option::None;
}
Option::Some(
TlsClient {
enclave_id: enclave_id,
socket: sock,
closing: false,
tlsclient_id: tlsclient_id,
})
}
fn close(&self) |
fn read_tls(&self, buf: &mut [u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_read(self.enclave_id,
&mut retval,
self.tlsclient_id,
                            buf.as_mut_ptr() as *mut c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result);
-1
}
}
}
fn write_tls(&self, buf: &[u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_write(self.enclave_id,
&mut retval,
self.tlsclient_id,
                             buf.as_ptr() as *const c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result);
-1
}
}
}
/// We're ready to do a read.
fn do_read(&mut self) {
        // BUFFER_SIZE = 1024, just for this test.
        // To read all of the plaintext, you would need additional ecalls to
        // query the buffered size and drain it.
let mut plaintext = vec![0; BUFFER_SIZE];
let rc = self.read_tls(plaintext.as_mut_slice());
if rc == -1 {
println!("TLS read error: {:?}", rc);
self.closing = true;
return;
}
plaintext.resize(rc as usize, 0);
io::stdout().write_all(&plaintext).unwrap();
}
fn do_write(&mut self) {
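        // Writing an empty buffer simply drives the enclave session, letting
        // it flush any TLS output it has buffered internally (an inference
        // from the ecall interface, not stated by the original code).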
let buf = Vec::new();
self.write_tls(buf.as_slice());
}
fn register(&self, poll: &mut mio::Poll) {
poll.register(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn reregister(&self, poll: &mut mio::Poll) {
poll.reregister(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn wants_read(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_read(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
fn wants_write(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_write(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
// Use wants_read/wants_write to register for different mio-level
// IO readiness events.
fn ready_interest(&self) -> mio::Ready {
let rd = self.wants_read();
let wr = self.wants_write();
if rd && wr {
mio::Ready::readable() | mio::Ready::writable()
} else if wr {
mio::Ready::writable()
} else {
mio::Ready::readable()
}
}
fn is_closed(&self) -> bool {
self.closing
}
}
/// We implement `io::Write` and pass through to the TLS session
impl io::Write for TlsClient {
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
Ok(self.write_tls(bytes) as usize)
}
// unused
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl io::Read for TlsClient {
fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
Ok(self.read_tls(bytes) as usize)
}
}
fn lookup_ipv4(host: &str, port: u16) -> SocketAddr {
use std::net::ToSocketAddrs;
let addrs = (host, port).to_socket_addrs().unwrap();
for addr in addrs {
if let SocketAddr::V4(_) = addr {
return addr;
}
}
unreachable!("Cannot lookup address");
}
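// e.g. lookup_ipv4("localhost", 8443) typically yields 127.0.0.1:8443; the
// unreachable! above panics only if the host has no IPv4 address at all.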
fn main() {
let enclave = match init_enclave() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
},
};
println!("[+] Test tlsclient in enclave, start!");
let port = 8443;
let hostname = "localhost";
let cert = "./ca.cert";
let addr = lookup_ipv4(hostname, port);
let sock = TcpStream::connect(&addr).expect("[-] Connect tls server failed!");
let tlsclient = TlsClient::new(enclave.geteid(),
sock,
hostname,
cert);
if tlsclient.is_some() {
println!("[+] Tlsclient new success!");
let mut tlsclient = tlsclient.unwrap();
let httpreq = format!("GET / HTTP/1.1\r\nHost: {}\r\nConnection: \
close\r\nAccept-Encoding: identity\r\n\r\n",
hostname);
tlsclient.write_all(httpreq.as_bytes()).unwrap();
let mut poll = mio::Poll::new()
.unwrap();
let mut events = mio::Events::with_capacity(32);
tlsclient.register(&mut poll);
'outer: loop {
poll.poll(&mut events, None).unwrap();
for ev in events.iter() {
                if !tlsclient.ready(&mut poll, &ev) {
                    tlsclient.close();
                    break 'outer;
}
}
}
} else {
println!("[-] Tlsclient new failed!");
}
println!("[+] Test tlsclient in enclave, done!");
enclave.destroy();
}
| {
let retval = unsafe {
tls_client_close(self.enclave_id, self.tlsclient_id)
};
if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval);
}
} | identifier_body |
main.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(deprecated)]
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
extern crate mio;
use mio::tcp::TcpStream;
use std::os::unix::io::AsRawFd;
use std::ffi::CString;
use std::net::SocketAddr;
use std::str;
use std::io::{self, Write};
const BUFFER_SIZE: usize = 1024;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern {
fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize,
fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t;
fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_close(eid: sgx_enclave_id_t,
session_id: usize) -> sgx_status_t;
}
fn init_enclave() -> SgxResult<SgxEnclave> {
let mut launch_token: sgx_launch_token_t = [0; 1024];
let mut launch_token_updated: i32 = 0;
// call sgx_create_enclave to initialize an enclave instance
// Debug Support: set 2nd parameter to 1
let debug = 1;
    let mut misc_attr = sgx_misc_attribute_t { secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 }, misc_select: 0 };
SgxEnclave::create(ENCLAVE_FILE,
debug,
&mut launch_token,
&mut launch_token_updated,
&mut misc_attr)
}
const CLIENT: mio::Token = mio::Token(0);
/// This encapsulates the TCP-level connection, some connection
/// state, and the underlying TLS-level session.
struct TlsClient {
enclave_id: sgx_enclave_id_t,
socket: TcpStream,
closing: bool,
tlsclient_id: usize,
}
impl TlsClient {
fn ready(&mut self,
poll: &mut mio::Poll,
ev: &mio::Event) -> bool {
assert_eq!(ev.token(), CLIENT);
if ev.readiness().is_error() {
println!("Error");
return false;
}
if ev.readiness().is_readable() {
self.do_read();
}
if ev.readiness().is_writable() {
self.do_write();
}
if self.is_closed() {
println!("Connection closed");
return false;
}
self.reregister(poll);
true
}
}
impl TlsClient {
fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> {
println!("[+] TlsClient new {} {}", hostname, cert);
let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF;
let c_host = CString::new(hostname.to_string()).unwrap();
let c_cert = CString::new(cert.to_string()).unwrap();
let retval = unsafe {
tls_client_new(enclave_id,
&mut tlsclient_id,
sock.as_raw_fd(),
c_host.as_ptr() as *const c_char,
c_cert.as_ptr() as *const c_char)
};
        if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval);
return Option::None;
}
if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF {
println!("[-] New enclave tlsclient error");
return Option::None;
}
Option::Some(
TlsClient {
enclave_id: enclave_id,
socket: sock,
closing: false,
tlsclient_id: tlsclient_id,
})
}
fn close(&self) {
let retval = unsafe {
tls_client_close(self.enclave_id, self.tlsclient_id)
};
        if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval);
}
}
fn read_tls(&self, buf: &mut [u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_read(self.enclave_id,
&mut retval,
self.tlsclient_id,
                            buf.as_mut_ptr() as *mut c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result);
-1
}
}
}
fn write_tls(&self, buf: &[u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_write(self.enclave_id,
&mut retval,
self.tlsclient_id,
                             buf.as_ptr() as *const c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result);
-1
}
}
}
/// We're ready to do a read.
fn do_read(&mut self) {
        // BUFFER_SIZE = 1024, just for this test.
        // To read all of the plaintext, you would need additional ecalls to
        // query the buffered size and drain it.
let mut plaintext = vec![0; BUFFER_SIZE];
let rc = self.read_tls(plaintext.as_mut_slice());
if rc == -1 {
println!("TLS read error: {:?}", rc);
self.closing = true;
return;
}
plaintext.resize(rc as usize, 0);
io::stdout().write_all(&plaintext).unwrap();
}
fn do_write(&mut self) {
let buf = Vec::new();
self.write_tls(buf.as_slice());
}
fn register(&self, poll: &mut mio::Poll) {
poll.register(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn reregister(&self, poll: &mut mio::Poll) {
poll.reregister(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn | (&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_read(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
fn wants_write(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_write(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
// Use wants_read/wants_write to register for different mio-level
// IO readiness events.
fn ready_interest(&self) -> mio::Ready {
let rd = self.wants_read();
let wr = self.wants_write();
if rd && wr {
mio::Ready::readable() | mio::Ready::writable()
} else if wr {
mio::Ready::writable()
} else {
mio::Ready::readable()
}
}
fn is_closed(&self) -> bool {
self.closing
}
}
/// We implement `io::Write` and pass through to the TLS session
impl io::Write for TlsClient {
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
Ok(self.write_tls(bytes) as usize)
}
// unused
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl io::Read for TlsClient {
fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
Ok(self.read_tls(bytes) as usize)
}
}
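// With these trait impls the client works with the std IO helpers used in
// main(), e.g. (sketch; `request` is a hypothetical byte string):
//
//     tlsclient.write_all(request.as_bytes())?;
//     let mut buf = [0u8; BUFFER_SIZE];
//     let n = tlsclient.read(&mut buf)?;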
fn lookup_ipv4(host: &str, port: u16) -> SocketAddr {
use std::net::ToSocketAddrs;
let addrs = (host, port).to_socket_addrs().unwrap();
for addr in addrs {
if let SocketAddr::V4(_) = addr {
return addr;
}
}
unreachable!("Cannot lookup address");
}
fn main() {
let enclave = match init_enclave() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
},
};
println!("[+] Test tlsclient in enclave, start!");
let port = 8443;
let hostname = "localhost";
let cert = "./ca.cert";
let addr = lookup_ipv4(hostname, port);
let sock = TcpStream::connect(&addr).expect("[-] Connect tls server failed!");
let tlsclient = TlsClient::new(enclave.geteid(),
sock,
hostname,
cert);
if tlsclient.is_some() {
println!("[+] Tlsclient new success!");
let mut tlsclient = tlsclient.unwrap();
let httpreq = format!("GET / HTTP/1.1\r\nHost: {}\r\nConnection: \
close\r\nAccept-Encoding: identity\r\n\r\n",
hostname);
tlsclient.write_all(httpreq.as_bytes()).unwrap();
let mut poll = mio::Poll::new()
.unwrap();
let mut events = mio::Events::with_capacity(32);
tlsclient.register(&mut poll);
'outer: loop {
poll.poll(&mut events, None).unwrap();
for ev in events.iter() {
                if !tlsclient.ready(&mut poll, &ev) {
                    tlsclient.close();
                    break 'outer;
}
}
}
} else {
println!("[-] Tlsclient new failed!");
}
println!("[+] Test tlsclient in enclave, done!");
enclave.destroy();
}
| wants_read | identifier_name |
main.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
#![allow(deprecated)]
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
extern crate mio;
use mio::tcp::TcpStream;
use std::os::unix::io::AsRawFd;
use std::ffi::CString;
use std::net::SocketAddr;
use std::str;
use std::io::{self, Write};
const BUFFER_SIZE: usize = 1024;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern {
fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize,
fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t;
fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_close(eid: sgx_enclave_id_t,
session_id: usize) -> sgx_status_t;
}
fn init_enclave() -> SgxResult<SgxEnclave> {
let mut launch_token: sgx_launch_token_t = [0; 1024];
let mut launch_token_updated: i32 = 0;
// call sgx_create_enclave to initialize an enclave instance
// Debug Support: set 2nd parameter to 1
let debug = 1;
    let mut misc_attr = sgx_misc_attribute_t { secs_attr: sgx_attributes_t { flags: 0, xfrm: 0 }, misc_select: 0 };
SgxEnclave::create(ENCLAVE_FILE,
debug,
&mut launch_token,
&mut launch_token_updated,
&mut misc_attr)
}
const CLIENT: mio::Token = mio::Token(0);
/// This encapsulates the TCP-level connection, some connection
/// state, and the underlying TLS-level session.
struct TlsClient {
enclave_id: sgx_enclave_id_t,
socket: TcpStream,
closing: bool,
tlsclient_id: usize,
}
impl TlsClient {
fn ready(&mut self,
poll: &mut mio::Poll,
ev: &mio::Event) -> bool {
assert_eq!(ev.token(), CLIENT);
if ev.readiness().is_error() {
println!("Error");
return false;
}
if ev.readiness().is_readable() {
self.do_read();
}
if ev.readiness().is_writable() |
if self.is_closed() {
println!("Connection closed");
return false;
}
self.reregister(poll);
true
}
}
impl TlsClient {
fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> {
println!("[+] TlsClient new {} {}", hostname, cert);
let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF;
let c_host = CString::new(hostname.to_string()).unwrap();
let c_cert = CString::new(cert.to_string()).unwrap();
let retval = unsafe {
tls_client_new(enclave_id,
&mut tlsclient_id,
sock.as_raw_fd(),
c_host.as_ptr() as *const c_char,
c_cert.as_ptr() as *const c_char)
};
if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval);
return Option::None;
}
if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF {
println!("[-] New enclave tlsclient error");
return Option::None;
}
Option::Some(
TlsClient {
enclave_id: enclave_id,
socket: sock,
closing: false,
tlsclient_id: tlsclient_id,
})
}
fn close(&self) {
let retval = unsafe {
tls_client_close(self.enclave_id, self.tlsclient_id)
};
if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval);
}
}
fn read_tls(&self, buf: &mut [u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_read(self.enclave_id,
&mut retval,
self.tlsclient_id,
buf.as_mut_ptr() as *mut c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result);
-1
}
}
}
fn write_tls(&self, buf: &[u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_write(self.enclave_id,
&mut retval,
self.tlsclient_id,
buf.as_ptr() as *const c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result);
-1
}
}
}
/// We're ready to do a read.
fn do_read(&mut self) {
// BUFFER_SIZE = 1024, just for this test.
// To read all of the plaintext, you need to do more ecalls to get the buffer size and the buffer.
let mut plaintext = vec![0; BUFFER_SIZE];
let rc = self.read_tls(plaintext.as_mut_slice());
if rc == -1 {
println!("TLS read error: {:?}", rc);
self.closing = true;
return;
}
plaintext.resize(rc as usize, 0);
io::stdout().write_all(&plaintext).unwrap();
}
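// A draining variant (sketch, not in the original source) would keep calling
// read_tls until the enclave reports no more plaintext, instead of taking a
// single BUFFER_SIZE-sized bite:
//
//     loop {
//         let mut chunk = vec![0; BUFFER_SIZE];
//         let n = self.read_tls(chunk.as_mut_slice());
//         if n <= 0 { break; }
//         io::stdout().write_all(&chunk[..n as usize]).unwrap();
//     }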
fn do_write(&mut self) {
let buf = Vec::new();
self.write_tls(buf.as_slice());
}
fn register(&self, poll: &mut mio::Poll) {
poll.register(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn reregister(&self, poll: &mut mio::Poll) {
poll.reregister(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn wants_read(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_read(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
fn wants_write(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_write(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
// Use wants_read/wants_write to register for different mio-level
// IO readiness events.
fn ready_interest(&self) -> mio::Ready {
let rd = self.wants_read();
let wr = self.wants_write();
if rd && wr {
mio::Ready::readable() | mio::Ready::writable()
} else if wr {
mio::Ready::writable()
} else {
mio::Ready::readable()
}
}
fn is_closed(&self) -> bool {
self.closing
}
}
/// We implement `io::Write` and pass through to the TLS session
impl io::Write for TlsClient {
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
Ok(self.write_tls(bytes) as usize)
}
// unused
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl io::Read for TlsClient {
fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
Ok(self.read_tls(bytes) as usize)
}
}
fn lookup_ipv4(host: &str, port: u16) -> SocketAddr {
use std::net::ToSocketAddrs;
let addrs = (host, port).to_socket_addrs().unwrap();
for addr in addrs {
if let SocketAddr::V4(_) = addr {
return addr;
}
}
unreachable!("Cannot lookup address");
}
fn main() {
let enclave = match init_enclave() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
},
};
println!("[+] Test tlsclient in enclave, start!");
let port = 8443;
let hostname = "localhost";
let cert = "./ca.cert";
let addr = lookup_ipv4(hostname, port);
let sock = TcpStream::connect(&addr).expect("[-] Connect tls server failed!");
let tlsclient = TlsClient::new(enclave.geteid(),
sock,
hostname,
cert);
if tlsclient.is_some() {
println!("[+] Tlsclient new success!");
let mut tlsclient = tlsclient.unwrap();
let httpreq = format!("GET / HTTP/1.1\r\nHost: {}\r\nConnection: \
close\r\nAccept-Encoding: identity\r\n\r\n",
hostname);
tlsclient.write_all(httpreq.as_bytes()).unwrap();
let mut poll = mio::Poll::new()
.unwrap();
let mut events = mio::Events::with_capacity(32);
tlsclient.register(&mut poll);
'outer: loop {
poll.poll(&mut events, None).unwrap();
for ev in events.iter() {
if !tlsclient.ready(&mut poll, &ev) {
tlsclient.close();
break 'outer;
}
}
}
} else {
println!("[-] Tlsclient new failed!");
}
println!("[+] Test tlsclient in enclave, done!");
enclave.destroy();
}
| {
self.do_write();
} | conditional_block |
main.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License..
#![allow(deprecated)]
extern crate sgx_types;
extern crate sgx_urts;
use sgx_types::*;
use sgx_urts::SgxEnclave;
extern crate mio;
use mio::tcp::TcpStream;
use std::os::unix::io::AsRawFd;
use std::ffi::CString;
use std::net::SocketAddr;
use std::str;
use std::io::{self, Write};
const BUFFER_SIZE: usize = 1024;
static ENCLAVE_FILE: &'static str = "enclave.signed.so";
extern {
fn tls_client_new(eid: sgx_enclave_id_t, retval: *mut usize,
fd: c_int, hostname: *const c_char, cert: *const c_char) -> sgx_status_t;
fn tls_client_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *mut c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize, buf: *const c_void, cnt: c_int) -> sgx_status_t;
fn tls_client_wants_read(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_wants_write(eid: sgx_enclave_id_t, retval: *mut c_int,
session_id: usize) -> sgx_status_t;
fn tls_client_close(eid: sgx_enclave_id_t,
session_id: usize) -> sgx_status_t;
}
fn init_enclave() -> SgxResult<SgxEnclave> {
let mut launch_token: sgx_launch_token_t = [0; 1024];
let mut launch_token_updated: i32 = 0;
// call sgx_create_enclave to initialize an enclave instance
// Debug Support: set 2nd parameter to 1
let debug = 1;
let mut misc_attr = sgx_misc_attribute_t {secs_attr: sgx_attributes_t { flags:0, xfrm:0}, misc_select:0};
SgxEnclave::create(ENCLAVE_FILE,
debug,
&mut launch_token,
&mut launch_token_updated,
&mut misc_attr)
}
const CLIENT: mio::Token = mio::Token(0);
/// This encapsulates the TCP-level connection, some connection
/// state, and the underlying TLS-level session.
struct TlsClient {
enclave_id: sgx_enclave_id_t,
socket: TcpStream,
closing: bool,
tlsclient_id: usize,
}
impl TlsClient {
fn ready(&mut self,
poll: &mut mio::Poll,
ev: &mio::Event) -> bool {
assert_eq!(ev.token(), CLIENT);
if ev.readiness().is_error() {
println!("Error");
return false;
}
if ev.readiness().is_readable() {
self.do_read();
}
if ev.readiness().is_writable() {
self.do_write();
}
if self.is_closed() {
println!("Connection closed");
return false;
}
self.reregister(poll);
true
}
}
impl TlsClient {
fn new(enclave_id: sgx_enclave_id_t, sock: TcpStream, hostname: &str, cert: &str) -> Option<TlsClient> {
println!("[+] TlsClient new {} {}", hostname, cert);
let mut tlsclient_id: usize = 0xFFFF_FFFF_FFFF_FFFF;
let c_host = CString::new(hostname.to_string()).unwrap();
let c_cert = CString::new(cert.to_string()).unwrap();
let retval = unsafe {
tls_client_new(enclave_id,
&mut tlsclient_id,
sock.as_raw_fd(),
c_host.as_ptr() as *const c_char,
c_cert.as_ptr() as *const c_char)
};
if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_new] Failed {}!", retval);
return Option::None;
}
if tlsclient_id == 0xFFFF_FFFF_FFFF_FFFF {
println!("[-] New enclave tlsclient error");
return Option::None;
}
Option::Some(
TlsClient {
enclave_id: enclave_id,
socket: sock,
closing: false,
tlsclient_id: tlsclient_id,
})
}
fn close(&self) {
let retval = unsafe {
tls_client_close(self.enclave_id, self.tlsclient_id)
};
if retval != sgx_status_t::SGX_SUCCESS {
println!("[-] ECALL Enclave [tls_client_close] Failed {}!", retval);
}
}
fn read_tls(&self, buf: &mut [u8]) -> isize {
let mut retval = -1;
let result = unsafe {
tls_client_read(self.enclave_id,
&mut retval,
self.tlsclient_id,
buf.as_mut_ptr() as *mut c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_read] Failed {}!", result);
-1
} | let result = unsafe {
tls_client_write(self.enclave_id,
&mut retval,
self.tlsclient_id,
buf.as_ptr() as *const c_void,
buf.len() as c_int)
};
match result {
sgx_status_t::SGX_SUCCESS => { retval as isize }
_ => {
println!("[-] ECALL Enclave [tls_client_write] Failed {}!", result);
-1
}
}
}
/// We're ready to do a read.
fn do_read(&mut self) {
// BUFFER_SIZE = 1024, just for this test.
// To read all of the plaintext, you need to do more ecalls to get the buffer size and the buffer.
let mut plaintext = vec![0; BUFFER_SIZE];
let rc = self.read_tls(plaintext.as_mut_slice());
if rc == -1 {
println!("TLS read error: {:?}", rc);
self.closing = true;
return;
}
plaintext.resize(rc as usize, 0);
io::stdout().write_all(&plaintext).unwrap();
}
fn do_write(&mut self) {
let buf = Vec::new();
self.write_tls(buf.as_slice());
}
fn register(&self, poll: &mut mio::Poll) {
poll.register(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn reregister(&self, poll: &mut mio::Poll) {
poll.reregister(&self.socket,
CLIENT,
self.ready_interest(),
mio::PollOpt::level() | mio::PollOpt::oneshot())
.unwrap();
}
fn wants_read(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_read(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_read] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
fn wants_write(&self) -> bool {
let mut retval = -1;
let result = unsafe {
tls_client_wants_write(self.enclave_id,
&mut retval,
self.tlsclient_id)
};
match result {
sgx_status_t::SGX_SUCCESS => { },
_ => {
println!("[-] ECALL Enclave [tls_client_wants_write] Failed {}!", result);
return false;
}
}
match retval {
0 => false,
_ => true
}
}
// Use wants_read/wants_write to register for different mio-level
// IO readiness events.
fn ready_interest(&self) -> mio::Ready {
let rd = self.wants_read();
let wr = self.wants_write();
if rd && wr {
mio::Ready::readable() | mio::Ready::writable()
} else if wr {
mio::Ready::writable()
} else {
mio::Ready::readable()
}
}
fn is_closed(&self) -> bool {
self.closing
}
}
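// The interest mapping in ready_interest() above mirrors rustls-style
// wants_read()/wants_write() flags; read interest is the fallback so the
// oneshot registration is never empty. The truth table it implements:
//
//     wants_read  wants_write  ->  interest
//     true        true             readable | writable
//     false       true             writable
//     _           false            readable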
/// We implement `io::Write` and pass through to the TLS session
impl io::Write for TlsClient {
fn write(&mut self, bytes: &[u8]) -> io::Result<usize> {
Ok(self.write_tls(bytes) as usize)
}
// unused
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl io::Read for TlsClient {
fn read(&mut self, bytes: &mut [u8]) -> io::Result<usize> {
Ok(self.read_tls(bytes) as usize)
}
}
fn lookup_ipv4(host: &str, port: u16) -> SocketAddr {
use std::net::ToSocketAddrs;
let addrs = (host, port).to_socket_addrs().unwrap();
for addr in addrs {
if let SocketAddr::V4(_) = addr {
return addr;
}
}
unreachable!("Cannot lookup address");
}
fn main() {
let enclave = match init_enclave() {
Ok(r) => {
println!("[+] Init Enclave Successful {}!", r.geteid());
r
},
Err(x) => {
println!("[-] Init Enclave Failed {}!", x.as_str());
return;
},
};
println!("[+] Test tlsclient in enclave, start!");
let port = 8443;
let hostname = "localhost";
let cert = "./ca.cert";
let addr = lookup_ipv4(hostname, port);
let sock = TcpStream::connect(&addr).expect("[-] Connect tls server failed!");
let tlsclient = TlsClient::new(enclave.geteid(),
sock,
hostname,
cert);
if tlsclient.is_some() {
println!("[+] Tlsclient new success!");
let mut tlsclient = tlsclient.unwrap();
let httpreq = format!("GET / HTTP/1.1\r\nHost: {}\r\nConnection: \
close\r\nAccept-Encoding: identity\r\n\r\n",
hostname);
tlsclient.write_all(httpreq.as_bytes()).unwrap();
let mut poll = mio::Poll::new()
.unwrap();
let mut events = mio::Events::with_capacity(32);
tlsclient.register(&mut poll);
'outer: loop {
poll.poll(&mut events, None).unwrap();
for ev in events.iter() {
if !tlsclient.ready(&mut poll, &ev) {
tlsclient.close();
break 'outer;
}
}
}
} else {
println!("[-] Tlsclient new failed!");
}
println!("[+] Test tlsclient in enclave, done!");
enclave.destroy();
} | }
}
fn write_tls(&self, buf: &[u8]) -> isize {
let mut retval = -1; | random_line_split |
mod.rs | // MIT/Apache2 License
//! This module defines the `Display` object, which acts as a connection to the X11 server, and the
//! `Connection` trait, which the `Display` object abstracts itself over. See the documentation for
//! these objects for more information.
use crate::{
auth_info::AuthInfo,
auto::{
xproto::{Colormap, Screen, Setup, SetupRequest, Visualid, Visualtype, Window},
AsByteSequence,
},
event::Event,
util::cycled_zeroes,
xid::XidGenerator,
Request, XID,
};
use alloc::{boxed::Box, collections::VecDeque};
use core::{fmt, iter, marker::PhantomData, mem, num::NonZeroU32};
use cty::c_int;
use hashbrown::HashMap;
use tinyvec::TinyVec;
#[cfg(feature = "std")]
use std::borrow::Cow;
#[cfg(feature = "async")]
use std::{future::Future, pin::Pin};
mod connection;
pub use connection::*;
#[cfg(feature = "std")]
pub mod name;
mod functions;
mod input;
mod output;
pub use functions::*;
pub(crate) const EXT_KEY_SIZE: usize = 24;
/// The connection to the X11 server. Most operations done in breadx revolve around this object
/// in some way, shape or form.
///
/// Internally, this acts as a layer of abstraction over the inner `Conn` object that keeps track
/// of the setup, outgoing and pending requests and replies, the event queue, et cetera. Traditionally,
/// X11 usually takes place over a TCP stream or a Unix socket connection; however, `Display` is able
/// to use any object implementing the `Connection` trait as a vehicle for the X11 protocol.
///
/// Upon its instantiation, the `Display` sends bytes to the server requesting the setup information, and
/// then stores it for later use. Afterwards, it awaits commands from the programmer to send requests,
/// receive replies or process events.
///
/// # Example
///
/// Open a connection to the X11 server and get the screen resolution.
///
/// ```rust,no_run
/// use breadx::DisplayConnection;
///
/// let mut conn = DisplayConnection::create(None, None).unwrap();
///
/// let default_screen = conn.default_screen();
/// println!("Default screen is {} x {}", default_screen.width_in_pixels, default_screen.height_in_pixels);
/// ```
pub struct Display<Conn> {
// the connection to the server
pub(crate) connection: Conn,
// the setup received from the server
pub(crate) setup: Setup,
// xid generator
xid: XidGenerator,
// the screen to be used by default
default_screen: usize,
// input variables
pub(crate) event_queue: VecDeque<Event>,
pub(crate) pending_requests: VecDeque<input::PendingRequest>,
pub(crate) pending_replies: HashMap<u16, Box<[u8]>>,
// output variables
request_number: u64,
// store the interned atoms
pub(crate) wm_protocols_atom: Option<NonZeroU32>,
// context db
// context: HashMap<(XID, ContextID), NonNull<c_void>>,
// hashmap linking extension names to major opcodes
// we use byte arrays instead of static string pointers
// here because cache locality leads to an overall speedup (todo: verify)
extensions: HashMap<[u8; EXT_KEY_SIZE], u8>,
}
/// Unique identifier for a context.
pub type ContextID = c_int;
/// A cookie for a request.
///
/// Requests usually take time to resolve into replies. Therefore, the `Display::send_request` method returns
/// the `RequestCookie`, which is later used to block (or await) for the request's eventual result.
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Default, Eq, Hash)]
#[repr(transparent)]
pub struct RequestCookie<R: Request> {
sequence: u16,
_phantom: PhantomData<Option<R::Reply>>,
}
impl<R: Request> RequestCookie<R> {
#[inline]
pub(crate) fn from_sequence(sequence: u64) -> Self {
Self {
sequence: sequence as u16, // truncate to lower bits
_phantom: PhantomData,
}
}
}
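// Usage sketch (hypothetical request value, not part of this module): a cookie
// is obtained when a request is sent and redeemed later for the typed reply.
//
//     let cookie = display.send_request(some_request)?;
//     // ... issue other requests or process events ...
//     let reply = display.resolve_request(cookie)?;
//
// Since the sequence number is truncated to u16, a cookie should be resolved
// before 65536 further requests are issued, or it may alias a newer reply.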
#[derive(Default, Debug)]
pub(crate) struct PendingRequestFlags {
pub discard_reply: bool,
pub checked: bool,
}
impl<Conn: fmt::Debug> fmt::Debug for Display<Conn> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Display")
.field("connection", &self.connection)
.field("setup", &self.setup)
.field("xid", &self.xid)
.field("default_screen", &self.default_screen)
.field("event_queue", &self.event_queue)
.field("pending_requests", &self.pending_requests)
.field("pending_replies", &self.pending_replies)
.field("request_number", &self.request_number)
.finish()
}
}
#[inline]
const fn endian_byte() -> u8 {
// Excerpt from the X Window System Protocol
//
// The client must send an initial byte of data to identify the byte order to be employed.
// The value of the byte must be octal 102 or 154. The value 102 (ASCII uppercase B) means
// values are transmitted most significant byte first, and value 154 (ASCII lowercase l)
// means values are transmitted least significant byte first.
#[cfg(not(target_endian = "little"))]
{
const BE_SIGNIFIER: u8 = b'B';
BE_SIGNIFIER
}
#[cfg(target_endian = "little")]
{
const LE_SIGNIFIER: u8 = b'l';
LE_SIGNIFIER
}
}
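// A hypothetical sanity check (not in the original source) tying the constant
// back to the protocol excerpt above: octal 102 is ASCII 'B', octal 154 is ASCII 'l'.
#[cfg(test)]
mod endian_byte_tests {
    use super::endian_byte;

    #[test]
    fn matches_protocol_values() {
        #[cfg(target_endian = "little")]
        assert_eq!(endian_byte(), 0o154); // b'l', least significant byte first
        #[cfg(not(target_endian = "little"))]
        assert_eq!(endian_byte(), 0o102); // b'B', most significant byte first
    }
}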
impl<Conn: Connection> Display<Conn> {
#[inline]
fn decode_reply<R: Request>(reply: Box<[u8]>) -> crate::Result<R::Reply> {
Ok(R::Reply::from_bytes(&reply)
.ok_or(crate::BreadError::BadObjectRead(None))?
.0)
}
/// Send a request object to the X11 server.
///
/// Given a request object, this function sends it across the connection to the X11 server and returns
/// a cookie used to determine when this request will resolve. Usually, the `Display` object has functions
/// that act as wrappers around this method; however, if you'd like to circumvent those, this is usually
/// the best option.
#[inline]
pub fn send_request<R: Request>(&mut self, req: R) -> crate::Result<RequestCookie<R>> {
self.send_request_internal(req)
}
/// Wait for a request from the X11 server.
///
/// This function checks the `Display`'s queues to see if a reply matching the given `RequestCookie`
/// has been processed by the X11 server. If not, it polls the server for new events until it has
/// determined that the request has resolved.
#[inline]
pub fn resolve_request<R: Request>(
&mut self,
token: RequestCookie<R>,
) -> crate::Result<R::Reply>
where
R::Reply: Default,
{
if mem::size_of::<R::Reply>() == 0 {
log::debug!("Immediately resolving for reply of size 0");
return Ok(Default::default());
}
loop {
log::trace!("Current replies: {:?}", &self.pending_replies);
match self.pending_replies.remove(&token.sequence) {
Some(reply) => break Self::decode_reply::<R>(reply),
None => self.wait()?,
}
}
}
/// Send a request object to the X11 server, async redox. See the `send_request` function for more
/// information.
#[cfg(feature = "async")]
#[inline]
pub fn send_request_async<'future, R: Request + Send + 'future>(
&'future mut self,
req: R,
) -> Pin<Box<dyn Future<Output = crate::Result<RequestCookie<R>>> + Send + 'future>> {
Box::pin(self.send_request_internal_async(req))
}
/// Wait for a request from the X11 server, async redox. See the `resolve_request` function for more
/// information.
#[cfg(feature = "async")]
#[inline]
pub async fn resolve_request_async<R: Request>(
&mut self,
token: RequestCookie<R>,
) -> crate::Result<R::Reply>
where
R::Reply: Default,
{
if mem::size_of::<R::Reply>() == 0 {
return Ok(Default::default());
}
loop {
match self.pending_replies.remove(&token.sequence) {
Some(reply) => {
break Self::decode_reply::<R>(reply);
}
None => self.wait_async().await?,
}
}
}
#[inline]
fn from_connection_internal(connection: Conn) -> Self {
Self {
connection,
setup: Default::default(),
xid: Default::default(),
default_screen: 0,
event_queue: VecDeque::with_capacity(8),
pending_requests: VecDeque::new(),
pending_replies: HashMap::with_capacity(4),
request_number: 1,
wm_protocols_atom: None,
// context: HashMap::new(),
extensions: HashMap::with_capacity(8),
}
}
/// Creates a new `Display` from a connection and authentication info.
///
/// It is expected that the connection passed in has not had any information sent into it aside from
/// what is necessary for the underlying protocol. After the object is created, the `Display` will poll
/// the server for setup information.
#[inline]
pub fn from_connection(connection: Conn, auth: Option<AuthInfo>) -> crate::Result<Self> {
let mut d = Self::from_connection_internal(connection);
d.init(auth)?;
Ok(d)
}
/// Creates a new `Display` from a connection and authentication info, async redox. See the `from_connection`
/// function for more information.
#[cfg(feature = "async")]
#[inline]
pub async fn from_connection_async(
connection: Conn,
auth: Option<AuthInfo>,
) -> crate::Result<Self> {
let mut d = Self::from_connection_internal(connection);
d.init_async(auth).await?;
Ok(d)
}
/// Generate the setup from the authentication info.
#[inline]
fn create_setup(auth: AuthInfo) -> SetupRequest {
let AuthInfo { name, data, .. } = auth;
SetupRequest {
byte_order: endian_byte(),
protocol_major_version: 11,
protocol_minor_version: 0,
authorization_protocol_name: name,
authorization_protocol_data: data,
}
}
/// Initialize the setup.
#[inline]
fn init(&mut self, auth: Option<AuthInfo>) -> crate::Result {
let setup = Self::create_setup(match auth {
Some(auth) => auth,
None => AuthInfo::get(),
});
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size());
let len = setup.as_bytes(&mut bytes);
bytes.truncate(len);
self.connection.send_packet(&bytes[0..len])?;
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8);
self.connection.read_packet(&mut bytes)?;
match bytes[0] {
0 => return Err(crate::BreadError::FailedToConnect),
2 => return Err(crate::BreadError::FailedToAuthorize),
_ => (),
}
// read in the rest of the bytes
let length_bytes: [u8; 2] = [bytes[6], bytes[7]];
let length = (u16::from_ne_bytes(length_bytes) as usize) * 4;
bytes.extend(iter::once(0).cycle().take(length));
self.connection.read_packet(&mut bytes[8..])?;
let (setup, _) =
Setup::from_bytes(&bytes).ok_or(crate::BreadError::BadObjectRead(Some("Setup")))?;
self.setup = setup;
self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask);
log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base);
log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask);
log::debug!(
"resource_id inc. is {:#032b}",
self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg()
);
Ok(())
}
/// Initialize the setup, async redox.
///
/// TODO: lots of copy-pasted code; redo this at some point
#[cfg(feature = "async")]
#[inline]
async fn init_async(&mut self, auth: Option<AuthInfo>) -> crate::Result {
let setup = Self::create_setup(match auth {
Some(auth) => auth,
None => AuthInfo::get_async().await,
});
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size());
let len = setup.as_bytes(&mut bytes);
bytes.truncate(len);
self.connection.send_packet_async(&bytes[0..len]).await?;
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8);
self.connection.read_packet_async(&mut bytes).await?;
match bytes[0] {
0 => return Err(crate::BreadError::FailedToConnect),
2 => return Err(crate::BreadError::FailedToAuthorize),
_ => (),
}
// read in the rest of the bytes
let length_bytes: [u8; 2] = [bytes[6], bytes[7]];
let length = (u16::from_ne_bytes(length_bytes) as usize) * 4;
bytes.extend(iter::once(0).cycle().take(length));
self.connection.read_packet_async(&mut bytes[8..]).await?;
let (setup, _) = Setup::from_bytes(&bytes)
.ok_or_else(|| crate::BreadError::BadObjectRead(Some("Setup")))?;
self.setup = setup;
self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask);
log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base);
log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask);
log::debug!(
"resource_id inc. is {:#032b}",
self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg()
);
Ok(())
}
/// Get the setup associated with this display.
#[inline]
pub fn setup(&self) -> &Setup {
&self.setup
}
#[inline]
pub fn default_root(&self) -> Window {
self.default_screen().root
}
#[inline]
pub fn default_screen(&self) -> &Screen {
&self.setup.roots[self.default_screen]
}
#[inline]
pub fn default_white_pixel(&self) -> u32 {
self.default_screen().white_pixel
}
#[inline]
pub fn default_black_pixel(&self) -> u32 {
self.default_screen().black_pixel
}
#[inline]
pub fn default_visual_id(&self) -> Visualid {
self.default_screen().root_visual
}
#[inline]
pub fn default_visual(&self) -> &Visualtype {
self.visual_id_to_visual(self.default_visual_id()).unwrap()
}
#[inline]
pub fn default_colormap(&self) -> Colormap {
self.default_screen().default_colormap
}
/// Get a visual type from a visual ID.
#[inline]
pub fn visual_id_to_visual(&self, id: Visualid) -> Option<&Visualtype> {
self.setup
.roots
.iter()
.flat_map(|s| s.allowed_depths.iter())
.flat_map(|d| d.visuals.iter())
.find(|v| v.visual_id == id)
}
/// Generate a unique X ID for a window, colormap, or other object. Usually, `Display`'s helper functions
/// will generate this for you. If you'd like to circumvent them, this will generate IDs for you.
#[inline]
pub fn generate_xid(&mut self) -> crate::Result<XID> {
Ok(self.xid.next().unwrap())
}
/// Wait for an event to be generated by the X server.
///
/// This checks the event queue for a new event. If the queue is empty, the `Display` will poll the
/// server for new events.
#[inline]
pub fn wait_for_event(&mut self) -> crate::Result<Event> {
log::debug!("Beginning event wait...");
loop {
match self.event_queue.pop_front() {
Some(event) => break Ok(event),
None => self.wait()?,
}
}
}
/// Wait for an event to be generated by the X server, async redox. See the `wait_for_event` function for
/// more information.
#[cfg(feature = "async")]
#[inline]
pub async fn wait_for_event_async(&mut self) -> crate::Result<Event> {
loop {
match self.event_queue.pop_front() {
Some(event) => break Ok(event),
None => self.wait_async().await?,
}
}
}
/// If there is an event currently in the queue that matches the predicate, returns true.
#[inline]
pub fn check_if_event<F: FnMut(&Event) -> bool>(&self, predicate: F) -> bool {
self.event_queue.iter().any(predicate)
}
/*
/// Save a pointer into this display's map of contexts.
#[inline]
pub fn save_context(&mut self, xid: XID, context: ContextID, data: NonNull<c_void>) {
self.context.insert((xid, context), data);
}
/// Retrieve a pointer from the context.
#[inline]
pub fn find_context(&mut self, xid: XID, context: ContextID) -> Option<NonNull<c_void>> {
self.context.get(&(xid, context)).copied()
}
/// Delete an entry in the context.
#[inline]
pub fn delete_context(&mut self, xid: XID, context: ContextID) {
self.context.remove(&(xid, context));
}
*/
}
|
#[cfg(feature = "std")]
impl DisplayConnection {
/// Create a new connection to the X server, given an optional name and authorization information.
#[inline]
pub fn create(name: Option<Cow<'_, str>>, auth_info: Option<AuthInfo>) -> crate::Result<Self> {
let connection = name::NameConnection::connect_internal(name)?;
Self::from_connection(connection, auth_info)
}
/// Create a new connection to the X server, given an optional name and authorization information, async
/// redox.
#[cfg(feature = "async")]
#[inline]
pub async fn create_async(
name: Option<Cow<'_, str>>,
auth_info: Option<AuthInfo>,
) -> crate::Result<Self> {
let connection = name::NameConnection::connect_internal_async(name).await?;
Self::from_connection_async(connection, auth_info).await
}
} | /// A variant of `Display` that uses X11's default connection mechanisms to connect to the server. In
/// most cases, you should be using this over any variant of `Display`.
#[cfg(feature = "std")]
pub type DisplayConnection = Display<name::NameConnection>; | random_line_split |
mod.rs | // MIT/Apache2 License
//! This module defines the `Display` object, which acts as a connection to the X11 server, and the
//! `Connection` trait, which the `Display` object abstracts itself over. See the documentation for
//! these objects for more information.
use crate::{
auth_info::AuthInfo,
auto::{
xproto::{Colormap, Screen, Setup, SetupRequest, Visualid, Visualtype, Window},
AsByteSequence,
},
event::Event,
util::cycled_zeroes,
xid::XidGenerator,
Request, XID,
};
use alloc::{boxed::Box, collections::VecDeque};
use core::{fmt, iter, marker::PhantomData, mem, num::NonZeroU32};
use cty::c_int;
use hashbrown::HashMap;
use tinyvec::TinyVec;
#[cfg(feature = "std")]
use std::borrow::Cow;
#[cfg(feature = "async")]
use std::{future::Future, pin::Pin};
mod connection;
pub use connection::*;
#[cfg(feature = "std")]
pub mod name;
mod functions;
mod input;
mod output;
pub use functions::*;
pub(crate) const EXT_KEY_SIZE: usize = 24;
/// The connection to the X11 server. Most operations done in breadx revolve around this object
/// in some way, shape or form.
///
/// Internally, this acts as a layer of abstraction over the inner `Conn` object that keeps track
/// of the setup, outgoing and pending requests and replies, the event queue, et cetera. Traditionally,
/// X11 usually takes place over a TCP stream or a Unix socket connection; however, `Display` is able
/// to use any object implementing the `Connection` trait as a vehicle for the X11 protocol.
///
/// Upon its instantiation, the `Display` sends bytes to the server requesting the setup information, and
/// then stores it for later use. Afterwards, it awaits commands from the programmer to send requests,
/// receive replies or process events.
///
/// # Example
///
/// Open a connection to the X11 server and get the screen resolution.
///
/// ```rust,no_run
/// use breadx::DisplayConnection;
///
/// let mut conn = DisplayConnection::create(None, None).unwrap();
///
/// let default_screen = conn.default_screen();
/// println!("Default screen is {} x {}", default_screen.width_in_pixels, default_screen.height_in_pixels);
/// ```
pub struct Display<Conn> {
// the connection to the server
pub(crate) connection: Conn,
// the setup received from the server
pub(crate) setup: Setup,
// xid generator
xid: XidGenerator,
// the screen to be used by default
default_screen: usize,
// input variables
pub(crate) event_queue: VecDeque<Event>,
pub(crate) pending_requests: VecDeque<input::PendingRequest>,
pub(crate) pending_replies: HashMap<u16, Box<[u8]>>,
// output variables
request_number: u64,
// store the interned atoms
pub(crate) wm_protocols_atom: Option<NonZeroU32>,
// context db
// context: HashMap<(XID, ContextID), NonNull<c_void>>,
// hashmap linking extension names to major opcodes
// we use byte arrays instead of static string pointers
// here because cache locality leads to an overall speedup (todo: verify)
extensions: HashMap<[u8; EXT_KEY_SIZE], u8>,
}
/// Unique identifier for a context.
pub type ContextID = c_int;
/// A cookie for a request.
///
/// Requests usually take time to resolve into replies. Therefore, the `Display::send_request` method returns
/// the `RequestCookie`, which is later used to block (or await) for the request's eventual result.
#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Default, Eq, Hash)]
#[repr(transparent)]
pub struct RequestCookie<R: Request> {
sequence: u16,
_phantom: PhantomData<Option<R::Reply>>,
}
impl<R: Request> RequestCookie<R> {
#[inline]
pub(crate) fn from_sequence(sequence: u64) -> Self {
Self {
sequence: sequence as u16, // truncate to lower bits
_phantom: PhantomData,
}
}
}
#[derive(Default, Debug)]
pub(crate) struct PendingRequestFlags {
pub discard_reply: bool,
pub checked: bool,
}
impl<Conn: fmt::Debug> fmt::Debug for Display<Conn> {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Display")
.field("connection", &self.connection)
.field("setup", &self.setup)
.field("xid", &self.xid)
.field("default_screen", &self.default_screen)
.field("event_queue", &self.event_queue)
.field("pending_requests", &self.pending_requests)
.field("pending_replies", &self.pending_replies)
.field("request_number", &self.request_number)
.finish()
}
}
#[inline]
const fn endian_byte() -> u8 {
// Excerpt from the X Window System Protocol
//
// The client must send an initial byte of data to identify the byte order to be employed.
// The value of the byte must be octal 102 or 154. The value 102 (ASCII uppercase B) means
// values are transmitted most significant byte first, and value 154 (ASCII lowercase l)
// means values are transmitted least significant byte first.
#[cfg(not(target_endian = "little"))]
{
const BE_SIGNIFIER: u8 = b'B';
BE_SIGNIFIER
}
#[cfg(target_endian = "little")]
{
const LE_SIGNIFIER: u8 = b'l';
LE_SIGNIFIER
}
}
impl<Conn: Connection> Display<Conn> {
#[inline]
fn decode_reply<R: Request>(reply: Box<[u8]>) -> crate::Result<R::Reply> {
Ok(R::Reply::from_bytes(&reply)
.ok_or(crate::BreadError::BadObjectRead(None))?
.0)
}
/// Send a request object to the X11 server.
///
/// Given a request object, this function sends it across the connection to the X11 server and returns
/// a cookie used to determine when this request will resolve. Usually, the `Display` object has functions
/// that act as wrappers around this method; however, if you'd like to circumvent those, this is usually
/// the best option.
#[inline]
pub fn send_request<R: Request>(&mut self, req: R) -> crate::Result<RequestCookie<R>> {
self.send_request_internal(req)
}
/// Wait for a request from the X11 server.
///
/// This function checks the `Display`'s queues to see if a reply matching the given `RequestCookie`
/// has been processed by the X11 server. If not, it polls the server for new events until it has
/// determined that the request has resolved.
#[inline]
pub fn resolve_request<R: Request>(
&mut self,
token: RequestCookie<R>,
) -> crate::Result<R::Reply>
where
R::Reply: Default,
{
if mem::size_of::<R::Reply>() == 0 {
log::debug!("Immediately resolving for reply of size 0");
return Ok(Default::default());
}
loop {
log::trace!("Current replies: {:?}", &self.pending_replies);
match self.pending_replies.remove(&token.sequence) {
Some(reply) => break Self::decode_reply::<R>(reply),
None => self.wait()?,
}
}
}
/// Send a request object to the X11 server, async redox. See the `send_request` function for more
/// information.
#[cfg(feature = "async")]
#[inline]
pub fn send_request_async<'future, R: Request + Send + 'future>(
&'future mut self,
req: R,
) -> Pin<Box<dyn Future<Output = crate::Result<RequestCookie<R>>> + Send + 'future>> {
Box::pin(self.send_request_internal_async(req))
}
/// Wait for a request from the X11 server, async redox. See the `resolve_request` function for more
/// information.
#[cfg(feature = "async")]
#[inline]
pub async fn resolve_request_async<R: Request>(
&mut self,
token: RequestCookie<R>,
) -> crate::Result<R::Reply>
where
R::Reply: Default,
{
if mem::size_of::<R::Reply>() == 0 {
return Ok(Default::default());
}
loop {
match self.pending_replies.remove(&token.sequence) {
Some(reply) => {
break Self::decode_reply::<R>(reply);
}
None => self.wait_async().await?,
}
}
}
#[inline]
fn from_connection_internal(connection: Conn) -> Self {
Self {
connection,
setup: Default::default(),
xid: Default::default(),
default_screen: 0,
event_queue: VecDeque::with_capacity(8),
pending_requests: VecDeque::new(),
pending_replies: HashMap::with_capacity(4),
request_number: 1,
wm_protocols_atom: None,
// context: HashMap::new(),
extensions: HashMap::with_capacity(8),
}
}
/// Creates a new `Display` from a connection and authentication info.
///
/// It is expected that the connection passed in has not had any information sent into it aside from
/// what is necessary for the underlying protocol. After the object is created, the `Display` will poll
/// the server for setup information.
#[inline]
pub fn from_connection(connection: Conn, auth: Option<AuthInfo>) -> crate::Result<Self> {
let mut d = Self::from_connection_internal(connection);
d.init(auth)?;
Ok(d)
}
/// Creates a new `Display` from a connection and authentication info, async redox. See the `from_connection`
/// function for more information.
#[cfg(feature = "async")]
#[inline]
pub async fn from_connection_async(
connection: Conn,
auth: Option<AuthInfo>,
) -> crate::Result<Self> {
let mut d = Self::from_connection_internal(connection);
d.init_async(auth).await?;
Ok(d)
}
/// Generate the setup from the authentication info.
#[inline]
fn create_setup(auth: AuthInfo) -> SetupRequest {
let AuthInfo { name, data, .. } = auth;
SetupRequest {
byte_order: endian_byte(),
protocol_major_version: 11,
protocol_minor_version: 0,
authorization_protocol_name: name,
authorization_protocol_data: data,
}
}
/// Initialize the setup.
#[inline]
fn init(&mut self, auth: Option<AuthInfo>) -> crate::Result {
let setup = Self::create_setup(match auth {
Some(auth) => auth,
None => AuthInfo::get(),
});
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size());
let len = setup.as_bytes(&mut bytes);
bytes.truncate(len);
self.connection.send_packet(&bytes[0..len])?;
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8);
self.connection.read_packet(&mut bytes)?;
match bytes[0] {
0 => return Err(crate::BreadError::FailedToConnect),
2 => return Err(crate::BreadError::FailedToAuthorize),
_ => (),
}
// read in the rest of the bytes
let length_bytes: [u8; 2] = [bytes[6], bytes[7]];
let length = (u16::from_ne_bytes(length_bytes) as usize) * 4;
bytes.extend(iter::once(0).cycle().take(length));
self.connection.read_packet(&mut bytes[8..])?;
let (setup, _) =
Setup::from_bytes(&bytes).ok_or(crate::BreadError::BadObjectRead(Some("Setup")))?;
self.setup = setup;
self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask);
log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base);
log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask);
log::debug!(
"resource_id inc. is {:#032b}",
self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg()
);
Ok(())
}
/// Initialize the setup, async redox.
///
/// TODO: lots of copy-pasted code; redo this at some point
#[cfg(feature = "async")]
#[inline]
async fn init_async(&mut self, auth: Option<AuthInfo>) -> crate::Result {
let setup = Self::create_setup(match auth {
Some(auth) => auth,
None => AuthInfo::get_async().await,
});
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(setup.size());
let len = setup.as_bytes(&mut bytes);
bytes.truncate(len);
self.connection.send_packet_async(&bytes[0..len]).await?;
let mut bytes: TinyVec<[u8; 32]> = cycled_zeroes(8);
self.connection.read_packet_async(&mut bytes).await?;
match bytes[0] {
0 => return Err(crate::BreadError::FailedToConnect),
2 => return Err(crate::BreadError::FailedToAuthorize),
_ => (),
}
// read in the rest of the bytes
let length_bytes: [u8; 2] = [bytes[6], bytes[7]];
let length = (u16::from_ne_bytes(length_bytes) as usize) * 4;
bytes.extend(iter::once(0).cycle().take(length));
self.connection.read_packet_async(&mut bytes[8..]).await?;
let (setup, _) = Setup::from_bytes(&bytes)
.ok_or_else(|| crate::BreadError::BadObjectRead(Some("Setup")))?;
self.setup = setup;
self.xid = XidGenerator::new(self.setup.resource_id_base, self.setup.resource_id_mask);
log::debug!("resource_id_base is {:#032b}", self.setup.resource_id_base);
log::debug!("resource_id_mask is {:#032b}", self.setup.resource_id_mask);
log::debug!(
"resource_id inc. is {:#032b}",
self.setup.resource_id_mask & self.setup.resource_id_mask.wrapping_neg()
);
Ok(())
}
/// Get the setup associated with this display.
#[inline]
pub fn setup(&self) -> &Setup {
&self.setup
}
#[inline]
pub fn default_root(&self) -> Window {
self.default_screen().root
}
#[inline]
pub fn default_screen(&self) -> &Screen {
&self.setup.roots[self.default_screen]
}
#[inline]
pub fn default_white_pixel(&self) -> u32 {
self.default_screen().white_pixel
}
#[inline]
pub fn default_black_pixel(&self) -> u32 {
self.default_screen().black_pixel
}
#[inline]
pub fn default_visual_id(&self) -> Visualid {
self.default_screen().root_visual
}
#[inline]
pub fn default_visual(&self) -> &Visualtype {
self.visual_id_to_visual(self.default_visual_id()).unwrap()
}
#[inline]
pub fn default_colormap(&self) -> Colormap {
self.default_screen().default_colormap
}
/// Get a visual type from a visual ID.
#[inline]
pub fn visual_id_to_visual(&self, id: Visualid) -> Option<&Visualtype> {
self.setup
.roots
.iter()
.flat_map(|s| s.allowed_depths.iter())
.flat_map(|d| d.visuals.iter())
.find(|v| v.visual_id == id)
}
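// Usage sketch: the lookup above walks every allowed depth of every screen, so
// it is O(total visuals) rather than hashed; default_visual() relies on the
// root visual always being present in the setup.
//
//     let vis = display.visual_id_to_visual(display.default_visual_id())
//         .expect("root visual must exist in the setup");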
/// Generate a unique X ID for a window, colormap, or other object. Usually, `Display`'s helper functions
/// will generate this for you. If you'd like to circumvent them, this will generate IDs for you.
#[inline]
pub fn generate_xid(&mut self) -> crate::Result<XID> {
Ok(self.xid.next().unwrap())
}
/// Wait for an event to be generated by the X server.
///
/// This checks the event queue for a new event. If the queue is empty, the `Display` will poll the
/// server for new events.
#[inline]
pub fn wait_for_event(&mut self) -> crate::Result<Event> {
log::debug!("Beginning event wait...");
loop {
match self.event_queue.pop_front() {
Some(event) => break Ok(event),
None => self.wait()?,
}
}
}
/// Wait for an event to be generated by the X server, async redox. See the `wait_for_event` function for
/// more information.
#[cfg(feature = "async")]
#[inline]
pub async fn | (&mut self) -> crate::Result<Event> {
loop {
match self.event_queue.pop_front() {
Some(event) => break Ok(event),
None => self.wait_async().await?,
}
}
}
/// If there is an event currently in the queue that matches the predicate, returns true.
#[inline]
pub fn check_if_event<F: FnMut(&Event) -> bool>(&self, predicate: F) -> bool {
self.event_queue.iter().any(predicate)
}
/*
/// Save a pointer into this display's map of contexts.
#[inline]
pub fn save_context(&mut self, xid: XID, context: ContextID, data: NonNull<c_void>) {
self.context.insert((xid, context), data);
}
/// Retrieve a pointer from the context.
#[inline]
pub fn find_context(&mut self, xid: XID, context: ContextID) -> Option<NonNull<c_void>> {
self.context.get(&(xid, context)).copied()
}
/// Delete an entry in the context.
#[inline]
pub fn delete_context(&mut self, xid: XID, context: ContextID) {
self.context.remove(&(xid, context));
}
*/
}
/// A variant of `Display` that uses X11's default connection mechanisms to connect to the server. In
/// most cases, you should be using this over any variant of `Display`.
#[cfg(feature = "std")]
pub type DisplayConnection = Display<name::NameConnection>;
#[cfg(feature = "std")]
impl DisplayConnection {
/// Create a new connection to the X server, given an optional name and authorization information.
#[inline]
pub fn create(name: Option<Cow<'_, str>>, auth_info: Option<AuthInfo>) -> crate::Result<Self> {
let connection = name::NameConnection::connect_internal(name)?;
Self::from_connection(connection, auth_info)
}
/// Create a new connection to the X server, given an optional name and authorization information, async
/// redox.
#[cfg(feature = "async")]
#[inline]
pub async fn create_async(
name: Option<Cow<'_, str>>,
auth_info: Option<AuthInfo>,
) -> crate::Result<Self> {
let connection = name::NameConnection::connect_internal_async(name).await?;
Self::from_connection_async(connection, auth_info).await
}
}
| wait_for_event_async | identifier_name |
server.rs | //
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::core::config::ApplicationCfg;
use crate::mvt_service::MvtService;
use crate::runtime_config::{config_from_args, service_from_args};
use crate::static_files::StaticFiles;
use actix_cors::Cors;
use actix_files as fs;
use actix_web::http::header;
use actix_web::middleware::Compress;
use actix_web::{
guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result,
};
use clap::ArgMatches;
use log::Level;
use num_cpus;
use open;
use std::collections::HashMap;
use std::str;
use std::str::FromStr;
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> {
let json = service.get_mvt_metadata()?;
Ok(HttpResponse::Ok().json(&json))
}
/// Font list for Maputnik
async fn fontstacks() -> Result<HttpResponse> {
Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"]))
}
// Include method fonts() which returns HashMap with embedded font files
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
/// Fonts for Maputnik
/// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> {
let fontpbfs = fonts();
let fontlist = ¶ms.as_ref().0;
let range = ¶ms.as_ref().1;
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
let mut resp = HttpResponse::NotFound().finish();
for font in fonts {
let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
debug!("Font lookup: {}", key);
if let Some(pbf) = fontpbfs.get(&key as &str) {
resp = HttpResponse::Ok()
.content_type("application/x-protobuf")
// data is already gzip compressed
.insert_header(header::ContentEncoding::Gzip)
.body(*pbf); // TODO: chunked response
break;
}
}
Ok(resp)
}
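// The {range} segment follows the Mapbox glyph convention of 256-codepoint
// pages ("0-255", "256-511", ...). A hypothetical invocation:
//
//     curl -H 'Accept-Encoding: gzip' \
//          http://localhost:6767/fonts/Roboto%20Regular/0-255.pbf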
fn req_baseurl(req: &HttpRequest) -> String {
let conninfo = req.connection_info();
format!("{}://{}", conninfo.scheme(), conninfo.host())
}
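// E.g. a request received as "http://localhost:6767/mytiles.json" (hypothetical
// host and tileset) yields "http://localhost:6767", which is prepended to the
// tile URL templates in the TileJSON and style documents.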
async fn tileset_tilejson(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let url = req_baseurl(&req);
let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_style_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let json = service.get_stylejson(&req_baseurl(&req), &tileset)?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_metadata_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
) -> Result<HttpResponse> {
let json =
web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tile_pbf(
config: web::Data<ApplicationCfg>,
service: web::Data<MvtService>,
params: web::Path<(String, u8, u32, u32)>,
req: HttpRequest,
) -> Result<HttpResponse> {
let params = params.into_inner();
let tileset = params.0;
let z = params.1;
let x = params.2;
let y = params.3;
let gzip = req
.headers()
.get(header::ACCEPT_ENCODING)
.and_then(|headerval| {
headerval
.to_str()
.ok()
.and_then(|headerstr| Some(headerstr.contains("gzip")))
})
.unwrap_or(false);
// rust-postgres starts its own Tokio runtime
// without blocking we get 'Cannot start a runtime from within a runtime'
let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?;
let resp = match tile {
Some(tile) => {
let mut r = HttpResponse::Ok();
r.content_type("application/x-protobuf");
if gzip {
// data is already gzip compressed
r.insert_header(header::ContentEncoding::Gzip);
}
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age)));
r.body(tile) // TODO: chunked response
}
None => HttpResponse::NoContent().finish(),
};
Ok(resp)
}
lazy_static! {
static ref STATIC_FILES: StaticFiles = StaticFiles::init();
}
async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> {
let key = req.path()[1..].to_string();
let resp = if let Some(ref content) = STATIC_FILES.content(None, key) {
HttpResponse::Ok()
.insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware
.content_type(content.1)
.body(content.0) // TODO: chunked response
} else {
HttpResponse::NotFound().finish()
};
Ok(resp)
}
#[derive(Deserialize)]
struct DrilldownParams {
minzoom: Option<u8>,
maxzoom: Option<u8>,
points: String, //x1,y1,x2,y2,..
}
async fn drilldown_handler(
service: web::Data<MvtService>,
params: web::Query<DrilldownParams>,
) -> Result<HttpResponse> {
let tileset = None; // all tilesets
let progress = false;
let points: Vec<f64> = params
.points
.split(",")
.map(|v| {
v.parse()
.expect("Error parsing 'point' as pair of float values")
//FIXME: map_err(|_| error::ErrorInternalServerError("...")
})
.collect();
let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress);
let json = stats.as_json()?;
Ok(HttpResponse::Ok().json(&json))
}
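// Hypothetical example query, with "points" a flat x,y list parsed pairwise:
//
//     GET /drilldown?minzoom=0&maxzoom=14&points=8.53,47.37,9.04,47.46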
#[actix_web::main]
pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> {
let config = config_from_args(&args);
let host = config
.webserver
.bind
.clone()
.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let bind_addr = format!("{}:{}", host, port);
let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8);
let mvt_viewer = config.service.mvt.viewer;
let openbrowser =
bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false);
let static_dirs = config.webserver.static_.clone();
let svc_config = config.clone();
let service = web::block(move || {
let mut service = service_from_args(&svc_config, &args);
service.prepare_feature_queries();
service.init_cache();
service
})
.await
.expect("service initialization failed");
let server = HttpServer::new(move || {
let mut app = App::new()
.app_data(Data::new(config.clone()))
.app_data(Data::new(service.clone()))
.wrap(middleware::Logger::new("%r %s %b %Dms %a"))
.wrap(Compress::default())
.wrap(
Cors::default()
.allow_any_origin()
.send_wildcard()
.allowed_methods(vec!["GET"]),
)
.service(
web::resource("/index.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(mvt_metadata),
),
)
.service(
web::resource("/fontstacks.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts/{fonts}/{range}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fonts_pbf),
),
);
for static_dir in &static_dirs {
let dir = &static_dir.dir;
if std::path::Path::new(dir).is_dir() {
info!("Serving static files from directory '{}'", dir);
app = app.service(fs::Files::new(&static_dir.path, dir));
} else {
warn!("Static file directory '{}' not found", dir);
}
}
app = app
.service(
web::resource("/{tileset}.style.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_style_json),
),
)
.service(
web::resource("/{tileset}/metadata.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_metadata_json),
),
)
.service(
web::resource("/{tileset}.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_tilejson),
),
) | .service(
web::resource("/{tileset}/{z}/{x}/{y}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tile_pbf),
),
);
if mvt_viewer {
app = app.service(
web::resource("/drilldown").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(drilldown_handler),
),
);
app = app.default_service(web::to(static_file_handler));
}
app
})
.workers(workers as usize)
.bind(&bind_addr)
.expect("Can not start server on given IP/Port")
.shutdown_timeout(3) // default: 30s
.run();
if log_enabled!(Level::Info) {
println!("{}", DINO);
}
if openbrowser && mvt_viewer {
let _res = open::that(format!("http://{}:{}", &host, port));
}
server.await
} | random_line_split |
|
server.rs | //
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::core::config::ApplicationCfg;
use crate::mvt_service::MvtService;
use crate::runtime_config::{config_from_args, service_from_args};
use crate::static_files::StaticFiles;
use actix_cors::Cors;
use actix_files as fs;
use actix_web::http::header;
use actix_web::middleware::Compress;
use actix_web::{
guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result,
};
use clap::ArgMatches;
use log::Level;
use num_cpus;
use open;
use std::collections::HashMap;
use std::str;
use std::str::FromStr;
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> {
let json = service.get_mvt_metadata()?;
Ok(HttpResponse::Ok().json(&json))
}
/// Font list for Maputnik
async fn fontstacks() -> Result<HttpResponse> {
Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"]))
}
// Include method fonts() which returns HashMap with embedded font files
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
/// Fonts for Maputnik
/// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> {
let fontpbfs = fonts();
let fontlist = ¶ms.as_ref().0;
let range = ¶ms.as_ref().1;
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
let mut resp = HttpResponse::NotFound().finish();
for font in fonts {
let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
debug!("Font lookup: {}", key);
if let Some(pbf) = fontpbfs.get(&key as &str) {
resp = HttpResponse::Ok()
.content_type("application/x-protobuf")
// data is already gzip compressed
.insert_header(header::ContentEncoding::Gzip)
.body(*pbf); // TODO: chunked response
break;
}
}
Ok(resp)
}
fn req_baseurl(req: &HttpRequest) -> String {
let conninfo = req.connection_info();
format!("{}://{}", conninfo.scheme(), conninfo.host())
}
async fn tileset_tilejson(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let url = req_baseurl(&req);
let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_style_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let json = service.get_stylejson(&req_baseurl(&req), &tileset)?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_metadata_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
) -> Result<HttpResponse> {
let json =
web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn | (
config: web::Data<ApplicationCfg>,
service: web::Data<MvtService>,
params: web::Path<(String, u8, u32, u32)>,
req: HttpRequest,
) -> Result<HttpResponse> {
let params = params.into_inner();
let tileset = params.0;
let z = params.1;
let x = params.2;
let y = params.3;
let gzip = req
.headers()
.get(header::ACCEPT_ENCODING)
.and_then(|headerval| {
headerval
.to_str()
.ok()
.and_then(|headerstr| Some(headerstr.contains("gzip")))
})
.unwrap_or(false);
// rust-postgres starts its own Tokio runtime
// without blocking we get 'Cannot start a runtime from within a runtime'
let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?;
let resp = match tile {
Some(tile) => {
let mut r = HttpResponse::Ok();
r.content_type("application/x-protobuf");
if gzip {
// data is already gzip compressed
r.insert_header(header::ContentEncoding::Gzip);
}
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age)));
r.body(tile) // TODO: chunked response
}
None => HttpResponse::NoContent().finish(),
};
Ok(resp)
}
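// Illustrative sketch, not part of the original file: the web::block pattern
// used by tile_pbf above. rust-postgres owns its own Tokio runtime, so the
// query must run on a worker thread instead of inside the actix runtime.
// load_tile_blocking is a hypothetical stand-in for service.tile_cached.
fn load_tile_blocking() -> Option<Vec<u8>> {
    None // imagine a synchronous database query here
}

async fn tile_sketch() -> Result<HttpResponse> {
    // The closure is moved to the blocking thread pool; `.await?` surfaces a
    // cancelled or panicked worker as an actix error.
    let tile = web::block(load_tile_blocking).await?;
    Ok(match tile {
        Some(data) => HttpResponse::Ok().body(data),
        None => HttpResponse::NoContent().finish(),
    })
}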
lazy_static! {
static ref STATIC_FILES: StaticFiles = StaticFiles::init();
}
async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> {
let key = req.path()[1..].to_string();
let resp = if let Some(ref content) = STATIC_FILES.content(None, key) {
HttpResponse::Ok()
.insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware
.content_type(content.1)
.body(content.0) // TODO: chunked response
} else {
HttpResponse::NotFound().finish()
};
Ok(resp)
}
#[derive(Deserialize)]
struct DrilldownParams {
minzoom: Option<u8>,
maxzoom: Option<u8>,
points: String, //x1,y1,x2,y2,..
}
async fn drilldown_handler(
service: web::Data<MvtService>,
params: web::Query<DrilldownParams>,
) -> Result<HttpResponse> {
let tileset = None; // all tilesets
let progress = false;
let points: Vec<f64> = params
.points
.split(",")
.map(|v| {
v.parse()
.expect("Error parsing 'point' as pair of float values")
//FIXME: map_err(|_| error::ErrorInternalServerError("..."))
})
.collect();
let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress);
let json = stats.as_json()?;
Ok(HttpResponse::Ok().json(&json))
}
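// Illustrative sketch, not part of the original file: the error handling the
// FIXME above points at — reject bad input with a 400 instead of panicking.
fn parse_points(raw: &str) -> Result<Vec<f64>> {
    raw.split(',')
        .map(|v| {
            v.trim()
                .parse::<f64>()
                .map_err(|_| actix_web::error::ErrorBadRequest("invalid point coordinate"))
        })
        .collect()
}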
#[actix_web::main]
pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> {
let config = config_from_args(&args);
let host = config
.webserver
.bind
.clone()
.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let bind_addr = format!("{}:{}", host, port);
let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8);
let mvt_viewer = config.service.mvt.viewer;
let openbrowser =
bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false);
let static_dirs = config.webserver.static_.clone();
let svc_config = config.clone();
let service = web::block(move || {
let mut service = service_from_args(&svc_config, &args);
service.prepare_feature_queries();
service.init_cache();
service
})
.await
.expect("service initialization failed");
let server = HttpServer::new(move || {
let mut app = App::new()
.app_data(Data::new(config.clone()))
.app_data(Data::new(service.clone()))
.wrap(middleware::Logger::new("%r %s %b %Dms %a"))
.wrap(Compress::default())
.wrap(
Cors::default()
.allow_any_origin()
.send_wildcard()
.allowed_methods(vec!["GET"]),
)
.service(
web::resource("/index.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(mvt_metadata),
),
)
.service(
web::resource("/fontstacks.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts/{fonts}/{range}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fonts_pbf),
),
);
for static_dir in &static_dirs {
let dir = &static_dir.dir;
if std::path::Path::new(dir).is_dir() {
info!("Serving static files from directory '{}'", dir);
app = app.service(fs::Files::new(&static_dir.path, dir));
} else {
warn!("Static file directory '{}' not found", dir);
}
}
app = app
.service(
web::resource("/{tileset}.style.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_style_json),
),
)
.service(
web::resource("/{tileset}/metadata.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_metadata_json),
),
)
.service(
web::resource("/{tileset}.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_tilejson),
),
)
.service(
web::resource("/{tileset}/{z}/{x}/{y}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tile_pbf),
),
);
if mvt_viewer {
app = app.service(
web::resource("/drilldown").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(drilldown_handler),
),
);
app = app.default_service(web::to(static_file_handler));
}
app
})
.workers(workers as usize)
.bind(&bind_addr)
.expect("Can not start server on given IP/Port")
.shutdown_timeout(3) // default: 30s
.run();
if log_enabled!(Level::Info) {
println!("{}", DINO);
}
if openbrowser && mvt_viewer {
let _res = open::that(format!("http://{}:{}", &host, port));
}
server.await
}
| tile_pbf | identifier_name |
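// Illustrative sketch, not part of the original file: the GET-or-HEAD guard
// recipe repeated for every route above, folded into a helper. get_or_head is
// a hypothetical name; the trait bounds mirror actix-web 4's Route::to.
fn get_or_head<H, Args>(handler: H) -> actix_web::Route
where
    H: actix_web::Handler<Args>,
    Args: actix_web::FromRequest + 'static,
    H::Output: actix_web::Responder + 'static,
{
    web::route()
        .guard(guard::Any(guard::Get()).or(guard::Head()))
        .to(handler)
}
// usage: .service(web::resource("/index.json").route(get_or_head(mvt_metadata)))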
server.rs | //
// Copyright (c) Pirmin Kalberer. All rights reserved.
// Licensed under the MIT License. See LICENSE file in the project root for full license information.
//
use crate::core::config::ApplicationCfg;
use crate::mvt_service::MvtService;
use crate::runtime_config::{config_from_args, service_from_args};
use crate::static_files::StaticFiles;
use actix_cors::Cors;
use actix_files as fs;
use actix_web::http::header;
use actix_web::middleware::Compress;
use actix_web::{
guard, middleware, web, web::Data, App, HttpRequest, HttpResponse, HttpServer, Result,
};
use clap::ArgMatches;
use log::Level;
use num_cpus;
use open;
use std::collections::HashMap;
use std::str;
use std::str::FromStr;
static DINO: &'static str = " xxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxx xxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx x
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
xxxxxxxxxxxxxxxxxxxxxxxxxx xxxxxxxxxxx
xxxxxxxxxxxxxx xxxxxx
xxxxxxxxxxxx
xxxxxxxxxxx
xxxxxxxxxx
xxxxxxxxx
xxxxxxx
xxxxxx
xxxxxxx";
async fn mvt_metadata(service: web::Data<MvtService>) -> Result<HttpResponse> {
let json = service.get_mvt_metadata()?;
Ok(HttpResponse::Ok().json(&json))
}
/// Font list for Maputnik
async fn fontstacks() -> Result<HttpResponse> {
Ok(HttpResponse::Ok().json(&["Roboto Medium", "Roboto Regular"]))
}
// Include method fonts() which returns HashMap with embedded font files
include!(concat!(env!("OUT_DIR"), "/fonts.rs"));
/// Fonts for Maputnik
/// Example: /fonts/Open%20Sans%20Regular,Arial%20Unicode%20MS%20Regular/0-255.pbf
async fn fonts_pbf(params: web::Path<(String, String)>) -> Result<HttpResponse> {
let fontpbfs = fonts();
let fontlist = ¶ms.as_ref().0;
let range = ¶ms.as_ref().1;
let mut fonts = fontlist.split(",").collect::<Vec<_>>();
fonts.push("Roboto Regular"); // Fallback
let mut resp = HttpResponse::NotFound().finish();
for font in fonts {
let key = format!("fonts/{}/{}.pbf", font.replace("%20", " "), range);
debug!("Font lookup: {}", key);
if let Some(pbf) = fontpbfs.get(&key as &str) {
resp = HttpResponse::Ok()
.content_type("application/x-protobuf")
// data is already gzip compressed
.insert_header(header::ContentEncoding::Gzip)
.body(*pbf); // TODO: chunked response
break;
}
}
Ok(resp)
}
fn req_baseurl(req: &HttpRequest) -> String {
let conninfo = req.connection_info();
format!("{}://{}", conninfo.scheme(), conninfo.host())
}
async fn tileset_tilejson(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let url = req_baseurl(&req);
let json = web::block(move || service.get_tilejson(&url, &tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_style_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
req: HttpRequest,
) -> Result<HttpResponse> {
let json = service.get_stylejson(&req_baseurl(&req), &tileset)?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tileset_metadata_json(
service: web::Data<MvtService>,
tileset: web::Path<String>,
) -> Result<HttpResponse> {
let json =
web::block(move || service.get_mbtiles_metadata(&tileset, &service.grid).ok()).await?;
Ok(HttpResponse::Ok().json(&json))
}
async fn tile_pbf(
config: web::Data<ApplicationCfg>,
service: web::Data<MvtService>,
params: web::Path<(String, u8, u32, u32)>,
req: HttpRequest,
) -> Result<HttpResponse> | Some(tile) => {
let mut r = HttpResponse::Ok();
r.content_type("application/x-protobuf");
if gzip {
// data is already gzip compressed
r.insert_header(header::ContentEncoding::Gzip);
}
let cache_max_age = config.webserver.cache_control_max_age.unwrap_or(300);
r.insert_header((header::CACHE_CONTROL, format!("max-age={}", cache_max_age)));
r.body(tile) // TODO: chunked response
}
None => HttpResponse::NoContent().finish(),
};
Ok(resp)
}
lazy_static! {
static ref STATIC_FILES: StaticFiles = StaticFiles::init();
}
async fn static_file_handler(req: HttpRequest) -> Result<HttpResponse> {
let key = req.path()[1..].to_string();
let resp = if let Some(ref content) = STATIC_FILES.content(None, key) {
HttpResponse::Ok()
.insert_header((header::ACCESS_CONTROL_ALLOW_ORIGIN, "*")) // TODO: use Actix middleware
.content_type(content.1)
.body(content.0) // TODO: chunked response
} else {
HttpResponse::NotFound().finish()
};
Ok(resp)
}
#[derive(Deserialize)]
struct DrilldownParams {
minzoom: Option<u8>,
maxzoom: Option<u8>,
points: String, //x1,y1,x2,y2,..
}
async fn drilldown_handler(
service: web::Data<MvtService>,
params: web::Query<DrilldownParams>,
) -> Result<HttpResponse> {
let tileset = None; // all tilesets
let progress = false;
let points: Vec<f64> = params
.points
.split(",")
.map(|v| {
v.parse()
.expect("Error parsing 'point' as pair of float values")
//FIXME: map_err(|_| error::ErrorInternalServerError("..."))
})
.collect();
let stats = service.drilldown(tileset, params.minzoom, params.maxzoom, points, progress);
let json = stats.as_json()?;
Ok(HttpResponse::Ok().json(&json))
}
#[actix_web::main]
pub async fn webserver(args: ArgMatches<'static>) -> std::io::Result<()> {
let config = config_from_args(&args);
let host = config
.webserver
.bind
.clone()
.unwrap_or("127.0.0.1".to_string());
let port = config.webserver.port.unwrap_or(6767);
let bind_addr = format!("{}:{}", host, port);
let workers = config.webserver.threads.unwrap_or(num_cpus::get() as u8);
let mvt_viewer = config.service.mvt.viewer;
let openbrowser =
bool::from_str(args.value_of("openbrowser").unwrap_or("true")).unwrap_or(false);
let static_dirs = config.webserver.static_.clone();
let svc_config = config.clone();
let service = web::block(move || {
let mut service = service_from_args(&svc_config, &args);
service.prepare_feature_queries();
service.init_cache();
service
})
.await
.expect("service initialization failed");
let server = HttpServer::new(move || {
let mut app = App::new()
.app_data(Data::new(config.clone()))
.app_data(Data::new(service.clone()))
.wrap(middleware::Logger::new("%r %s %b %Dms %a"))
.wrap(Compress::default())
.wrap(
Cors::default()
.allow_any_origin()
.send_wildcard()
.allowed_methods(vec!["GET"]),
)
.service(
web::resource("/index.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(mvt_metadata),
),
)
.service(
web::resource("/fontstacks.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fontstacks),
),
)
.service(
web::resource("/fonts/{fonts}/{range}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(fonts_pbf),
),
);
for static_dir in &static_dirs {
let dir = &static_dir.dir;
if std::path::Path::new(dir).is_dir() {
info!("Serving static files from directory '{}'", dir);
app = app.service(fs::Files::new(&static_dir.path, dir));
} else {
warn!("Static file directory '{}' not found", dir);
}
}
app = app
.service(
web::resource("/{tileset}.style.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_style_json),
),
)
.service(
web::resource("/{tileset}/metadata.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_metadata_json),
),
)
.service(
web::resource("/{tileset}.json").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tileset_tilejson),
),
)
.service(
web::resource("/{tileset}/{z}/{x}/{y}.pbf").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(tile_pbf),
),
);
if mvt_viewer {
app = app.service(
web::resource("/drilldown").route(
web::route()
.guard(guard::Any(guard::Get()).or(guard::Head()))
.to(drilldown_handler),
),
);
app = app.default_service(web::to(static_file_handler));
}
app
})
.workers(workers as usize)
.bind(&bind_addr)
.expect("Can not start server on given IP/Port")
.shutdown_timeout(3) // default: 30s
.run();
if log_enabled!(Level::Info) {
println!("{}", DINO);
}
if openbrowser && mvt_viewer {
let _res = open::that(format!("http://{}:{}", &host, port));
}
server.await
}
| {
let params = params.into_inner();
let tileset = params.0;
let z = params.1;
let x = params.2;
let y = params.3;
let gzip = req
.headers()
.get(header::ACCEPT_ENCODING)
.and_then(|headerval| {
headerval
.to_str()
.ok()
.and_then(|headerstr| Some(headerstr.contains("gzip")))
})
.unwrap_or(false);
// rust-postgres starts its own Tokio runtime
// without blocking we get 'Cannot start a runtime from within a runtime'
let tile = web::block(move || service.tile_cached(&tileset, x, y, z, gzip, None)).await?;
let resp = match tile { | identifier_body |
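// Illustrative sketch, not part of the original file: the Accept-Encoding
// probe from tile_pbf as a helper; `.map` is the idiomatic spelling of the
// `.and_then(|s| Some(..))` chain used above.
fn accepts_gzip(req: &HttpRequest) -> bool {
    req.headers()
        .get(header::ACCEPT_ENCODING)
        .and_then(|v| v.to_str().ok())
        .map(|s| s.contains("gzip"))
        .unwrap_or(false)
}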
interaction.rs | use bevy_math::{Mat4, Quat, Vec2, Vec3};
use bevy_utils::Duration;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
ops::{Deref, Mul},
};
// Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved
// using `XrTrackingSource::hands_pose()`.
pub const XR_HAND_JOINT_WRIST: usize = 0;
pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1;
pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2;
pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3;
pub const XR_HAND_JOINT_THUMB_TIP: usize = 4;
pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5;
pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6;
pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7;
pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8;
pub const XR_HAND_JOINT_INDEX_TIP: usize = 9;
pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10;
pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11;
pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12;
pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13;
pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14;
pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15;
pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16;
pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17;
pub const XR_HAND_JOINT_RING_DISTAL: usize = 18;
pub const XR_HAND_JOINT_RING_TIP: usize = 19;
pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20;
pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21;
pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22;
pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23;
pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24;
// To be verified: in all useful instances, when the orientation is valid, the position is also
// valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model.
// In case of hand tracking, when a joint is estimated, both pose and orientation are available.
#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)]
pub struct XrRigidTransform {
pub position: Vec3,
pub orientation: Quat,
}
impl Mul for XrRigidTransform {
type Output = XrRigidTransform;
fn mul(self, rhs: Self) -> Self::Output {
XrRigidTransform {
position: self.position + self.orientation * rhs.position,
orientation: self.orientation * rhs.orientation,
}
}
}
impl XrRigidTransform {
pub fn to_mat4(&self) -> Mat4 {
todo!()
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrPose {
pub transform: XrRigidTransform,
pub linear_velocity: Option<Vec3>,
pub angular_velocity: Option<Vec3>,
pub emulated_position: bool,
}
impl Deref for XrPose {
type Target = XrRigidTransform;
fn deref(&self) -> &Self::Target {
&self.transform
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrJointPose {
pub pose: XrPose,
/// Radius of a sphere placed at the center of the joint that roughly touches the skin on both
/// sides of the hand.
pub radius: f32,
}
impl Deref for XrJointPose {
type Target = XrPose;
fn deref(&self) -> &Self::Target {
&self.pose
}
}
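// Illustrative sketch, not part of the original file: consuming the joint
// constants through the Deref chain just defined. XrJointPose derefs to
// XrPose, which derefs to XrRigidTransform, so `joint.position` reaches the
// Vec3 directly. `skeleton` is assumed to be one hand's Vec<XrJointPose> as
// returned by hands_skeleton_pose() below.
fn pinch_distance(skeleton: &[XrJointPose]) -> Option<f32> {
    let thumb = skeleton.get(XR_HAND_JOINT_THUMB_TIP)?;
    let index = skeleton.get(XR_HAND_JOINT_INDEX_TIP)?;
    Some(thumb.position.distance(index.position))
}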
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrReferenceSpaceType {
/// The coordinate system (position and orientation) is set as the headset pose at startup or
/// after a recenter. This should be used only for experiences where the user is laid down.
Viewer,
/// The coordinate system (position and gravity-aligned orientation) is calculated from the
/// headset pose at startup or after a recenter. This is for seated experiences.
Local,
/// The coordinate system (position and orientation) corresponds to the center of a rectangle at
/// floor level, with +Y up. This is for standing or room-scale experiences.
Stage,
}
pub mod implementation {
use super::XrReferenceSpaceType;
use crate::{interaction::XrPose, XrJointPose};
use bevy_math::Vec3;
pub trait XrTrackingSourceBackend: Send + Sync {
fn reference_space_type(&self) -> XrReferenceSpaceType;
fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool;
fn bounds_geometry(&self) -> Option<Vec<Vec3>>;
fn views_poses(&self) -> Vec<XrPose>;
fn hands_pose(&self) -> [Option<XrPose>; 2];
fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2];
fn hands_target_ray(&self) -> [Option<XrPose>; 2];
fn viewer_target_ray(&self) -> XrPose;
}
}
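// Illustrative sketch, not part of the original file: the smallest possible
// backend, handy for tests. Real backends (e.g. bevy_openxr) query the XR
// runtime instead of returning fixed values.
struct DummyBackend;

impl implementation::XrTrackingSourceBackend for DummyBackend {
    fn reference_space_type(&self) -> XrReferenceSpaceType {
        XrReferenceSpaceType::Local
    }
    fn set_reference_space_type(&self, _: XrReferenceSpaceType) -> bool {
        false
    }
    fn bounds_geometry(&self) -> Option<Vec<Vec3>> {
        None
    }
    fn views_poses(&self) -> Vec<XrPose> {
        Vec::new()
    }
    fn hands_pose(&self) -> [Option<XrPose>; 2] {
        [None, None]
    }
    fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] {
        [None, None]
    }
    fn hands_target_ray(&self) -> [Option<XrPose>; 2] {
        [None, None]
    }
    fn viewer_target_ray(&self) -> XrPose {
        XrPose::default()
    }
}
// usage: let source = XrTrackingSource::new(Box::new(DummyBackend));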
/// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best
/// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary
/// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState.
pub struct XrTrackingSource {
inner: Box<dyn implementation::XrTrackingSourceBackend>,
}
impl XrTrackingSource {
pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self {
Self { inner: backend }
}
pub fn reference_space_type(&self) -> XrReferenceSpaceType {
self.inner.reference_space_type()
}
/// Returns true if the tracking mode has been set correctly. If false is returned the tracking
/// mode is not supported and another one must be chosen.
pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool {
self.inner.set_reference_space_type(reference_space_type)
}
pub fn just_reset_reference_space(&mut self) -> bool {
todo!()
}
/// Returns a list of points, ordered clockwise, that define the playspace boundary. Only
/// available when the reference space is set to `BoundedFloor`. Y component is always 0.
pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> {
self.inner.bounds_geometry()
}
pub fn views_poses(&self) -> Vec<XrPose> {
self.inner.views_poses()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_pose(&self) -> [Option<XrPose>; 2] {
self.inner.hands_pose()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] |
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The
/// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to
/// the right hand.
pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] {
self.inner.hands_target_ray()
}
/// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The
/// origin is between the eyes for head-mounted displays and the center of the screen for
/// handheld devices.
pub fn viewer_target_ray(&self) -> XrPose {
self.inner.viewer_target_ray()
}
// future extensions:
// * eye tracking
// * lower face tracking
// * AR face tracking
// * body/skeletal trackers
// * scene understanding (anchors, planes, meshes)
}
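// Illustrative sketch, not part of the original file: a typical read path once
// a backend hands out an XrTrackingSource (wiring it into the ECS is left to
// backends such as bevy_openxr).
fn log_hands(tracking: &XrTrackingSource) {
    let [left, right] = tracking.hands_pose();
    if let Some(pose) = left {
        println!("left hand at {:?}", pose.position);
    }
    if let Some(pose) = right {
        println!("right hand at {:?}", pose.position);
    }
}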
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrHandType {
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub enum XrButtonState {
Default,
Touched,
Pressed,
}
impl Default for XrButtonState {
fn default() -> Self {
Self::Default
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionType {
/// Convenience type that groups click, touch and value actions for a single button.
/// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted.
Button {
touch: bool,
},
Binary,
Scalar,
/// Convenience type that groups x and y axes for a touchpad or thumbstick action.
/// The last segment of the path (`/x` or `/y`) must be omitted.
Vec2D,
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionState {
Button { state: XrButtonState, value: f32 },
Binary(bool),
Scalar(f32),
Vec2D(Vec2),
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XrActionDescriptor {
pub name: String,
pub action_type: XrActionType,
}
/// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be
/// set to false, but if they are set to true and the interaction profile does not support them,
/// the profile will be disabled completely.
pub struct XrProfileDescriptor {
pub profile: String,
pub bindings: Vec<(XrActionDescriptor, String)>,
pub tracked: bool,
pub has_haptics: bool,
}
pub struct XrActionSet {
current_states: HashMap<String, XrActionState>,
previous_states: HashMap<String, XrActionState>,
}
impl XrActionSet {
pub fn state(&self, action: &str) -> Option<XrActionState> {
self.current_states.get(action).cloned()
}
pub fn button_state(&self, action: &str) -> XrButtonState {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state
} else {
XrButtonState::Default
}
}
pub fn button_touched(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state != XrButtonState::Default
} else {
false
}
}
pub fn button_pressed(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state == XrButtonState::Pressed
} else {
false
}
}
fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> {
if let (
Some(XrActionState::Button {
state: current_state,
..
}),
Some(XrActionState::Button {
state: previous_state,
..
}),
) = (
self.current_states.get(action),
self.previous_states.get(action),
) {
Some((*current_state, *previous_state))
} else {
None
}
}
pub fn button_just_touched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_untouched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_pressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn button_just_unpressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn binary_value(&self, action: &str) -> bool {
if let Some(XrActionState::Binary(value)) = self.current_states.get(action) {
*value
} else {
self.button_pressed(action)
}
}
pub fn scalar_value(&self, action: &str) -> f32 {
if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) =
self.current_states.get(action)
{
*value
} else {
0.0
}
}
pub fn vec_2d_value(&self, action: &str) -> Vec2 {
if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) {
*value
} else {
Vec2::ZERO
}
}
pub fn set(&mut self, states: HashMap<String, XrActionState>) {
self.previous_states = self.current_states.clone();
self.current_states = states;
}
pub fn clear(&mut self) {
self.current_states.clear();
self.previous_states.clear();
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrVibrationEventType {
Apply {
duration: Duration,
frequency: f32,
amplitude: f32,
},
Stop,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct XrVibrationEvent {
pub hand: XrHandType,
pub command: XrVibrationEventType,
}
/// Active interaction profiles. The format is backend-specific. They can be used to choose the
/// controller 3D models to display.
/// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles.
/// The correct 3D model to display can be decided depending on if skeletal hand tracking data is
/// available or not.
#[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)]
pub struct XrProfiles {
pub left_hand: Option<String>,
pub right_hand: Option<String>,
}
| {
self.inner.hands_skeleton_pose()
} | identifier_body |
interaction.rs | use bevy_math::{Mat4, Quat, Vec2, Vec3};
use bevy_utils::Duration;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
ops::{Deref, Mul},
};
// Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved
// using `XrTrackingSource::hands_pose()`.
pub const XR_HAND_JOINT_WRIST: usize = 0;
pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1;
pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2;
pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3;
pub const XR_HAND_JOINT_THUMB_TIP: usize = 4;
pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5;
pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6;
pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7;
pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8;
pub const XR_HAND_JOINT_INDEX_TIP: usize = 9;
pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10;
pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11;
pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12;
pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13;
pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14;
pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15;
pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16;
pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17;
pub const XR_HAND_JOINT_RING_DISTAL: usize = 18;
pub const XR_HAND_JOINT_RING_TIP: usize = 19;
pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20;
pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21;
pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22;
pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23;
pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24;
// To be verified: in all useful instances, when the orientation is valid, the position is also
// valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model.
// In case of hand tracking, when a joint is estimated, both pose and orientation are available.
#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)]
pub struct XrRigidTransform {
pub position: Vec3,
pub orientation: Quat,
}
impl Mul for XrRigidTransform {
type Output = XrRigidTransform;
fn mul(self, rhs: Self) -> Self::Output {
XrRigidTransform {
position: self.position + self.orientation * rhs.position,
orientation: self.orientation * rhs.orientation,
}
}
}
impl XrRigidTransform {
pub fn to_mat4(&self) -> Mat4 {
todo!()
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrPose {
pub transform: XrRigidTransform,
pub linear_velocity: Option<Vec3>,
pub angular_velocity: Option<Vec3>,
pub emulated_position: bool,
}
impl Deref for XrPose {
type Target = XrRigidTransform;
fn deref(&self) -> &Self::Target {
&self.transform
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrJointPose {
pub pose: XrPose,
/// Radius of a sphere placed at the center of the joint that roughly touches the skin on both
/// sides of the hand.
pub radius: f32,
}
impl Deref for XrJointPose {
type Target = XrPose;
fn deref(&self) -> &Self::Target {
&self.pose
}
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrReferenceSpaceType {
/// The coordinate system (position and orientation) is set as the headset pose at startup or
/// after a recenter. This should be used only for experiences where the user is laid down.
Viewer,
/// The coordinate system (position and gravity-aligned orientation) is calculated from the
/// headset pose at startup or after a recenter. This is for seated experiences.
Local,
/// The coordinate system (position and orientation) corresponds to the center of a rectangle at
/// floor level, with +Y up. This is for standing or room-scale experiences.
Stage,
}
pub mod implementation {
use super::XrReferenceSpaceType;
use crate::{interaction::XrPose, XrJointPose};
use bevy_math::Vec3;
pub trait XrTrackingSourceBackend: Send + Sync {
fn reference_space_type(&self) -> XrReferenceSpaceType;
fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool;
fn bounds_geometry(&self) -> Option<Vec<Vec3>>;
fn views_poses(&self) -> Vec<XrPose>;
fn hands_pose(&self) -> [Option<XrPose>; 2];
fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2];
fn hands_target_ray(&self) -> [Option<XrPose>; 2];
fn viewer_target_ray(&self) -> XrPose;
}
}
/// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best
/// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary
/// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState.
pub struct XrTrackingSource {
inner: Box<dyn implementation::XrTrackingSourceBackend>,
}
impl XrTrackingSource {
pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self {
Self { inner: backend }
}
pub fn reference_space_type(&self) -> XrReferenceSpaceType {
self.inner.reference_space_type()
}
/// Returns true if the tracking mode has been set correctly. If false is returned the tracking
/// mode is not supported and another one must be chosen.
pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool {
self.inner.set_reference_space_type(reference_space_type)
}
pub fn just_reset_reference_space(&mut self) -> bool {
todo!()
}
/// Returns a list of points, ordered clockwise, that define the playspace boundary. Only
/// available when the reference space is set to `BoundedFloor`. Y component is always 0.
pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> {
self.inner.bounds_geometry()
}
pub fn views_poses(&self) -> Vec<XrPose> {
self.inner.views_poses()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_pose(&self) -> [Option<XrPose>; 2] {
self.inner.hands_pose()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] {
self.inner.hands_skeleton_pose()
}
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The
/// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to
/// the right hand.
pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] {
self.inner.hands_target_ray()
}
/// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The
/// origin is between the eyes for head-mounted displays and the center of the screen for
/// handheld devices.
pub fn viewer_target_ray(&self) -> XrPose {
self.inner.viewer_target_ray()
}
// future extensions:
// * eye tracking
// * lower face tracking
// * AR face tracking
// * body/skeletal trackers
// * scene understanding (anchors, planes, meshes)
}
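// Illustrative sketch, not part of the original file: requesting room-scale
// tracking and falling back to a seated space when the runtime rejects it,
// following the set_reference_space_type() contract above.
fn choose_space(source: &mut XrTrackingSource) -> XrReferenceSpaceType {
    if source.set_reference_space_type(XrReferenceSpaceType::Stage) {
        XrReferenceSpaceType::Stage
    } else {
        source.set_reference_space_type(XrReferenceSpaceType::Local);
        XrReferenceSpaceType::Local
    }
}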
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrHandType {
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub enum XrButtonState {
Default,
Touched,
Pressed,
}
impl Default for XrButtonState {
fn default() -> Self {
Self::Default
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionType {
/// Convenience type that groups click, touch and value actions for a single button.
/// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted.
Button {
touch: bool,
},
Binary,
Scalar,
/// Convenience type that groups x and y axes for a touchpad or thumbstick action.
/// The last segment of the path (`/x` or `/y`) must be omitted.
Vec2D,
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionState {
Button { state: XrButtonState, value: f32 },
Binary(bool),
Scalar(f32),
Vec2D(Vec2),
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XrActionDescriptor {
pub name: String,
pub action_type: XrActionType,
}
/// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be
/// set to false, but if they are set to true and the interaction profile does not support them,
/// the profile will be disabled completely.
pub struct XrProfileDescriptor {
pub profile: String,
pub bindings: Vec<(XrActionDescriptor, String)>,
pub tracked: bool,
pub has_haptics: bool,
}
pub struct XrActionSet {
current_states: HashMap<String, XrActionState>,
previous_states: HashMap<String, XrActionState>,
}
impl XrActionSet {
pub fn state(&self, action: &str) -> Option<XrActionState> {
self.current_states.get(action).cloned()
}
pub fn button_state(&self, action: &str) -> XrButtonState {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) | else {
XrButtonState::Default
}
}
pub fn button_touched(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state != XrButtonState::Default
} else {
false
}
}
pub fn button_pressed(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state == XrButtonState::Pressed
} else {
false
}
}
fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> {
if let (
Some(XrActionState::Button {
state: current_state,
..
}),
Some(XrActionState::Button {
state: previous_state,
..
}),
) = (
self.current_states.get(action),
self.previous_states.get(action),
) {
Some((*current_state, *previous_state))
} else {
None
}
}
pub fn button_just_touched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_untouched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_pressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn button_just_unpressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn binary_value(&self, action: &str) -> bool {
if let Some(XrActionState::Binary(value)) = self.current_states.get(action) {
*value
} else {
self.button_pressed(action)
}
}
pub fn scalar_value(&self, action: &str) -> f32 {
if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) =
self.current_states.get(action)
{
*value
} else {
0.0
}
}
pub fn vec_2d_value(&self, action: &str) -> Vec2 {
if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) {
*value
} else {
Vec2::ZERO
}
}
pub fn set(&mut self, states: HashMap<String, XrActionState>) {
self.previous_states = self.current_states.clone();
self.current_states = states;
}
pub fn clear(&mut self) {
self.current_states.clear();
self.previous_states.clear();
}
}
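// Illustrative sketch, not part of the original file: driving the edge
// detectors above. set() must be called once per frame with freshly polled
// states; the previous frame is kept internally for the just_* transition
// checks. The action names are hypothetical.
fn poll_frame(actions: &mut XrActionSet, polled: HashMap<String, XrActionState>) {
    actions.set(polled);
    if actions.button_just_pressed("trigger") {
        println!("trigger went down this frame");
    }
    let stick = actions.vec_2d_value("thumbstick");
    println!("thumbstick: ({}, {})", stick.x, stick.y);
}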
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrVibrationEventType {
Apply {
duration: Duration,
frequency: f32,
amplitude: f32,
},
Stop,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct XrVibrationEvent {
pub hand: XrHandType,
pub command: XrVibrationEventType,
}
/// Active interaction profiles. The format is backend-specific. They can be used to choose the
/// controller 3D models to display.
/// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles.
/// The correct 3D model to display can be decided depending on if skeletal hand tracking data is
/// available or not.
#[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)]
pub struct XrProfiles {
pub left_hand: Option<String>,
pub right_hand: Option<String>,
}
| {
*state
} | conditional_block |
interaction.rs | use bevy_math::{Mat4, Quat, Vec2, Vec3};
use bevy_utils::Duration;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
ops::{Deref, Mul},
};
// Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved
// using `XrTrackingSource::hands_pose()`.
pub const XR_HAND_JOINT_WRIST: usize = 0;
pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1;
pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2;
pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3;
pub const XR_HAND_JOINT_THUMB_TIP: usize = 4;
pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5;
pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6;
pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7;
pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8;
pub const XR_HAND_JOINT_INDEX_TIP: usize = 9;
pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10;
pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11;
pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12;
pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13;
pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14;
pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15;
pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16;
pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17;
pub const XR_HAND_JOINT_RING_DISTAL: usize = 18;
pub const XR_HAND_JOINT_RING_TIP: usize = 19;
pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20;
pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21;
pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22;
pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23;
pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24;
// To be verified: in all useful instances, when the orientation is valid, the position is also
// valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model.
// In case of hand tracking, when a joint is estimated, both pose and orientation are available.
#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)]
pub struct XrRigidTransform {
pub position: Vec3,
pub orientation: Quat,
}
impl Mul for XrRigidTransform {
type Output = XrRigidTransform;
fn mul(self, rhs: Self) -> Self::Output {
XrRigidTransform {
position: self.position + self.orientation * rhs.position,
orientation: self.orientation * rhs.orientation,
}
}
}
impl XrRigidTransform {
pub fn to_mat4(&self) -> Mat4 {
todo!()
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrPose {
pub transform: XrRigidTransform,
pub linear_velocity: Option<Vec3>,
pub angular_velocity: Option<Vec3>,
pub emulated_position: bool,
}
impl Deref for XrPose {
type Target = XrRigidTransform;
fn deref(&self) -> &Self::Target {
&self.transform
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrJointPose {
pub pose: XrPose,
/// Radius of a sphere placed at the center of the joint that roughly touches the skin on both
/// sides of the hand.
pub radius: f32,
}
impl Deref for XrJointPose {
type Target = XrPose;
fn deref(&self) -> &Self::Target {
&self.pose
}
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrReferenceSpaceType {
/// The coordinate system (position and orientation) is set as the headset pose at startup or
/// after a recenter. This should be used only for experiences where the user is laid down.
Viewer,
/// The coordinate system (position and gravity-aligned orientation) is calculated from the
/// headset pose at startup or after a recenter. This is for seated experiences.
Local,
/// The coordinate system (position and orientation) corresponds to the center of a rectangle at
/// floor level, with +Y up. This is for standing or room-scale experiences.
Stage,
}
pub mod implementation {
use super::XrReferenceSpaceType;
use crate::{interaction::XrPose, XrJointPose};
use bevy_math::Vec3;
pub trait XrTrackingSourceBackend: Send + Sync {
fn reference_space_type(&self) -> XrReferenceSpaceType;
fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool;
fn bounds_geometry(&self) -> Option<Vec<Vec3>>;
fn views_poses(&self) -> Vec<XrPose>;
fn hands_pose(&self) -> [Option<XrPose>; 2];
fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2];
fn hands_target_ray(&self) -> [Option<XrPose>; 2];
fn viewer_target_ray(&self) -> XrPose;
}
}
/// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best
/// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary
/// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState.
pub struct XrTrackingSource {
inner: Box<dyn implementation::XrTrackingSourceBackend>,
}
impl XrTrackingSource {
pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self {
Self { inner: backend }
}
pub fn reference_space_type(&self) -> XrReferenceSpaceType {
self.inner.reference_space_type()
}
/// Returns true if the tracking mode has been set correctly. If false is returned the tracking
/// mode is not supported and another one must be chosen.
pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool {
self.inner.set_reference_space_type(reference_space_type)
}
pub fn just_reset_reference_space(&mut self) -> bool {
todo!()
}
/// Returns a list of points, ordered clockwise, that define the playspace boundary. Only
/// available when the reference space is set to `BoundedFloor`. Y component is always 0.
pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> {
self.inner.bounds_geometry()
}
pub fn views_poses(&self) -> Vec<XrPose> {
self.inner.views_poses()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_pose(&self) -> [Option<XrPose>; 2] {
self.inner.hands_pose()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] {
self.inner.hands_skeleton_pose()
}
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The
/// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to
/// the right hand.
pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] {
self.inner.hands_target_ray()
}
/// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The
/// origin is between the eyes for head-mounted displays and the center of the screen for
/// handheld devices.
pub fn viewer_target_ray(&self) -> XrPose {
self.inner.viewer_target_ray()
}
// future extensions:
// * eye tracking
// * lower face tracking
// * AR face tracking
// * body/skeletal trackers
// * scene understanding (anchors, planes, meshes)
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrHandType {
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub enum XrButtonState {
Default,
Touched,
Pressed,
}
impl Default for XrButtonState {
fn default() -> Self {
Self::Default
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionType {
/// Convenience type that groups click, touch and value actions for a single button.
/// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted.
Button {
touch: bool,
},
Binary,
Scalar,
/// Convenience type that groups x and y axes for a touchpad or thumbstick action.
/// The last segment of the path (`/x` or `/y`) must be omitted.
Vec2D,
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionState {
Button { state: XrButtonState, value: f32 },
Binary(bool),
Scalar(f32),
Vec2D(Vec2),
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XrActionDescriptor {
pub name: String,
pub action_type: XrActionType,
}
/// List bindings related to a single interaction profile. `tracked` and `has_haptics` can always be
/// set to false, but if they are set to true and the interaction profile does not support them,
/// the profile will be disabled completely.
pub struct XrProfileDescriptor {
pub profile: String,
pub bindings: Vec<(XrActionDescriptor, String)>,
pub tracked: bool,
pub has_haptics: bool,
}
pub struct XrActionSet {
current_states: HashMap<String, XrActionState>,
previous_states: HashMap<String, XrActionState>,
}
impl XrActionSet {
pub fn state(&self, action: &str) -> Option<XrActionState> {
self.current_states.get(action).cloned()
}
pub fn button_state(&self, action: &str) -> XrButtonState {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state
} else {
XrButtonState::Default
}
}
pub fn button_touched(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state != XrButtonState::Default
} else {
false
}
}
pub fn button_pressed(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state == XrButtonState::Pressed
} else {
false
}
}
fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> {
if let (
Some(XrActionState::Button {
state: current_state,
..
}),
Some(XrActionState::Button {
state: previous_state,
..
}),
) = (
self.current_states.get(action),
self.previous_states.get(action),
) {
Some((*current_state, *previous_state))
} else {
None
}
}
pub fn button_just_touched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_untouched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_pressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn button_just_unpressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn binary_value(&self, action: &str) -> bool {
if let Some(XrActionState::Binary(value)) = self.current_states.get(action) {
*value
} else {
self.button_pressed(action)
}
}
pub fn scalar_value(&self, action: &str) -> f32 {
if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) =
self.current_states.get(action)
{
*value
} else {
0.0
}
}
pub fn vec_2d_value(&self, action: &str) -> Vec2 {
if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) {
*value
} else {
Vec2::ZERO
}
}
pub fn set(&mut self, states: HashMap<String, XrActionState>) {
self.previous_states = self.current_states.clone();
self.current_states = states;
}
pub fn clear(&mut self) {
self.current_states.clear();
self.previous_states.clear();
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrVibrationEventType {
Apply {
duration: Duration,
frequency: f32,
amplitude: f32,
},
Stop,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct XrVibrationEvent {
pub hand: XrHandType,
pub command: XrVibrationEventType,
}
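// Illustrative sketch, not part of the original file: a short haptic pulse
// for the right controller. Treating duration/frequency/amplitude as seconds,
// Hz and 0.0..=1.0 follows the OpenXR convention — an assumption, since this
// file does not state units.
fn short_pulse() -> XrVibrationEvent {
    XrVibrationEvent {
        hand: XrHandType::Right,
        command: XrVibrationEventType::Apply {
            duration: Duration::from_millis(50),
            frequency: 200.0,
            amplitude: 0.7,
        },
    }
}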
/// Active interaction profiles. The format is backend-specific. They can be used to choose the
/// controller 3D models to display.
/// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles.
/// The correct 3D model to display can be decided depending on if skeletal hand tracking data is
/// available or not.
#[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)]
pub struct | {
pub left_hand: Option<String>,
pub right_hand: Option<String>,
}
| XrProfiles | identifier_name |
interaction.rs | use bevy_math::{Mat4, Quat, Vec2, Vec3};
use bevy_utils::Duration;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
ops::{Deref, Mul},
};
// Note: indices follow WebXR convention. OpenXR's palm joint is missing, but it can be retrieved
// using `XrTrackingSource::hands_pose()`.
pub const XR_HAND_JOINT_WRIST: usize = 0;
pub const XR_HAND_JOINT_THUMB_METACARPAL: usize = 1;
pub const XR_HAND_JOINT_THUMB_PROXIMAL: usize = 2;
pub const XR_HAND_JOINT_THUMB_DISTAL: usize = 3;
pub const XR_HAND_JOINT_THUMB_TIP: usize = 4;
pub const XR_HAND_JOINT_INDEX_METACARPAL: usize = 5;
pub const XR_HAND_JOINT_INDEX_PROXIMAL: usize = 6;
pub const XR_HAND_JOINT_INDEX_INTERMEDIATE: usize = 7;
pub const XR_HAND_JOINT_INDEX_DISTAL: usize = 8;
pub const XR_HAND_JOINT_INDEX_TIP: usize = 9;
pub const XR_HAND_JOINT_MIDDLE_METACARPAL: usize = 10;
pub const XR_HAND_JOINT_MIDDLE_PROXIMAL: usize = 11;
pub const XR_HAND_JOINT_MIDDLE_INTERMEDIATE: usize = 12;
pub const XR_HAND_JOINT_MIDDLE_DISTAL: usize = 13;
pub const XR_HAND_JOINT_MIDDLE_TIP: usize = 14;
pub const XR_HAND_JOINT_RING_METACARPAL: usize = 15;
pub const XR_HAND_JOINT_RING_PROXIMAL: usize = 16;
pub const XR_HAND_JOINT_RING_INTERMEDIATE: usize = 17;
pub const XR_HAND_JOINT_RING_DISTAL: usize = 18;
pub const XR_HAND_JOINT_RING_TIP: usize = 19;
pub const XR_HAND_JOINT_LITTLE_METACARPAL: usize = 20;
pub const XR_HAND_JOINT_LITTLE_PROXIMAL: usize = 21;
pub const XR_HAND_JOINT_LITTLE_INTERMEDIATE: usize = 22;
pub const XR_HAND_JOINT_LITTLE_DISTAL: usize = 23;
pub const XR_HAND_JOINT_LITTLE_TIP: usize = 24;
// To be verified: in all useful instances, when the orientation is valid, the position is also
// valid. In case of 3DOF headsets, position should always be emulated using a neck and arm model.
// In case of hand tracking, when a joint is estimated, both pose and orientation are available.
#[derive(Clone, Copy, Default, Debug, Serialize, Deserialize)]
pub struct XrRigidTransform {
pub position: Vec3,
pub orientation: Quat,
}
impl Mul for XrRigidTransform {
type Output = XrRigidTransform;
fn mul(self, rhs: Self) -> Self::Output {
XrRigidTransform {
position: self.position + self.orientation * rhs.position,
orientation: self.orientation * rhs.orientation,
}
}
}
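// Illustrative sketch, not part of the original file: the Mul impl above
// composes rigid transforms like matrices — (parent * child) applies child
// first, then parent, exactly the position formula in mul().
fn compose_example() {
    let parent = XrRigidTransform {
        position: Vec3::new(0.0, 1.0, 0.0),
        orientation: Quat::from_rotation_y(std::f32::consts::FRAC_PI_2),
    };
    let child = XrRigidTransform {
        position: Vec3::new(1.0, 0.0, 0.0),
        orientation: Quat::IDENTITY,
    };
    let world = parent * child;
    // The child's +X offset is rotated to -Z by the 90-degree yaw, then
    // lifted by the parent's +Y translation: (0.0, 1.0, -1.0).
    println!("{:?}", world.position);
}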
impl XrRigidTransform {
pub fn to_mat4(&self) -> Mat4 {
todo!()
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrPose {
pub transform: XrRigidTransform,
pub linear_velocity: Option<Vec3>,
pub angular_velocity: Option<Vec3>,
pub emulated_position: bool,
}
impl Deref for XrPose {
type Target = XrRigidTransform;
fn deref(&self) -> &Self::Target {
&self.transform
}
}
#[derive(Clone, Default, Debug, Serialize, Deserialize)]
pub struct XrJointPose {
pub pose: XrPose,
/// Radius of a sphere placed at the center of the joint that roughly touches the skin on both
/// sides of the hand.
pub radius: f32,
}
impl Deref for XrJointPose {
type Target = XrPose;
fn deref(&self) -> &Self::Target {
&self.pose
}
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrReferenceSpaceType {
/// The coordinate system (position and orientation) is set as the headset pose at startup or
/// after a recenter. This should be used only for experiences where the user is laid down.
Viewer,
/// The coordinate system (position and gravity-aligned orientation) is calculated from the
/// headset pose at startup or after a recenter. This is for seated experiences.
Local,
/// The coordinate system (position and orientation) corresponds to the center of a rectangle at
/// floor level, with +Y up. This is for standing or room-scale experiences.
Stage,
}
pub mod implementation {
use super::XrReferenceSpaceType;
use crate::{interaction::XrPose, XrJointPose};
use bevy_math::Vec3;
pub trait XrTrackingSourceBackend: Send + Sync {
fn reference_space_type(&self) -> XrReferenceSpaceType;
fn set_reference_space_type(&self, reference_space_type: XrReferenceSpaceType) -> bool;
fn bounds_geometry(&self) -> Option<Vec<Vec3>>;
fn views_poses(&self) -> Vec<XrPose>;
fn hands_pose(&self) -> [Option<XrPose>; 2];
fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2];
fn hands_target_ray(&self) -> [Option<XrPose>; 2];
fn viewer_target_ray(&self) -> XrPose;
}
}
/// Component used to poll tracking data. Tracking data is obtained "on-demand" to get the best
/// precision possible. Poses are predicted for the next V-Sync. To obtain poses for an arbitrary
/// point in time, `bevy_openxr` backend provides this functionality with OpenXrTrackingState.
pub struct XrTrackingSource {
inner: Box<dyn implementation::XrTrackingSourceBackend>,
}
impl XrTrackingSource {
pub fn new(backend: Box<dyn implementation::XrTrackingSourceBackend>) -> Self {
Self { inner: backend }
}
pub fn reference_space_type(&self) -> XrReferenceSpaceType {
self.inner.reference_space_type()
}
/// Returns true if the tracking mode has been set correctly. If false is returned the tracking
/// mode is not supported and another one must be chosen.
pub fn set_reference_space_type(&mut self, reference_space_type: XrReferenceSpaceType) -> bool {
self.inner.set_reference_space_type(reference_space_type)
}
pub fn just_reset_reference_space(&mut self) -> bool {
todo!()
}
/// Returns a list of points, ordered clockwise, that define the playspace boundary. Only
/// available when the reference space is set to `BoundedFloor`. Y component is always 0.
pub fn bounds_geometry(&self) -> Option<Vec<Vec3>> {
self.inner.bounds_geometry()
}
pub fn views_poses(&self) -> Vec<XrPose> {
self.inner.views_poses()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_pose(&self) -> [Option<XrPose>; 2] {
self.inner.hands_pose()
}
/// Index 0 corresponds to the left hand, index 1 corresponds to the right hand.
pub fn hands_skeleton_pose(&self) -> [Option<Vec<XrJointPose>>; 2] {
self.inner.hands_skeleton_pose()
}
/// Returns poses that can be used to render a target ray or cursor. The ray is along -Z. The
/// behavior is vendor-specific. Index 0 corresponds to the left hand, index 1 corresponds to
/// the right hand.
pub fn hand_target_ray(&self) -> [Option<XrPose>; 2] {
self.inner.hands_target_ray()
}
/// Returns a pose that can be used to render a target ray or cursor. The ray is along -Z. The
/// origin is between the eyes for head-mounted displays and the center of the screen for
/// handheld devices.
pub fn viewer_target_ray(&self) -> XrPose {
self.inner.viewer_target_ray()
}
// future extensions:
// * eye tracking
// * lower face tracking
// * AR face tracking
// * body/skeletal trackers
// * scene understanding (anchors, planes, meshes)
}
#[derive(Clone, Copy, Eq, PartialEq, Hash, Debug, Serialize, Deserialize)]
pub enum XrHandType {
Left,
Right,
}
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Serialize, Deserialize)]
pub enum XrButtonState {
Default,
Touched,
Pressed,
}
impl Default for XrButtonState {
fn default() -> Self {
Self::Default
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionType {
/// Convenience type that groups click, touch and value actions for a single button.
/// The last segment of the path (`/click`, `/touch` or `/value`) must be omitted.
Button {
touch: bool,
},
Binary,
Scalar,
/// Convenience type that groups x and y axes for a touchpad or thumbstick action.
/// The last segment of the path (`/x` or `/y`) must be omitted.
Vec2D,
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrActionState {
Button { state: XrButtonState, value: f32 },
Binary(bool),
Scalar(f32),
Vec2D(Vec2),
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct XrActionDescriptor {
pub name: String,
pub action_type: XrActionType,
}
/// Lists bindings related to a single interaction profile. `tracked` and `has_haptics` can
/// always be set to false, but if they are set to true and the interaction profile does not
/// support them, the profile will be disabled completely.
pub struct XrProfileDescriptor {
pub profile: String,
pub bindings: Vec<(XrActionDescriptor, String)>,
pub tracked: bool,
pub has_haptics: bool,
}
pub struct XrActionSet {
current_states: HashMap<String, XrActionState>,
previous_states: HashMap<String, XrActionState>,
}
impl XrActionSet {
pub fn state(&self, action: &str) -> Option<XrActionState> {
self.current_states.get(action).cloned()
}
pub fn button_state(&self, action: &str) -> XrButtonState {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state
} else {
XrButtonState::Default
}
}
pub fn button_touched(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state != XrButtonState::Default
} else {
false
}
}
pub fn button_pressed(&self, action: &str) -> bool {
if let Some(XrActionState::Button { state, .. }) = self.current_states.get(action) {
*state == XrButtonState::Pressed
} else {
false
}
}
fn button_states(&self, action: &str) -> Option<(XrButtonState, XrButtonState)> {
if let (
Some(XrActionState::Button {
state: current_state,
..
}),
Some(XrActionState::Button {
state: previous_state,
..
}),
) = (
self.current_states.get(action),
self.previous_states.get(action),
) {
Some((*current_state, *previous_state))
} else {
None
}
}
pub fn button_just_touched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Default && prev == XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_untouched(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Default && prev != XrButtonState::Default)
.unwrap_or(false)
}
pub fn button_just_pressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur == XrButtonState::Pressed && prev != XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn button_just_unpressed(&self, action: &str) -> bool {
self.button_states(action)
.map(|(cur, prev)| cur != XrButtonState::Pressed && prev == XrButtonState::Pressed)
.unwrap_or(false)
}
pub fn binary_value(&self, action: &str) -> bool {
if let Some(XrActionState::Binary(value)) = self.current_states.get(action) {
*value
} else {
self.button_pressed(action)
}
}
pub fn scalar_value(&self, action: &str) -> f32 {
if let Some(XrActionState::Scalar(value) | XrActionState::Button { value, .. }) =
self.current_states.get(action)
{
*value
} else {
0.0
}
}
pub fn vec_2d_value(&self, action: &str) -> Vec2 {
if let Some(XrActionState::Vec2D(value)) = self.current_states.get(action) {
*value
} else {
Vec2::ZERO
}
}
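/// Replaces the current states with `states`, keeping the old current states as the
/// previous states. The `button_just_*` methods compare these two snapshots, so the
/// backend is expected to call this once per input update (an assumption based on how
/// the snapshots are used here, not something enforced by this type).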
pub fn set(&mut self, states: HashMap<String, XrActionState>) {
self.previous_states = self.current_states.clone();
self.current_states = states;
}
pub fn clear(&mut self) {
self.current_states.clear();
self.previous_states.clear();
}
}
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub enum XrVibrationEventType {
Apply {
duration: Duration,
frequency: f32,
amplitude: f32,
},
Stop,
}
#[derive(Clone, PartialEq, Debug, Serialize, Deserialize)]
pub struct XrVibrationEvent {
pub hand: XrHandType,
pub command: XrVibrationEventType,
}
/// Active interaction profiles. The format is backend-specific. They can be used to choose the
/// controller 3D models to display.
/// Note: in case skeletal hand tracking is active, the profiles still point to controller profiles.
/// The correct 3D model to display can be decided depending on whether skeletal hand tracking
/// data is available or not.
#[derive(Clone, PartialEq, Default, Debug, Serialize, Deserialize)]
pub struct XrProfiles {
pub left_hand: Option<String>,
pub right_hand: Option<String>,
}
futures.rs
//! A futures executor as an event source
//!
//! Only available with the `executor` cargo feature of `calloop`.
//!
//! This executor is intended for light futures, which will be polled as part of your
//! event loop. Such futures may be waiting for IO, or for some external computation on
//! another thread, for example.
//!
//! You can create a new executor using the `executor` function, which creates a pair
//! `(Executor<T>, Scheduler<T>)` to handle futures that all evaluate to type `T`. The
//! executor should be inserted into your event loop, and will yield the return values of
//! the futures as they finish into your callback. The scheduler can be cloned and used
//! to send futures to be executed into the executor. A generic executor can be obtained
//! by choosing `T = ()` and letting futures handle the forwarding of their return values
//! (if any) by their own means.
//!
//! **Note:** The futures must have their own means of being woken up, as this executor is,
//! by itself, not I/O aware. See [`LoopHandle::adapt_io`](crate::LoopHandle#method.adapt_io)
//! for that, or you can use some other mechanism if you prefer.
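//!
//! A minimal usage sketch (mirroring the unit test at the bottom of this file; the
//! `calloop::futures::executor` path is an assumption, and error handling is elided):
//!
//! ```ignore
//! let mut event_loop = calloop::EventLoop::<u32>::try_new().unwrap();
//! let (executor, scheduler) = calloop::futures::executor::<u32>().unwrap();
//! // Yielded values of finished futures arrive in this callback.
//! event_loop
//!     .handle()
//!     .insert_source(executor, |value, &mut (), state| *state = value)
//!     .unwrap();
//! scheduler.schedule(async { 42 }).unwrap();
//! let mut result = 0;
//! event_loop
//!     .dispatch(Some(std::time::Duration::ZERO), &mut result)
//!     .unwrap();
//! assert_eq!(result, 42);
//! ```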
use async_task::{Builder, Runnable};
use slab::Slab;
use std::{
cell::RefCell,
future::Future,
rc::Rc,
sync::{
atomic::{AtomicBool, Ordering},
mpsc, Arc, Mutex,
},
task::Waker,
};
use crate::{
sources::{
channel::ChannelError,
ping::{make_ping, Ping, PingError, PingSource},
EventSource,
},
Poll, PostAction, Readiness, Token, TokenFactory,
};
/// A future executor as an event source
#[derive(Debug)]
pub struct Executor<T> {
/// Shared state between the executor and the scheduler.
state: Rc<State<T>>,
/// Notifies us when the executor is woken up.
ping: PingSource,
}
/// A scheduler to send futures to an executor
#[derive(Clone, Debug)]
pub struct Scheduler<T> {
/// Shared state between the executor and the scheduler.
state: Rc<State<T>>,
}
/// The inner state of the executor.
#[derive(Debug)]
struct State<T> {
/// The incoming queue of runnables to be executed.
incoming: mpsc::Receiver<Runnable<usize>>,
/// The sender corresponding to `incoming`.
sender: Arc<Sender>,
/// The list of currently active tasks.
///
/// This is set to `None` when the executor is destroyed.
active_tasks: RefCell<Option<Slab<Active<T>>>>,
}
/// Send a future to an executor.
///
/// This needs to be thread-safe, as it is called from a `Waker` that may be on a different thread.
#[derive(Debug)]
struct Sender {
/// The sender used to send runnables to the executor.
///
/// `mpsc::Sender` is `!Sync`, wrapping it in a `Mutex` makes it `Sync`.
sender: Mutex<mpsc::Sender<Runnable<usize>>>,
/// The ping source used to wake up the executor.
wake_up: Ping,
/// Whether the executor has already been woken.
notified: AtomicBool,
}
/// An active future or its result.
#[derive(Debug)]
enum Active<T> {
/// The future is currently being polled.
///
/// Waking this waker will insert the runnable into `incoming`.
Future(Waker),
/// The future has finished polling, and its result is stored here.
Finished(T),
}
impl<T> Active<T> {
fn is_finished(&self) -> bool {
matches!(self, Active::Finished(_))
}
}
impl<T> Scheduler<T> {
/// Sends the given future to the executor associated with this scheduler
///
/// Returns an error if the executor no longer exists.
pub fn schedule<Fut: 'static>(&self, future: Fut) -> Result<(), ExecutorDestroyed>
where
Fut: Future<Output = T>,
T: 'static,
{
/// Store this future's result in the executor.
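/// Implemented as a drop guard so the slot is also cleaned up when the future is
/// dropped (i.e. cancelled) before completing.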
struct StoreOnDrop<'a, T> {
index: usize,
value: Option<T>,
state: &'a State<T>,
}
impl<T> Drop for StoreOnDrop<'_, T> {
fn drop(&mut self) {
let mut active_tasks = self.state.active_tasks.borrow_mut();
if let Some(active_tasks) = active_tasks.as_mut() {
if let Some(value) = self.value.take() {
active_tasks[self.index] = Active::Finished(value);
} else {
// The future was dropped before it finished.
// Remove it from the active list.
active_tasks.remove(self.index);
}
}
}
}
fn assert_send_and_sync<T: Send + Sync>(_: &T) {}
let mut active_guard = self.state.active_tasks.borrow_mut();
let active_tasks = active_guard.as_mut().ok_or(ExecutorDestroyed)?;
// Wrap the future in another future that polls it and stores the result.
let index = active_tasks.vacant_key();
let future = {
let state = self.state.clone();
async move {
let mut guard = StoreOnDrop {
index,
value: None,
state: &state,
};
// Get the value of the future.
let value = future.await;
// Store it in the executor.
guard.value = Some(value);
}
};
// A schedule function that inserts the runnable into the incoming queue.
let schedule = {
let sender = self.state.sender.clone();
move |runnable| sender.send(runnable)
};
assert_send_and_sync(&schedule);
// Spawn the future.
let (runnable, task) = Builder::new()
.metadata(index)
.spawn_local(move |_| future, schedule);
// Insert the runnable into the set of active tasks.
active_tasks.insert(Active::Future(runnable.waker()));
drop(active_guard);
// Schedule the runnable and detach the task so it isn't cancellable.
runnable.schedule();
task.detach();
Ok(())
}
}
impl Sender {
/// Send a runnable to the executor.
fn send(&self, runnable: Runnable<usize>) {
// Send on the channel.
//
// All we do with the lock is call `send`, so there's no chance of any state being corrupted on
// panic. Therefore it's safe to ignore the mutex poison.
if let Err(e) = self
.sender
.lock()
.unwrap_or_else(|e| e.into_inner())
.send(runnable)
{
// The runnable must be dropped on its origin thread, since the original future might be
// !Send. This channel immediately sends it back to the Executor, which is pinned to the
// origin thread. The executor's Drop implementation will force all of the runnables to be
// dropped, therefore the channel should always be available. If we can't send the runnable,
// it indicates that the above behavior is broken and that unsoundness has occurred. The
// only option at this stage is to forget the runnable and leak the future.
std::mem::forget(e);
unreachable!("Attempted to send runnable to a stopped executor");
}
// If the executor is already awake, don't bother waking it up again.
if self.notified.swap(true, Ordering::SeqCst) {
return;
}
// Wake the executor.
self.wake_up.ping();
}
}
impl<T> Drop for Executor<T> {
fn drop(&mut self) {
let active_tasks = self.state.active_tasks.borrow_mut().take().unwrap();
// Wake all of the active tasks in order to destroy their runnables.
for (_, task) in active_tasks {
if let Active::Future(waker) = task {
// Don't let a panicking waker blow everything up.
//
// There is a chance that a future will panic and, during the unwinding process,
// drop this executor. However, since the future panicked, there is a possibility
// that the internal state of the waker will be invalid in such a way that the waker
// panics as well. Since this would be a panic during a panic, Rust will upgrade it
// into an abort.
//
// In the interest of not aborting without a good reason, we just drop the panic here.
std::panic::catch_unwind(|| waker.wake()).ok();
}
}
// Drain the queue in order to drop all of the runnables.
while self.state.incoming.try_recv().is_ok() {}
}
}
/// Error generated when trying to schedule a future after the
/// executor was destroyed.
#[derive(thiserror::Error, Debug)]
#[error("the executor was destroyed")]
pub struct ExecutorDestroyed;
/// Create a new executor, and its associated scheduler
///
/// May fail due to OS errors preventing calloop from setting up its internal pipes (if your
/// process has reached its file descriptor limit, for example).
pub fn executor<T>() -> crate::Result<(Executor<T>, Scheduler<T>)> {
let (sender, incoming) = mpsc::channel();
let (wake_up, ping) = make_ping()?;
let state = Rc::new(State {
incoming,
active_tasks: RefCell::new(Some(Slab::new())),
sender: Arc::new(Sender {
sender: Mutex::new(sender),
wake_up,
notified: AtomicBool::new(false),
}),
});
Ok((
Executor {
state: state.clone(),
ping,
},
Scheduler { state },
))
}
impl<T> EventSource for Executor<T> {
type Event = T;
type Metadata = ();
type Ret = ();
type Error = ExecutorError;
fn process_events<F>(
&mut self,
readiness: Readiness,
token: Token,
mut callback: F,
) -> Result<PostAction, Self::Error>
where
F: FnMut(T, &mut ()),
{
let state = &self.state;
// Set to the unnotified state.
state.sender.notified.store(false, Ordering::SeqCst);
let clear_readiness = {
let mut clear_readiness = false;
// Process runnables, but not too many at a time; better to move onto the next event quickly!
for _ in 0..1024 {
let runnable = match state.incoming.try_recv() {
Ok(runnable) => runnable,
Err(_) => {
// Make sure to clear the readiness if there are no more runnables.
clear_readiness = true;
break;
}
};
// Run the runnable.
let index = *runnable.metadata();
runnable.run();
// If the runnable finished with a result, call the callback.
let mut active_guard = state.active_tasks.borrow_mut();
let active_tasks = active_guard.as_mut().unwrap();
if let Some(state) = active_tasks.get(index) {
if state.is_finished() {
// Take out the state and provide it to the caller.
let result = match active_tasks.remove(index) {
Active::Finished(result) => result,
_ => unreachable!(),
};
callback(result, &mut ());
}
}
}
clear_readiness
};
// Clear the readiness of the ping source if there are no more runnables.
if clear_readiness {
self.ping
.process_events(readiness, token, |(), &mut ()| {})
.map_err(ExecutorError::WakeError)?;
}
Ok(PostAction::Continue)
}
fn register(&mut self, poll: &mut Poll, token_factory: &mut TokenFactory) -> crate::Result<()> {
self.ping.register(poll, token_factory)?;
Ok(())
}
fn reregister(
&mut self,
poll: &mut Poll,
token_factory: &mut TokenFactory,
) -> crate::Result<()> {
self.ping.reregister(poll, token_factory)?;
Ok(())
}
fn unregister(&mut self, poll: &mut Poll) -> crate::Result<()> {
self.ping.unregister(poll)?;
Ok(())
}
}
/// An error arising from processing events in an async executor event source.
#[derive(thiserror::Error, Debug)]
pub enum ExecutorError {
/// Error while reading new futures added via [`Scheduler::schedule()`].
#[error("error adding new futures")]
NewFutureError(ChannelError),
/// Error while processing wake events from existing futures.
#[error("error processing wake events")]
WakeError(PingError),
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ready() {
let mut event_loop = crate::EventLoop::<u32>::try_new().unwrap();
let handle = event_loop.handle();
let (exec, sched) = executor::<u32>().unwrap();
handle
.insert_source(exec, move |ret, &mut (), got| {
*got = ret;
})
.unwrap();
let mut got = 0;
let fut = async { 42 };
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
// the future is not yet inserted, and thus has not yet run
assert_eq!(got, 0);
sched.schedule(fut).unwrap();
event_loop
.dispatch(Some(::std::time::Duration::ZERO), &mut got)
.unwrap();
// the future has run
assert_eq!(got, 42);
}
}
path_through.rs
// This example is another version of `passthrough.rs` that uses the
// path strings instead of the file descriptors with `O_PATH` flag
// for referencing the underlying file entries.
// It has the advantage of being able to use the *standard* filesystem
// APIs in a straightforward way, but it also incurs an additional path
// resolution cost for each operation.
//
// This example is intended to be used as a template for implementing
// path-based filesystems such as those built on libfuse's high-level API.
use pico_args::Arguments;
use polyfuse::{
io::{Reader, Writer},
op,
reply::{Reply, ReplyAttr, ReplyEntry, ReplyOpen, ReplyWrite},
Context, DirEntry, FileAttr, Filesystem, Forget, Operation,
};
use slab::Slab;
use std::{
collections::hash_map::{Entry, HashMap},
convert::TryInto,
io,
os::unix::prelude::*,
path::{Path, PathBuf},
sync::Arc,
};
use tokio::{
fs::{File, OpenOptions, ReadDir},
sync::Mutex,
};
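// Assumed invocation, based on the argument parsing in `main` below:
//
//     path_through --source <SOURCE_DIR> <MOUNTPOINT>
//
// where `--source` defaults to the current directory.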
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt::init();
let mut args = Arguments::from_env();
let source: PathBuf = args
.opt_value_from_str(["-s", "--source"])?
.unwrap_or_else(|| std::env::current_dir().unwrap());
anyhow::ensure!(source.is_dir(), "the source path must be a directory");
let mountpoint: PathBuf = args
.free_from_str()?
.ok_or_else(|| anyhow::anyhow!("missing mountpoint"))?;
anyhow::ensure!(mountpoint.is_dir(), "the mountpoint must be a directory");
let fs = PathThrough::new(source)?;
polyfuse_tokio::mount(fs, mountpoint, &[]).await?;
Ok(())
}
type Ino = u64;
struct INode {
ino: Ino,
path: PathBuf,
refcount: u64,
}
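/// Bidirectional bookkeeping between inode numbers handed to the kernel and the underlying
/// paths: `map` resolves an inode number to its `INode`, while `path_to_ino` lets lookups
/// reuse the inode already assigned to a path.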
struct INodeTable {
map: HashMap<Ino, Arc<Mutex<INode>>>,
path_to_ino: HashMap<PathBuf, Ino>,
next_ino: u64,
}
impl INodeTable {
fn new() -> Self {
INodeTable {
map: HashMap::new(),
path_to_ino: HashMap::new(),
next_ino: 1, // inode numbers start at 1, and the first node is the root.
}
}
fn vacant_entry(&mut self) -> VacantEntry<'_> {
let ino = self.next_ino;
VacantEntry { table: self, ino }
}
fn get(&self, ino: Ino) -> Option<Arc<Mutex<INode>>> {
self.map.get(&ino).cloned()
}
fn get_path(&self, path: &Path) -> Option<Arc<Mutex<INode>>> {
let ino = self.path_to_ino.get(path).copied()?;
self.get(ino)
}
}
struct VacantEntry<'a> {
table: &'a mut INodeTable,
ino: Ino,
}
impl VacantEntry<'_> {
fn insert(mut self, inode: INode) {
let path = inode.path.clone();
self.table.map.insert(self.ino, Arc::new(Mutex::new(inode)));
self.table.path_to_ino.insert(path, self.ino);
self.table.next_ino += 1;
}
}
struct DirHandle {
read_dir: ReadDir,
last_entry: Option<DirEntry>,
offset: u64,
}
struct FileHandle {
file: File,
}
struct PathThrough {
source: PathBuf,
inodes: Mutex<INodeTable>,
dirs: Mutex<Slab<Arc<Mutex<DirHandle>>>>,
files: Mutex<Slab<Arc<Mutex<FileHandle>>>>,
}
impl PathThrough {
fn new(source: PathBuf) -> io::Result<Self> {
let source = source.canonicalize()?;
let mut inodes = INodeTable::new();
inodes.vacant_entry().insert(INode {
ino: 1,
path: PathBuf::new(),
refcount: u64::max_value() / 2,
});
Ok(Self {
source,
inodes: Mutex::new(inodes),
dirs: Mutex::default(),
files: Mutex::default(),
})
}
fn make_entry_out(&self, ino: Ino, attr: FileAttr) -> io::Result<ReplyEntry> {
let mut reply = ReplyEntry::default();
reply.ino(ino);
reply.attr(attr);
Ok(reply)
}
async fn get_attr(&self, path: impl AsRef<Path>) -> io::Result<FileAttr> {
let metadata = tokio::fs::symlink_metadata(self.source.join(path)).await?;
metadata
.try_into()
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
}
async fn do_lookup(&self, op: &op::Lookup<'_>) -> io::Result<ReplyEntry> {
let mut inodes = self.inodes.lock().await;
let parent = inodes.get(op.parent()).ok_or_else(no_entry)?;
let parent = parent.lock().await;
let path = parent.path.join(op.name());
let metadata = self.get_attr(&path).await?;
let ino;
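// Reuse the inode already assigned to this path (bumping its lookup count), or
// allocate a fresh one on first lookup.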
match inodes.get_path(&path) {
Some(inode) => {
let mut inode = inode.lock().await;
ino = inode.ino;
inode.refcount += 1;
}
None => {
let entry = inodes.vacant_entry();
ino = entry.ino;
entry.insert(INode {
ino,
path,
refcount: 1,
})
}
}
self.make_entry_out(ino, metadata)
}
async fn do_forget(&self, forgets: &[Forget]) {
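// A FORGET message tells us how many kernel references (`nlookup`) to drop for each
// inode; once an inode's refcount reaches zero it can be evicted from the table.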
let mut inodes = self.inodes.lock().await;
for forget in forgets {
if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) {
let refcount = {
let mut inode = entry.get_mut().lock().await;
inode.refcount = inode.refcount.saturating_sub(forget.nlookup());
inode.refcount
};
if refcount == 0 {
tracing::debug!("remove ino={}", entry.key());
drop(entry.remove());
}
}
}
}
async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let attr = self.get_attr(&inode.path).await?;
Ok(ReplyAttr::new(attr))
}
async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> {
let file = match op.fh() {
Some(fh) => {
let files = self.files.lock().await;
files.get(fh as usize).cloned()
}
None => None,
};
let mut file = match file {
Some(ref file) => {
let mut file = file.lock().await;
file.file.sync_all().await?;
Some(file) // keep file lock
}
None => None,
};
let inode = {
let inodes = self.inodes.lock().await;
inodes.get(op.ino()).ok_or_else(no_entry)?
};
let inode = inode.lock().await;
let path = Arc::new(self.source.join(&inode.path));
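// Use the client's open file handle when one was given; otherwise open the file by
// path just for this operation.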
enum FileRef<'a> {
Borrowed(&'a mut File),
Owned(File),
}
impl AsMut<File> for FileRef<'_> {
fn as_mut(&mut self) -> &mut File {
match self {
Self::Borrowed(file) => file,
Self::Owned(file) => file,
}
}
}
let mut file = match file {
Some(ref mut file) => FileRef::Borrowed(&mut file.file),
None => FileRef::Owned(File::open(&*path).await?),
};
// chmod
if let Some(mode) = op.mode() {
let perm = std::fs::Permissions::from_mode(mode);
file.as_mut().set_permissions(perm).await?;
}
// truncate
if let Some(size) = op.size() {
file.as_mut().set_len(size).await?;
}
// chown
match (op.uid(), op.gid()) {
(None, None) => (),
(uid, gid) => {
let path = path.clone();
let uid = uid.map(nix::unistd::Uid::from_raw);
let gid = gid.map(nix::unistd::Gid::from_raw);
tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid))
.await?
.map_err(nix_to_io_error)?;
}
}
// TODO: utimes
let attr = self.get_attr(&inode.path).await?;
Ok(ReplyAttr::new(attr))
}
async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
tokio::fs::read_link(self.source.join(&inode.path)).await
}
async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let dir = DirHandle {
read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?,
last_entry: None,
offset: 1,
};
let mut dirs = self.dirs.lock().await;
let key = dirs.insert(Arc::new(Mutex::new(dir)));
Ok(ReplyOpen::new(key as u64))
}
async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> {
let dirs = self.dirs.lock().await;
let dir = dirs
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut dir = dir.lock().await;
let dir = &mut *dir;
let mut entries = vec![];
let mut total_len = 0;
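// An entry that did not fit into the previous reply was stashed in `last_entry`;
// emit it first before pulling new entries from the directory stream.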
if let Some(mut entry) = dir.last_entry.take() {
if total_len + entry.as_ref().len() > op.size() as usize {
return Err(io::Error::from_raw_os_error(libc::ERANGE));
}
entry.set_offset(dir.offset);
total_len += entry.as_ref().len();
dir.offset += 1;
entries.push(entry);
}
while let Some(entry) = dir.read_dir.next_entry().await? {
match entry.file_name() {
name if name.as_bytes() == b"." || name.as_bytes() == b".." => continue,
_ => (),
}
let metadata = entry.metadata().await?;
let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0);
if total_len + entry.as_ref().len() <= op.size() as usize {
entry.set_offset(dir.offset);
total_len += entry.as_ref().len();
dir.offset += 1;
entries.push(entry);
} else {
dir.last_entry.replace(entry);
}
}
Ok(entries)
}
async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> {
let mut dirs = self.dirs.lock().await;
let dir = dirs.remove(op.fh() as usize);
drop(dir);
Ok(())
}
async fn do_open(&self, op: &op::Open<'_>) -> io::Result<ReplyOpen> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let options = OpenOptions::from({
let mut options = std::fs::OpenOptions::new();
match op.flags() as i32 & libc::O_ACCMODE {
libc::O_RDONLY => {
options.read(true);
}
libc::O_WRONLY => {
options.write(true);
}
libc::O_RDWR => {
options.read(true).write(true);
}
_ => (),
}
options.custom_flags(op.flags() as i32 & !libc::O_NOFOLLOW);
options
});
let file = FileHandle {
file: options.open(self.source.join(&inode.path)).await?,
};
let mut files = self.files.lock().await;
let key = files.insert(Arc::new(Mutex::new(file)));
Ok(ReplyOpen::new(key as u64))
}
async fn do_read(&self, op: &op::Read<'_>) -> io::Result<impl Reply> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
file.seek(io::SeekFrom::Start(op.offset())).await?;
let mut buf = Vec::<u8>::with_capacity(op.size() as usize);
use tokio::io::AsyncReadExt;
tokio::io::copy(&mut file.take(op.size() as u64), &mut buf).await?;
Ok(buf)
}
async fn do_write<R: ?Sized>(
&self,
op: &op::Write<'_>,
reader: &mut R,
) -> io::Result<ReplyWrite>
where
R: Reader + Unpin,
{
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
file.seek(io::SeekFrom::Start(op.offset())).await?;
// Here, the data is transferred via a temporary buffer due to
// the incompatibility between the I/O abstractions in `futures` and
// `tokio`.
//
// To transfer large files efficiently, both zero-copy support in
// `polyfuse` and resolution of the impedance mismatch
// between `futures::io` and `tokio::io` are required. | {
use futures::io::AsyncReadExt;
reader.read_to_end(&mut buf).await?;
}
use tokio::io::AsyncReadExt;
let mut buf = &buf[..];
let mut buf = (&mut buf).take(op.size() as u64);
let written = tokio::io::copy(&mut buf, &mut *file).await?;
Ok(ReplyWrite::new(written as u32))
}
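// Editor's sketch (not part of the original example): assuming `tokio-util`
// (with its `compat` feature) were an acceptable extra dependency, the
// temporary buffer in `do_write` above could be avoided by adapting the
// `futures`-flavored reader into a `tokio` one and streaming directly.
async fn do_write_streaming<R>(file: &mut File, reader: &mut R, size: u64) -> io::Result<u64>
where
    R: futures::io::AsyncRead + Unpin,
{
    use tokio::io::AsyncReadExt as _;
    use tokio_util::compat::FuturesAsyncReadCompatExt;
    // `compat()` bridges `futures::io::AsyncRead` to `tokio::io::AsyncRead`;
    // `take` bounds the copy to the size reported by the request.
    let mut reader = (&mut *reader).compat().take(size);
    tokio::io::copy(&mut reader, file).await
}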
async fn do_flush(&self, op: &op::Flush<'_>) -> io::Result<()> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let file = file.lock().await;
file.file.try_clone().await?;
Ok(())
}
async fn do_fsync(&self, op: &op::Fsync<'_>) -> io::Result<()> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
if op.datasync() {
file.sync_data().await?;
} else {
file.sync_all().await?;
}
Ok(())
}
async fn do_release(&self, op: &op::Release<'_>) -> io::Result<()> {
let mut files = self.files.lock().await;
let file = files.remove(op.fh() as usize);
drop(file);
Ok(())
}
}
#[polyfuse::async_trait]
impl Filesystem for PathThrough {
#[allow(clippy::cognitive_complexity)]
async fn call<'a, 'cx, T: ?Sized>(
&'a self,
cx: &'a mut Context<'cx, T>,
op: Operation<'cx>,
) -> io::Result<()>
where
T: Reader + Writer + Send + Unpin,
{
macro_rules! try_reply {
($e:expr) => {
match ($e).await {
Ok(reply) => cx.reply(reply).await,
Err(err) => cx.reply_err(err.raw_os_error().unwrap_or(libc::EIO)).await,
}
};
}
match op {
Operation::Lookup(op) => try_reply!(self.do_lookup(&op)),
Operation::Forget(forgets) => {
self.do_forget(forgets.as_ref()).await;
Ok(())
}
Operation::Getattr(op) => try_reply!(self.do_getattr(&op)),
Operation::Setattr(op) => try_reply!(self.do_setattr(&op)),
Operation::Readlink(op) => try_reply!(self.do_readlink(&op)),
Operation::Opendir(op) => try_reply!(self.do_opendir(&op)),
Operation::Readdir(op) => try_reply!(self.do_readdir(&op)),
Operation::Releasedir(op) => try_reply!(self.do_releasedir(&op)),
Operation::Open(op) => try_reply!(self.do_open(&op)),
Operation::Read(op) => try_reply!(self.do_read(&op)),
Operation::Write(op) => {
let res = self.do_write(&op, &mut cx.reader()).await;
try_reply!(async { res })
}
Operation::Flush(op) => try_reply!(self.do_flush(&op)),
Operation::Fsync(op) => try_reply!(self.do_fsync(&op)),
Operation::Release(op) => try_reply!(self.do_release(&op)),
_ => Ok(()),
}
}
}
#[inline]
fn no_entry() -> io::Error {
io::Error::from_raw_os_error(libc::ENOENT)
}
fn nix_to_io | let mut buf = Vec::with_capacity(op.size() as usize); | random_line_split |
path_through.rs | // This example is another version of `passthrough.rs` that uses the
// path strings instead of file descriptors opened with the `O_PATH` flag
// for referencing the underlying file entries.
// It has the advantage of being able to use the *standard* filesystem
// APIs directly, but it also incurs an additional path-resolution
// cost for each operation.
//
// This example is intended to be used as a template for implementing
// path-based filesystems such as those built on libfuse's high-level API.
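// For contrast, a minimal sketch (an editor's addition, not used below) of
// the fd-based referencing that `passthrough.rs` uses: hold an `O_PATH`
// descriptor for an entry and resolve operations relative to it, avoiding a
// full path walk per operation. `nix` is already a dependency here.
fn open_path_fd(path: &std::path::Path) -> nix::Result<std::os::unix::io::RawFd> {
    use nix::fcntl::{open, OFlag};
    use nix::sys::stat::Mode;
    // O_PATH yields a handle usable for *at-style calls without read access.
    open(path, OFlag::O_PATH | OFlag::O_NOFOLLOW, Mode::empty())
}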
use pico_args::Arguments;
use polyfuse::{
io::{Reader, Writer},
op,
reply::{Reply, ReplyAttr, ReplyEntry, ReplyOpen, ReplyWrite},
Context, DirEntry, FileAttr, Filesystem, Forget, Operation,
};
use slab::Slab;
use std::{
collections::hash_map::{Entry, HashMap},
convert::TryInto,
io,
os::unix::prelude::*,
path::{Path, PathBuf},
sync::Arc,
};
use tokio::{
fs::{File, OpenOptions, ReadDir},
sync::Mutex,
};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
tracing_subscriber::fmt::init();
let mut args = Arguments::from_env();
let source: PathBuf = args
.opt_value_from_str(["-s", "--source"])?
.unwrap_or_else(|| std::env::current_dir().unwrap());
anyhow::ensure!(source.is_dir(), "the source path must be a directory");
let mountpoint: PathBuf = args
.free_from_str()?
.ok_or_else(|| anyhow::anyhow!("missing mountpoint"))?;
anyhow::ensure!(mountpoint.is_dir(), "the mountpoint must be a directory");
let fs = PathThrough::new(source)?;
polyfuse_tokio::mount(fs, mountpoint, &[]).await?;
Ok(())
}
type Ino = u64;
struct INode {
ino: Ino,
path: PathBuf,
refcount: u64,
}
struct INodeTable {
map: HashMap<Ino, Arc<Mutex<INode>>>,
path_to_ino: HashMap<PathBuf, Ino>,
next_ino: u64,
}
impl INodeTable {
fn new() -> Self {
INodeTable {
map: HashMap::new(),
path_to_ino: HashMap::new(),
next_ino: 1, // inode numbers start at 1; the first node is the root.
}
}
fn vacant_entry(&mut self) -> VacantEntry<'_> {
let ino = self.next_ino;
VacantEntry { table: self, ino }
}
fn get(&self, ino: Ino) -> Option<Arc<Mutex<INode>>> {
self.map.get(&ino).cloned()
}
fn | (&self, path: &Path) -> Option<Arc<Mutex<INode>>> {
let ino = self.path_to_ino.get(path).copied()?;
self.get(ino)
}
}
struct VacantEntry<'a> {
table: &'a mut INodeTable,
ino: Ino,
}
impl VacantEntry<'_> {
fn insert(mut self, inode: INode) {
let path = inode.path.clone();
self.table.map.insert(self.ino, Arc::new(Mutex::new(inode)));
self.table.path_to_ino.insert(path, self.ino);
self.table.next_ino += 1;
}
}
struct DirHandle {
read_dir: ReadDir,
last_entry: Option<DirEntry>,
offset: u64,
}
struct FileHandle {
file: File,
}
struct PathThrough {
source: PathBuf,
inodes: Mutex<INodeTable>,
dirs: Mutex<Slab<Arc<Mutex<DirHandle>>>>,
files: Mutex<Slab<Arc<Mutex<FileHandle>>>>,
}
impl PathThrough {
fn new(source: PathBuf) -> io::Result<Self> {
let source = source.canonicalize()?;
let mut inodes = INodeTable::new();
inodes.vacant_entry().insert(INode {
ino: 1,
path: PathBuf::new(),
refcount: u64::max_value() / 2,
});
Ok(Self {
source,
inodes: Mutex::new(inodes),
dirs: Mutex::default(),
files: Mutex::default(),
})
}
fn make_entry_out(&self, ino: Ino, attr: FileAttr) -> io::Result<ReplyEntry> {
let mut reply = ReplyEntry::default();
reply.ino(ino);
reply.attr(attr);
Ok(reply)
}
async fn get_attr(&self, path: impl AsRef<Path>) -> io::Result<FileAttr> {
let metadata = tokio::fs::symlink_metadata(self.source.join(path)).await?;
metadata
.try_into()
.map_err(|err| io::Error::new(io::ErrorKind::Other, err))
}
async fn do_lookup(&self, op: &op::Lookup<'_>) -> io::Result<ReplyEntry> {
let mut inodes = self.inodes.lock().await;
let parent = inodes.get(op.parent()).ok_or_else(no_entry)?;
let parent = parent.lock().await;
let path = parent.path.join(op.name());
let metadata = self.get_attr(&path).await?;
let ino;
match inodes.get_path(&path) {
Some(inode) => {
let mut inode = inode.lock().await;
ino = inode.ino;
inode.refcount += 1;
}
None => {
let entry = inodes.vacant_entry();
ino = entry.ino;
entry.insert(INode {
ino,
path,
refcount: 1,
})
}
}
self.make_entry_out(ino, metadata)
}
async fn do_forget(&self, forgets: &[Forget]) {
let mut inodes = self.inodes.lock().await;
for forget in forgets {
if let Entry::Occupied(mut entry) = inodes.map.entry(forget.ino()) {
let refcount = {
let mut inode = entry.get_mut().lock().await;
inode.refcount = inode.refcount.saturating_sub(forget.nlookup());
inode.refcount
};
if refcount == 0 {
tracing::debug!("remove ino={}", entry.key());
drop(entry.remove());
}
}
}
}
async fn do_getattr(&self, op: &op::Getattr<'_>) -> io::Result<ReplyAttr> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let attr = self.get_attr(&inode.path).await?;
Ok(ReplyAttr::new(attr))
}
async fn do_setattr(&self, op: &op::Setattr<'_>) -> io::Result<ReplyAttr> {
let file = match op.fh() {
Some(fh) => {
let files = self.files.lock().await;
files.get(fh as usize).cloned()
}
None => None,
};
let mut file = match file {
Some(ref file) => {
let mut file = file.lock().await;
file.file.sync_all().await?;
Some(file) // keep file lock
}
None => None,
};
let inode = {
let inodes = self.inodes.lock().await;
inodes.get(op.ino()).ok_or_else(no_entry)?
};
let inode = inode.lock().await;
let path = Arc::new(self.source.join(&inode.path));
enum FileRef<'a> {
Borrowed(&'a mut File),
Owned(File),
}
impl AsMut<File> for FileRef<'_> {
fn as_mut(&mut self) -> &mut File {
match self {
Self::Borrowed(file) => file,
Self::Owned(file) => file,
}
}
}
let mut file = match file {
Some(ref mut file) => FileRef::Borrowed(&mut file.file),
None => FileRef::Owned(File::open(&*path).await?),
};
// chmod
if let Some(mode) = op.mode() {
let perm = std::fs::Permissions::from_mode(mode);
file.as_mut().set_permissions(perm).await?;
}
// truncate
if let Some(size) = op.size() {
file.as_mut().set_len(size).await?;
}
// chown
match (op.uid(), op.gid()) {
(None, None) => (),
(uid, gid) => {
let path = path.clone();
let uid = uid.map(nix::unistd::Uid::from_raw);
let gid = gid.map(nix::unistd::Gid::from_raw);
tokio::task::spawn_blocking(move || nix::unistd::chown(&*path, uid, gid))
.await?
.map_err(nix_to_io_error)?;
}
}
// TODO: utimes
let attr = self.get_attr(&inode.path).await?;
Ok(ReplyAttr::new(attr))
}
async fn do_readlink(&self, op: &op::Readlink<'_>) -> io::Result<PathBuf> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
tokio::fs::read_link(self.source.join(&inode.path)).await
}
async fn do_opendir(&self, op: &op::Opendir<'_>) -> io::Result<ReplyOpen> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let dir = DirHandle {
read_dir: tokio::fs::read_dir(self.source.join(&inode.path)).await?,
last_entry: None,
offset: 1,
};
let mut dirs = self.dirs.lock().await;
let key = dirs.insert(Arc::new(Mutex::new(dir)));
Ok(ReplyOpen::new(key as u64))
}
async fn do_readdir(&self, op: &op::Readdir<'_>) -> io::Result<impl Reply> {
let dirs = self.dirs.lock().await;
let dir = dirs
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut dir = dir.lock().await;
let dir = &mut *dir;
let mut entries = vec![];
let mut total_len = 0;
if let Some(mut entry) = dir.last_entry.take() {
if total_len + entry.as_ref().len() > op.size() as usize {
return Err(io::Error::from_raw_os_error(libc::ERANGE));
}
entry.set_offset(dir.offset);
total_len += entry.as_ref().len();
dir.offset += 1;
entries.push(entry);
}
while let Some(entry) = dir.read_dir.next_entry().await? {
match entry.file_name() {
name if name.as_bytes() == b"." || name.as_bytes() == b".." => continue,
_ => (),
}
let metadata = entry.metadata().await?;
let mut entry = DirEntry::new(entry.file_name(), metadata.ino(), 0);
if total_len + entry.as_ref().len() <= op.size() as usize {
entry.set_offset(dir.offset);
total_len += entry.as_ref().len();
dir.offset += 1;
entries.push(entry);
} else {
dir.last_entry.replace(entry);
}
}
Ok(entries)
}
async fn do_releasedir(&self, op: &op::Releasedir<'_>) -> io::Result<()> {
let mut dirs = self.dirs.lock().await;
let dir = dirs.remove(op.fh() as usize);
drop(dir);
Ok(())
}
async fn do_open(&self, op: &op::Open<'_>) -> io::Result<ReplyOpen> {
let inodes = self.inodes.lock().await;
let inode = inodes.get(op.ino()).ok_or_else(no_entry)?;
let inode = inode.lock().await;
let options = OpenOptions::from({
let mut options = std::fs::OpenOptions::new();
match op.flags() as i32 & libc::O_ACCMODE {
libc::O_RDONLY => {
options.read(true);
}
libc::O_WRONLY => {
options.write(true);
}
libc::O_RDWR => {
options.read(true).write(true);
}
_ => (),
}
options.custom_flags(op.flags() as i32 & !libc::O_NOFOLLOW);
options
});
let file = FileHandle {
file: options.open(self.source.join(&inode.path)).await?,
};
let mut files = self.files.lock().await;
let key = files.insert(Arc::new(Mutex::new(file)));
Ok(ReplyOpen::new(key as u64))
}
async fn do_read(&self, op: &op::Read<'_>) -> io::Result<impl Reply> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
file.seek(io::SeekFrom::Start(op.offset())).await?;
let mut buf = Vec::<u8>::with_capacity(op.size() as usize);
use tokio::io::AsyncReadExt;
tokio::io::copy(&mut file.take(op.size() as u64), &mut buf).await?;
Ok(buf)
}
async fn do_write<R: ?Sized>(
&self,
op: &op::Write<'_>,
reader: &mut R,
) -> io::Result<ReplyWrite>
where
R: Reader + Unpin,
{
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
file.seek(io::SeekFrom::Start(op.offset())).await?;
// Here, the data is transferred via a temporary buffer due to
// the incompatibility between the I/O abstractions in `futures` and
// `tokio`.
//
// To transfer large files efficiently, both zero-copy support in
// `polyfuse` and resolution of the impedance mismatch
// between `futures::io` and `tokio::io` are required.
let mut buf = Vec::with_capacity(op.size() as usize);
{
use futures::io::AsyncReadExt;
reader.read_to_end(&mut buf).await?;
}
use tokio::io::AsyncReadExt;
let mut buf = &buf[..];
let mut buf = (&mut buf).take(op.size() as u64);
let written = tokio::io::copy(&mut buf, &mut *file).await?;
Ok(ReplyWrite::new(written as u32))
}
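// Editor's sketch (not in the original): the buffering above can at least be
// bounded to the request size by limiting the reader on the `futures` side,
// rather than `read_to_end`-ing everything and `take`-ing during the copy.
async fn read_body_bounded<R>(reader: &mut R, size: usize) -> io::Result<Vec<u8>>
where
    R: futures::io::AsyncRead + Unpin,
{
    use futures::io::AsyncReadExt as _;
    let mut buf = Vec::with_capacity(size);
    // `take` caps how many bytes `read_to_end` may pull into `buf`.
    (&mut *reader).take(size as u64).read_to_end(&mut buf).await?;
    Ok(buf)
}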
async fn do_flush(&self, op: &op::Flush<'_>) -> io::Result<()> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let file = file.lock().await;
file.file.try_clone().await?;
Ok(())
}
async fn do_fsync(&self, op: &op::Fsync<'_>) -> io::Result<()> {
let files = self.files.lock().await;
let file = files
.get(op.fh() as usize)
.cloned()
.ok_or_else(|| io::Error::from_raw_os_error(libc::EIO))?;
let mut file = file.lock().await;
let file = &mut file.file;
if op.datasync() {
file.sync_data().await?;
} else {
file.sync_all().await?;
}
Ok(())
}
async fn do_release(&self, op: &op::Release<'_>) -> io::Result<()> {
let mut files = self.files.lock().await;
let file = files.remove(op.fh() as usize);
drop(file);
Ok(())
}
}
#[polyfuse::async_trait]
impl Filesystem for PathThrough {
#[allow(clippy::cognitive_complexity)]
async fn call<'a, 'cx, T: ?Sized>(
&'a self,
cx: &'a mut Context<'cx, T>,
op: Operation<'cx>,
) -> io::Result<()>
where
T: Reader + Writer + Send + Unpin,
{
macro_rules! try_reply {
($e:expr) => {
match ($e).await {
Ok(reply) => cx.reply(reply).await,
Err(err) => cx.reply_err(err.raw_os_error().unwrap_or(libc::EIO)).await,
}
};
}
match op {
Operation::Lookup(op) => try_reply!(self.do_lookup(&op)),
Operation::Forget(forgets) => {
self.do_forget(forgets.as_ref()).await;
Ok(())
}
Operation::Getattr(op) => try_reply!(self.do_getattr(&op)),
Operation::Setattr(op) => try_reply!(self.do_setattr(&op)),
Operation::Readlink(op) => try_reply!(self.do_readlink(&op)),
Operation::Opendir(op) => try_reply!(self.do_opendir(&op)),
Operation::Readdir(op) => try_reply!(self.do_readdir(&op)),
Operation::Releasedir(op) => try_reply!(self.do_releasedir(&op)),
Operation::Open(op) => try_reply!(self.do_open(&op)),
Operation::Read(op) => try_reply!(self.do_read(&op)),
Operation::Write(op) => {
let res = self.do_write(&op, &mut cx.reader()).await;
try_reply!(async { res })
}
Operation::Flush(op) => try_reply!(self.do_flush(&op)),
Operation::Fsync(op) => try_reply!(self.do_fsync(&op)),
Operation::Release(op) => try_reply!(self.do_release(&op)),
_ => Ok(()),
}
}
}
#[inline]
fn no_entry() -> io::Error {
io::Error::from_raw_os_error(libc::ENOENT)
}
fn nix_to | get_path | identifier_name |
lib.rs | // Copyright 2021-2023 Vector 35 Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use binaryninja::{
binaryview::{BinaryView, BinaryViewExt},
command::{register, Command},
disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents},
flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption},
string::BnString,
};
use dwarfreader::is_valid;
use gimli::{
AttributeValue::{Encoding, Flag, UnitRef},
// BigEndian,
DebuggingInformationEntry,
Dwarf,
EntriesTreeNode,
Reader,
ReaderOffset,
SectionId,
Unit,
UnitSectionOffset,
};
static PADDING: [&'static str; 23] = [
"",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
];
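// Editor's note: `PADDING[i]` holds exactly `i` spaces, so `PADDING[18 - len]`
// below pads a `len`-character attribute name out to an 18-column field;
// e.g. "DW_AT_name" (10 chars) receives 8 trailing spaces.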
// TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs
fn get_info_string<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
die_node: &DebuggingInformationEntry<R>,
) -> Vec<DisassemblyTextLine> {
let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize
let label_value = match die_node.offset().to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let label_string = format!("#0x{:08x}", label_value);
disassembly_lines.push(DisassemblyTextLine::from(vec![
InstructionTextToken::new(
BnString::new(label_string),
InstructionTextTokenContents::GotoLabel(label_value),
),
InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text),
]));
disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new(
BnString::new(die_node.tag().static_string().unwrap()),
InstructionTextTokenContents::TypeName, // TODO : KeywordToken?
)]));
let mut attrs = die_node.attrs();
while let Some(attr) = attrs.next().unwrap() {
let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5);
attr_line.push(InstructionTextToken::new(
BnString::new(" "),
InstructionTextTokenContents::Indentation,
));
let len;
if let Some(n) = attr.name().static_string() {
len = n.len();
attr_line.push(InstructionTextToken::new(
BnString::new(n),
InstructionTextTokenContents::FieldName,
));
} else {
// This is rather unlikely, I think
len = 1;
attr_line.push(InstructionTextToken::new(
BnString::new("?"),
InstructionTextTokenContents::FieldName,
));
}
// On the command line, the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided)
if len < 18 {
attr_line.push(InstructionTextToken::new(
BnString::new(PADDING[18 - len]),
InstructionTextTokenContents::Text,
));
}
attr_line.push(InstructionTextToken::new(
BnString::new(" = "),
InstructionTextTokenContents::Text,
));
if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) {
let addr_string = format!("0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::Integer(addr),
));
} else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) {
if let Ok(attr_string) = attr_reader.to_string() {
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string.as_ref()),
InstructionTextTokenContents::String({
let (_, id, offset) =
dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap();
offset.into_u64() + view.section_by_name(id.name()).unwrap().start()
}),
));
} else {
attr_line.push(InstructionTextToken::new(
BnString::new("??"),
InstructionTextTokenContents::Text,
));
}
} else if let Encoding(type_class) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new(type_class.static_string().unwrap()),
InstructionTextTokenContents::TypeName,
));
} else if let UnitRef(offset) = attr.value() {
let addr = match offset.to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let addr_string = format!("#0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::GotoLabel(addr),
));
} else if let Flag(true) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("true"),
InstructionTextTokenContents::Integer(1),
));
} else if let Flag(false) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("false"),
InstructionTextTokenContents::Integer(0),
));
// Fall-back cases
} else if let Some(value) = attr.u8_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.u16_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.udata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.sdata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value as u64),
));
} else {
let attr_string = format!("{:?}", attr.value());
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string),
InstructionTextTokenContents::Text,
));
}
disassembly_lines.push(DisassemblyTextLine::from(attr_line));
}
disassembly_lines
}
fn process_tree<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
graph: &FlowGraph,
graph_parent: &FlowGraphNode,
die_node: EntriesTreeNode<R>,
) {
// Namespaces only - really interesting to look at!
// if (die_node.entry().tag() == constants::DW_TAG_namespace)
// || (die_node.entry().tag() == constants::DW_TAG_class_type)
// || (die_node.entry().tag() == constants::DW_TAG_compile_unit)
// || (die_node.entry().tag() == constants::DW_TAG_subprogram)
// {
let new_node = FlowGraphNode::new(graph);
let attr_string = get_info_string(view, dwarf, unit, die_node.entry());
new_node.set_disassembly_lines(&attr_string);
graph.append(&new_node);
graph_parent.add_outgoing_edge(
BranchType::UnconditionalBranch,
&new_node,
&EdgeStyle::default(),
);
let mut children = die_node.children();
while let Some(child) = children.next().unwrap() {
process_tree(view, dwarf, unit, graph, &new_node, child);
}
// }
}
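// Editor's sketch: the filter commented out at the top of `process_tree`
// could be reinstated as an early return (the tag list is illustrative):
//
// let tag = die_node.entry().tag();
// if tag != gimli::constants::DW_TAG_namespace
//     && tag != gimli::constants::DW_TAG_class_type
//     && tag != gimli::constants::DW_TAG_compile_unit
//     && tag != gimli::constants::DW_TAG_subprogram
// {
//     return;
// }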
fn dump_dwarf(bv: &BinaryView) {
let view = if bv.section_by_name(".debug_info").is_ok() {
bv.to_owned()
} else {
bv.parent_view().unwrap()
};
let graph = FlowGraph::new();
graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true);
graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true);
let graph_root = FlowGraphNode::new(&graph);
graph_root.set_lines(vec!["Graph Root"]);
graph.append(&graph_root);
let endian = dwarfreader::get_endian(bv);
let section_reader = |section_id: SectionId| -> _ {
dwarfreader::create_section_reader(section_id, bv, endian, false)
};
let dwarf = Dwarf::load(&section_reader).unwrap();
let mut iter = dwarf.units();
while let Some(header) = iter.next().unwrap() {
let unit = dwarf.unit(header).unwrap();
let mut entries = unit.entries();
let mut depth = 0;
if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() {
depth += delta_depth;
assert!(depth >= 0);
let mut tree = unit.entries_tree(Some(entry.offset())).unwrap();
let root = tree.root().unwrap();
process_tree(&view, &dwarf, &unit, &graph, &graph_root, root);
}
}
view.show_graph_report("DWARF", graph);
}
struct DWARFDump;
impl Command for DWARFDump {
fn action(&self, view: &BinaryView) {
dump_dwarf(view);
}
fn valid(&self, view: &BinaryView) -> bool {
is_valid(view)
}
}
#[no_mangle]
pub extern "C" fn UIPluginInit() -> bool | {
register(
"DWARF Dump",
"Show embedded DWARF info as a tree structure for you to navigate",
DWARFDump {},
);
true
} | identifier_body |
|
lib.rs | // Copyright 2021-2023 Vector 35 Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use binaryninja::{
binaryview::{BinaryView, BinaryViewExt},
command::{register, Command},
disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents},
flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption},
string::BnString,
};
use dwarfreader::is_valid;
use gimli::{
AttributeValue::{Encoding, Flag, UnitRef},
// BigEndian,
DebuggingInformationEntry,
Dwarf,
EntriesTreeNode,
Reader,
ReaderOffset,
SectionId,
Unit,
UnitSectionOffset,
};
static PADDING: [&'static str; 23] = [
"",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
];
// TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs
fn get_info_string<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
die_node: &DebuggingInformationEntry<R>,
) -> Vec<DisassemblyTextLine> {
let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize
let label_value = match die_node.offset().to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let label_string = format!("#0x{:08x}", label_value);
disassembly_lines.push(DisassemblyTextLine::from(vec![
InstructionTextToken::new(
BnString::new(label_string),
InstructionTextTokenContents::GotoLabel(label_value),
),
InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text),
]));
disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new(
BnString::new(die_node.tag().static_string().unwrap()),
InstructionTextTokenContents::TypeName, // TODO : KeywordToken?
)]));
let mut attrs = die_node.attrs();
while let Some(attr) = attrs.next().unwrap() {
let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5);
attr_line.push(InstructionTextToken::new(
BnString::new(" "),
InstructionTextTokenContents::Indentation,
));
let len;
if let Some(n) = attr.name().static_string() {
len = n.len();
attr_line.push(InstructionTextToken::new(
BnString::new(n),
InstructionTextTokenContents::FieldName,
));
} else {
// This is rather unlikely, I think
len = 1;
attr_line.push(InstructionTextToken::new(
BnString::new("?"),
InstructionTextTokenContents::FieldName,
));
}
// On the command line, the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided)
if len < 18 {
attr_line.push(InstructionTextToken::new(
BnString::new(PADDING[18 - len]),
InstructionTextTokenContents::Text,
));
}
attr_line.push(InstructionTextToken::new(
BnString::new(" = "),
InstructionTextTokenContents::Text,
));
if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) {
let addr_string = format!("0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::Integer(addr),
));
} else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) {
if let Ok(attr_string) = attr_reader.to_string() {
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string.as_ref()),
InstructionTextTokenContents::String({
let (_, id, offset) =
dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap();
offset.into_u64() + view.section_by_name(id.name()).unwrap().start()
}),
));
} else {
attr_line.push(InstructionTextToken::new(
BnString::new("??"),
InstructionTextTokenContents::Text,
));
}
} else if let Encoding(type_class) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new(type_class.static_string().unwrap()),
InstructionTextTokenContents::TypeName,
));
} else if let UnitRef(offset) = attr.value() {
let addr = match offset.to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let addr_string = format!("#0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::GotoLabel(addr),
));
} else if let Flag(true) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("true"),
InstructionTextTokenContents::Integer(1),
));
} else if let Flag(false) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("false"),
InstructionTextTokenContents::Integer(0),
));
// Fall-back cases
} else if let Some(value) = attr.u8_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.u16_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.udata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.sdata_value() | else {
let attr_string = format!("{:?}", attr.value());
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string),
InstructionTextTokenContents::Text,
));
}
disassembly_lines.push(DisassemblyTextLine::from(attr_line));
}
disassembly_lines
}
fn process_tree<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
graph: &FlowGraph,
graph_parent: &FlowGraphNode,
die_node: EntriesTreeNode<R>,
) {
// Namespaces only - really interesting to look at!
// if (die_node.entry().tag() == constants::DW_TAG_namespace)
// || (die_node.entry().tag() == constants::DW_TAG_class_type)
// || (die_node.entry().tag() == constants::DW_TAG_compile_unit)
// || (die_node.entry().tag() == constants::DW_TAG_subprogram)
// {
let new_node = FlowGraphNode::new(graph);
let attr_string = get_info_string(view, dwarf, unit, die_node.entry());
new_node.set_disassembly_lines(&attr_string);
graph.append(&new_node);
graph_parent.add_outgoing_edge(
BranchType::UnconditionalBranch,
&new_node,
&EdgeStyle::default(),
);
let mut children = die_node.children();
while let Some(child) = children.next().unwrap() {
process_tree(view, dwarf, unit, graph, &new_node, child);
}
// }
}
fn dump_dwarf(bv: &BinaryView) {
let view = if bv.section_by_name(".debug_info").is_ok() {
bv.to_owned()
} else {
bv.parent_view().unwrap()
};
let graph = FlowGraph::new();
graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true);
graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true);
let graph_root = FlowGraphNode::new(&graph);
graph_root.set_lines(vec!["Graph Root"]);
graph.append(&graph_root);
let endian = dwarfreader::get_endian(bv);
let section_reader = |section_id: SectionId| -> _ {
dwarfreader::create_section_reader(section_id, bv, endian, false)
};
let dwarf = Dwarf::load(&section_reader).unwrap();
let mut iter = dwarf.units();
while let Some(header) = iter.next().unwrap() {
let unit = dwarf.unit(header).unwrap();
let mut entries = unit.entries();
let mut depth = 0;
if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() {
depth += delta_depth;
assert!(depth >= 0);
let mut tree = unit.entries_tree(Some(entry.offset())).unwrap();
let root = tree.root().unwrap();
process_tree(&view, &dwarf, &unit, &graph, &graph_root, root);
}
}
view.show_graph_report("DWARF", graph);
}
struct DWARFDump;
impl Command for DWARFDump {
fn action(&self, view: &BinaryView) {
dump_dwarf(view);
}
fn valid(&self, view: &BinaryView) -> bool {
is_valid(view)
}
}
#[no_mangle]
pub extern "C" fn UIPluginInit() -> bool {
register(
"DWARF Dump",
"Show embedded DWARF info as a tree structure for you to navigate",
DWARFDump {},
);
true
}
| {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value as u64),
));
} | conditional_block |
lib.rs | // Copyright 2021-2023 Vector 35 Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software | // See the License for the specific language governing permissions and
// limitations under the License.
use binaryninja::{
binaryview::{BinaryView, BinaryViewExt},
command::{register, Command},
disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents},
flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption},
string::BnString,
};
use dwarfreader::is_valid;
use gimli::{
AttributeValue::{Encoding, Flag, UnitRef},
// BigEndian,
DebuggingInformationEntry,
Dwarf,
EntriesTreeNode,
Reader,
ReaderOffset,
SectionId,
Unit,
UnitSectionOffset,
};
static PADDING: [&'static str; 23] = [
"",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
];
// TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs
fn get_info_string<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
die_node: &DebuggingInformationEntry<R>,
) -> Vec<DisassemblyTextLine> {
let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize
let label_value = match die_node.offset().to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let label_string = format!("#0x{:08x}", label_value);
disassembly_lines.push(DisassemblyTextLine::from(vec![
InstructionTextToken::new(
BnString::new(label_string),
InstructionTextTokenContents::GotoLabel(label_value),
),
InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text),
]));
disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new(
BnString::new(die_node.tag().static_string().unwrap()),
InstructionTextTokenContents::TypeName, // TODO : KeywordToken?
)]));
let mut attrs = die_node.attrs();
while let Some(attr) = attrs.next().unwrap() {
let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5);
attr_line.push(InstructionTextToken::new(
BnString::new(" "),
InstructionTextTokenContents::Indentation,
));
let len;
if let Some(n) = attr.name().static_string() {
len = n.len();
attr_line.push(InstructionTextToken::new(
BnString::new(n),
InstructionTextTokenContents::FieldName,
));
} else {
// This is rather unlikely, I think
len = 1;
attr_line.push(InstructionTextToken::new(
BnString::new("?"),
InstructionTextTokenContents::FieldName,
));
}
// On the command line, the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided)
if len < 18 {
attr_line.push(InstructionTextToken::new(
BnString::new(PADDING[18 - len]),
InstructionTextTokenContents::Text,
));
}
attr_line.push(InstructionTextToken::new(
BnString::new(" = "),
InstructionTextTokenContents::Text,
));
if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) {
let addr_string = format!("0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::Integer(addr),
));
} else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) {
if let Ok(attr_string) = attr_reader.to_string() {
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string.as_ref()),
InstructionTextTokenContents::String({
let (_, id, offset) =
dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap();
offset.into_u64() + view.section_by_name(id.name()).unwrap().start()
}),
));
} else {
attr_line.push(InstructionTextToken::new(
BnString::new("??"),
InstructionTextTokenContents::Text,
));
}
} else if let Encoding(type_class) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new(type_class.static_string().unwrap()),
InstructionTextTokenContents::TypeName,
));
} else if let UnitRef(offset) = attr.value() {
let addr = match offset.to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let addr_string = format!("#0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::GotoLabel(addr),
));
} else if let Flag(true) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("true"),
InstructionTextTokenContents::Integer(1),
));
} else if let Flag(false) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("false"),
InstructionTextTokenContents::Integer(0),
));
// Fall-back cases
} else if let Some(value) = attr.u8_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.u16_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.udata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.sdata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value as u64),
));
} else {
let attr_string = format!("{:?}", attr.value());
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string),
InstructionTextTokenContents::Text,
));
}
disassembly_lines.push(DisassemblyTextLine::from(attr_line));
}
disassembly_lines
}
fn process_tree<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
graph: &FlowGraph,
graph_parent: &FlowGraphNode,
die_node: EntriesTreeNode<R>,
) {
// Namespaces only - really interesting to look at!
// if (die_node.entry().tag() == constants::DW_TAG_namespace)
// || (die_node.entry().tag() == constants::DW_TAG_class_type)
// || (die_node.entry().tag() == constants::DW_TAG_compile_unit)
// || (die_node.entry().tag() == constants::DW_TAG_subprogram)
// {
let new_node = FlowGraphNode::new(graph);
let attr_string = get_info_string(view, dwarf, unit, die_node.entry());
new_node.set_disassembly_lines(&attr_string);
graph.append(&new_node);
graph_parent.add_outgoing_edge(
BranchType::UnconditionalBranch,
&new_node,
&EdgeStyle::default(),
);
let mut children = die_node.children();
while let Some(child) = children.next().unwrap() {
process_tree(view, dwarf, unit, graph, &new_node, child);
}
// }
}
fn dump_dwarf(bv: &BinaryView) {
let view = if bv.section_by_name(".debug_info").is_ok() {
bv.to_owned()
} else {
bv.parent_view().unwrap()
};
let graph = FlowGraph::new();
graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true);
graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true);
let graph_root = FlowGraphNode::new(&graph);
graph_root.set_lines(vec!["Graph Root"]);
graph.append(&graph_root);
let endian = dwarfreader::get_endian(bv);
let section_reader = |section_id: SectionId| -> _ {
dwarfreader::create_section_reader(section_id, bv, endian, false)
};
let dwarf = Dwarf::load(&section_reader).unwrap();
let mut iter = dwarf.units();
while let Some(header) = iter.next().unwrap() {
let unit = dwarf.unit(header).unwrap();
let mut entries = unit.entries();
let mut depth = 0;
if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() {
depth += delta_depth;
assert!(depth >= 0);
let mut tree = unit.entries_tree(Some(entry.offset())).unwrap();
let root = tree.root().unwrap();
process_tree(&view, &dwarf, &unit, &graph, &graph_root, root);
}
}
view.show_graph_report("DWARF", graph);
}
struct DWARFDump;
impl Command for DWARFDump {
fn action(&self, view: &BinaryView) {
dump_dwarf(view);
}
fn valid(&self, view: &BinaryView) -> bool {
is_valid(view)
}
}
#[no_mangle]
pub extern "C" fn UIPluginInit() -> bool {
register(
"DWARF Dump",
"Show embedded DWARF info as a tree structure for you to navigate",
DWARFDump {},
);
true
} | // distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | random_line_split |
lib.rs | // Copyright 2021-2023 Vector 35 Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use binaryninja::{
binaryview::{BinaryView, BinaryViewExt},
command::{register, Command},
disassembly::{DisassemblyTextLine, InstructionTextToken, InstructionTextTokenContents},
flowgraph::{BranchType, EdgeStyle, FlowGraph, FlowGraphNode, FlowGraphOption},
string::BnString,
};
use dwarfreader::is_valid;
use gimli::{
AttributeValue::{Encoding, Flag, UnitRef},
// BigEndian,
DebuggingInformationEntry,
Dwarf,
EntriesTreeNode,
Reader,
ReaderOffset,
SectionId,
Unit,
UnitSectionOffset,
};
static PADDING: [&'static str; 23] = [
"",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
" ",
];
// TODO : This is very much not comprehensive: see https://github.com/gimli-rs/gimli/blob/master/examples/dwarfdump.rs
fn | <R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
die_node: &DebuggingInformationEntry<R>,
) -> Vec<DisassemblyTextLine> {
let mut disassembly_lines: Vec<DisassemblyTextLine> = Vec::with_capacity(10); // This is an estimate so "most" things won't need to resize
let label_value = match die_node.offset().to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let label_string = format!("#0x{:08x}", label_value);
disassembly_lines.push(DisassemblyTextLine::from(vec![
InstructionTextToken::new(
BnString::new(label_string),
InstructionTextTokenContents::GotoLabel(label_value),
),
InstructionTextToken::new(BnString::new(":"), InstructionTextTokenContents::Text),
]));
disassembly_lines.push(DisassemblyTextLine::from(vec![InstructionTextToken::new(
BnString::new(die_node.tag().static_string().unwrap()),
InstructionTextTokenContents::TypeName, // TODO : KeywordToken?
)]));
let mut attrs = die_node.attrs();
while let Some(attr) = attrs.next().unwrap() {
let mut attr_line: Vec<InstructionTextToken> = Vec::with_capacity(5);
attr_line.push(InstructionTextToken::new(
BnString::new(" "),
InstructionTextTokenContents::Indentation,
));
let len;
if let Some(n) = attr.name().static_string() {
len = n.len();
attr_line.push(InstructionTextToken::new(
BnString::new(n),
InstructionTextTokenContents::FieldName,
));
} else {
// This is rather unlikely, I think
len = 1;
attr_line.push(InstructionTextToken::new(
BnString::new("?"),
InstructionTextTokenContents::FieldName,
));
}
// On the command line, the magic number that looks good is 22, but that's too much whitespace in a basic block, so I chose 18 (22 is the max with the current padding provided)
if len < 18 {
attr_line.push(InstructionTextToken::new(
BnString::new(PADDING[18 - len]),
InstructionTextTokenContents::Text,
));
}
attr_line.push(InstructionTextToken::new(
BnString::new(" = "),
InstructionTextTokenContents::Text,
));
if let Ok(Some(addr)) = dwarf.attr_address(unit, attr.value()) {
let addr_string = format!("0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::Integer(addr),
));
} else if let Ok(attr_reader) = dwarf.attr_string(unit, attr.value()) {
if let Ok(attr_string) = attr_reader.to_string() {
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string.as_ref()),
InstructionTextTokenContents::String({
let (_, id, offset) =
dwarf.lookup_offset_id(attr_reader.offset_id()).unwrap();
offset.into_u64() + view.section_by_name(id.name()).unwrap().start()
}),
));
} else {
attr_line.push(InstructionTextToken::new(
BnString::new("??"),
InstructionTextTokenContents::Text,
));
}
} else if let Encoding(type_class) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new(type_class.static_string().unwrap()),
InstructionTextTokenContents::TypeName,
));
} else if let UnitRef(offset) = attr.value() {
let addr = match offset.to_unit_section_offset(unit) {
UnitSectionOffset::DebugInfoOffset(o) => o.0,
UnitSectionOffset::DebugTypesOffset(o) => o.0,
}
.into_u64();
let addr_string = format!("#0x{:08x}", addr);
attr_line.push(InstructionTextToken::new(
BnString::new(addr_string),
InstructionTextTokenContents::GotoLabel(addr),
));
} else if let Flag(true) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("true"),
InstructionTextTokenContents::Integer(1),
));
} else if let Flag(false) = attr.value() {
attr_line.push(InstructionTextToken::new(
BnString::new("false"),
InstructionTextTokenContents::Integer(0),
));
// Fall-back cases
} else if let Some(value) = attr.u8_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.u16_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.udata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value.into()),
));
} else if let Some(value) = attr.sdata_value() {
let value_string = format!("{}", value);
attr_line.push(InstructionTextToken::new(
BnString::new(value_string),
InstructionTextTokenContents::Integer(value as u64),
));
} else {
let attr_string = format!("{:?}", attr.value());
attr_line.push(InstructionTextToken::new(
BnString::new(attr_string),
InstructionTextTokenContents::Text,
));
}
disassembly_lines.push(DisassemblyTextLine::from(attr_line));
}
disassembly_lines
}
fn process_tree<R: Reader>(
view: &BinaryView,
dwarf: &Dwarf<R>,
unit: &Unit<R>,
graph: &FlowGraph,
graph_parent: &FlowGraphNode,
die_node: EntriesTreeNode<R>,
) {
// Namespaces only - really interesting to look at!
// if (die_node.entry().tag() == constants::DW_TAG_namespace)
// || (die_node.entry().tag() == constants::DW_TAG_class_type)
// || (die_node.entry().tag() == constants::DW_TAG_compile_unit)
// || (die_node.entry().tag() == constants::DW_TAG_subprogram)
// {
let new_node = FlowGraphNode::new(graph);
let attr_string = get_info_string(view, dwarf, unit, die_node.entry());
new_node.set_disassembly_lines(&attr_string);
graph.append(&new_node);
graph_parent.add_outgoing_edge(
BranchType::UnconditionalBranch,
&new_node,
&EdgeStyle::default(),
);
let mut children = die_node.children();
while let Some(child) = children.next().unwrap() {
process_tree(view, dwarf, unit, graph, &new_node, child);
}
// }
}
fn dump_dwarf(bv: &BinaryView) {
let view = if bv.section_by_name(".debug_info").is_ok() {
bv.to_owned()
} else {
bv.parent_view().unwrap()
};
let graph = FlowGraph::new();
graph.set_option(FlowGraphOption::FlowGraphUsesBlockHighlights, true);
graph.set_option(FlowGraphOption::FlowGraphUsesInstructionHighlights, true);
let graph_root = FlowGraphNode::new(&graph);
graph_root.set_lines(vec!["Graph Root"]);
graph.append(&graph_root);
let endian = dwarfreader::get_endian(bv);
let section_reader = |section_id: SectionId| -> _ {
dwarfreader::create_section_reader(section_id, bv, endian, false)
};
let dwarf = Dwarf::load(&section_reader).unwrap();
let mut iter = dwarf.units();
while let Some(header) = iter.next().unwrap() {
let unit = dwarf.unit(header).unwrap();
let mut entries = unit.entries();
let mut depth = 0;
if let Some((delta_depth, entry)) = entries.next_dfs().unwrap() {
depth += delta_depth;
assert!(depth >= 0);
let mut tree = unit.entries_tree(Some(entry.offset())).unwrap();
let root = tree.root().unwrap();
process_tree(&view, &dwarf, &unit, &graph, &graph_root, root);
}
}
view.show_graph_report("DWARF", graph);
}
struct DWARFDump;
impl Command for DWARFDump {
fn action(&self, view: &BinaryView) {
dump_dwarf(view);
}
fn valid(&self, view: &BinaryView) -> bool {
is_valid(view)
}
}
#[no_mangle]
pub extern "C" fn UIPluginInit() -> bool {
register(
"DWARF Dump",
"Show embedded DWARF info as a tree structure for you to navigate",
DWARFDump {},
);
true
}
| get_info_string | identifier_name |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] that can only be added and not deleted.
/// The lengths of all [`ColumnData`] are equal, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary, create dummy data for showing
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
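// A small usage sketch of the sharing pattern: `clone` hands out another
// owner of the same underlying `TimeTable` (via `Rc`), so e.g. a UI panel
// and a data producer can observe the same series.
//
// let store = DataStore::new();
// let view = store.clone();                     // second owner, same table
// let pose = view.borrow().get_at_time(0, 1.0); // read column 0 at t = 1 s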
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores the previously found index to avoid unnecessary iteration when finding a time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance used when comparing two input times for equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks whether the time input has changed since the last index search.
/// If the time input is sufficiently close to the cached one, the cached index is reused without calling [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns index of first time that is greater or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
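// Editor's note: the commented-out cache writes in `get_index` and
// `get_index_under` cannot compile behind `&self`. One minimal fix, assuming
// the read-only signatures should stay, is interior mutability for the cache:
//
// cache: std::cell::Cell<Option<(Time, usize)>>, // field in `Timeline`
// ...
// self.cache.set(Some((time, index)));           // inside the `map` closure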
/// Similar to [`get_index`], but returns the index of the last time stamp that does not exceed the input time.
/// This is useful when making sure the returned time index never exceeds the given time, as
/// in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // avoid usize underflow when idx == 0
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns the range of indices that lie within the specified time range
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn | (timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).map(|val| val.clone())
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx).clone())
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns the data within the specified time range
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with a timestamp
/// smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is zero (the default),
/// `add` discards any data point whose timestamp does not exceed the last timestamp,
/// i.e. it guarantees strictly increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
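// A minimal usage sketch of the throttling above (illustrative only;
// `demo_sample_time` is a hypothetical helper, not part of the original API).
#[allow(dead_code)]
fn demo_sample_time() {
    let mut series = TimeSeries::<f32>::empty().with_sample_time(0.1);
    series.add(0.00, 1.0); // kept: the series was empty
    series.add(0.05, 2.0); // dropped: 0.0 + 0.1 > 0.05
    series.add(0.10, 3.0); // kept: 0.0 + 0.1 <= 0.10
    assert_eq!(2, series.time.len());
}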
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
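// (a sketch of option 2 follows below)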
| from_timeseries | identifier_name |
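// A sketch of option 2 from the design-options comment above (illustrative only;
// `Column`, `TypedColumn`, and `MixedTable` are hypothetical names, not part of
// this module): heterogeneous columns stored behind a trait object.
trait Column: std::fmt::Debug {
    fn len(&self) -> usize;
}

#[derive(Debug)]
struct TypedColumn<T> {
    data: Vec<T>,
}

impl<T: std::fmt::Debug> Column for TypedColumn<T> {
    fn len(&self) -> usize {
        self.data.len()
    }
}

#[derive(Debug, Default)]
struct MixedTable {
    // One shared Timeline would index into every boxed column.
    columns: Vec<Box<dyn Column>>,
}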
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] columns that can only be appended to, never deleted from.
/// All [`ColumnData`] have equal length, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary: create dummy data to display
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores the previously found index to avoid unnecessary iteration when finding a time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance used when comparing two input times for equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks whether the time input has changed since the last index search.
/// If the time input is sufficiently close to the cached one, the cached index can be
/// reused without repeating the search in [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns the index of the first time that is greater than or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Similar to [`get_index`], but returns the index of the last time that does not exceed
/// the input time. This is useful for making sure the returned time index never goes
/// past the given time, as in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp to 0: `idx - 1` would underflow usize when idx == 0
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns the range of indices that fall within the specified time range
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> |
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
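// Note: the `cache` field above is never written, because `get_index` and
// `get_index_under` take `&self` (the commented-out `self.cache = ...` lines
// would not compile). A sketch of one way to make the cache effective with
// interior mutability; `CachedTimeline` is hypothetical and unused elsewhere.
#[allow(dead_code)]
#[derive(Debug, Default)]
struct CachedTimeline {
    cache: std::cell::Cell<Option<(Time, usize)>>,
    vec: Vec<Time>,
}

#[allow(dead_code)]
impl CachedTimeline {
    fn get_index(&self, time: Time) -> Option<usize> {
        if let Some((prev, idx)) = self.cache.get() {
            if (time - prev).abs() <= Timeline::EPSILON {
                return Some(idx);
            }
        }
        let found = self.vec.iter().position(|&t| t >= time);
        if let Some(idx) = found {
            self.cache.set(Some((time, idx))); // writable through &self
        }
        found
    }
}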
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).cloned()
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx))
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with a timestamp
/// smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is zero (the default),
/// `add` discards any data point whose timestamp does not exceed the last timestamp,
/// i.e. it guarantees strictly increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
| {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
} | identifier_body |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] columns that can only be appended to, never deleted from.
/// All [`ColumnData`] have equal length, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary: create dummy data to display
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores the previously found index to avoid unnecessary iteration when finding a time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
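// Idiomatic Rust implements `From` and gets `Into` for free via the blanket impl.
// The equivalent is sketched below as a comment only: adding it while the manual
// `Into` impl above exists would conflict (E0119).
//
// impl From<Vec<Time>> for Timeline {
//     fn from(vec: Vec<Time>) -> Self {
//         Timeline { vec, ..Default::default() }
//     }
// }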
impl Timeline {
/// Tolerance used when comparing two input times for equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks whether the time input has changed since the last index search.
/// If the time input is sufficiently close to the cached one, the cached index can be
/// reused without repeating the search in [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns the index of the first time that is greater than or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| {
// self.cache = Some((time, index));
index
})
} else |
}
/// Similar to [`get_index`], but returns the index of the last time that does not exceed
/// the input time. This is useful for making sure the returned time index never goes
/// past the given time, as in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp to 0: `idx - 1` would underflow usize when idx == 0
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns the range of indices that fall within the specified time range
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
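// The scans in `get_index`/`get_index_under` are O(n) per lookup. Since `add`
// keeps `vec` sorted, a binary search suffices; a standalone sketch using the
// standard library's `partition_point` (not wired into the methods above):
#[allow(dead_code)]
fn get_index_binary(sorted: &[Time], time: Time) -> Option<usize> {
    // Index of the first element >= `time`, or None if every element is smaller.
    let idx = sorted.partition_point(|&t| t < time);
    (idx < sorted.len()).then_some(idx)
}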
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).cloned()
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx))
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with a timestamp
/// smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is zero (the default),
/// `add` discards any data point whose timestamp does not exceed the last timestamp,
/// i.e. it guarantees strictly increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
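// An additional test sketch (hypothetical) covering the boundary now handled by
// `saturating_sub` in `get_index_under`:
//
// #[test]
// fn index_under_before_first_sample() {
//     let tl = Timeline::new(vec![1.0, 2.0]);
//     // Querying below the first timestamp clamps to index 0 instead of underflowing.
//     assert_eq!(Some(0), tl.get_index_under(0.5));
// }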
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray?
| {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
} | conditional_block |
mod.rs | use std::{
cell::{Ref, RefCell},
ops::RangeInclusive,
rc::Rc,
};
pub type Time = f32;
use crate::Pose;
/// Aggregate of [`ColumnData`] columns that can only be appended to, never deleted from.
/// All [`ColumnData`] have equal length, making this effectively a 2D table.
pub type DataSet<T = f32> = Vec<ColumnData<T>>;
/// Easily accessible wrapper around [`TimeTable`] to be shared by multiple owners
pub struct DataStore(pub(crate) Rc<RefCell<TimeTable<Pose>>>);
impl Default for DataStore {
fn default() -> Self {
// Temporary: create dummy data to display
use crate::math::{cos, sin};
use std::f32::consts::PI;
let n = 10000;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
Self(Rc::new(RefCell::new(TimeTable::new(t, pose))))
}
}
impl DataStore {
pub fn new() -> Self {
Default::default()
}
pub fn clone(&self) -> Self {
DataStore(Rc::clone(&self.0))
}
pub fn borrow(&self) -> Ref<TimeTable<Pose>> {
self.0.borrow()
}
}
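// A minimal sketch of the sharing pattern `DataStore` enables (illustrative;
// `demo_shared_store` is a hypothetical helper). Implementing the `Clone` trait
// rather than the inherent `clone` above would also let generic code clone handles.
#[allow(dead_code)]
fn demo_shared_store() {
    let store = DataStore::new();
    let view = store.clone(); // both handles share one RefCell'd TimeTable
    let first_second = view.borrow().get_time_range(0.0, 1.0); // Option<Vec<Time>>
    let _ = (store, first_second);
}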
/// Single column of data
#[derive(Debug, Default, Clone)]
pub struct ColumnData<T> {
data: Vec<T>,
name: Option<String>,
}
impl<T> ColumnData<T> {
pub fn from_vec(data: impl Into<Vec<T>>) -> Self {
Self {
data: data.into(),
name: None,
}
}
pub fn len(&self) -> usize {
self.data.len()
}
pub fn add(&mut self, element: T) {
self.data.push(element);
}
pub fn get(&self, index: usize) -> Option<&T> {
self.data.get(index)
}
pub fn get_between(&self, range: RangeInclusive<usize>) -> &[T] {
&self.data[range]
}
}
/// Basic time vector for finding indices to look up within [`TimeSeries`] and [`TimeTable`]
#[derive(Debug, Default)]
struct Timeline {
/// Cache stores the previously found index to avoid unnecessary iteration when finding a time index
cache: Option<(Time, usize)>,
/// Actual vector
vec: Vec<Time>,
}
impl Into<Timeline> for Vec<Time> {
fn into(self) -> Timeline {
Timeline {
vec: self,
..Default::default()
}
}
}
impl Timeline {
/// Tolerance used when comparing two input times for equality
const EPSILON: f32 = 0.0005;
pub fn new(time_vec: impl Into<Vec<Time>>) -> Self {
Self {
vec: time_vec.into(),
..Default::default()
}
}
/// Adds a time element to the end
pub fn add(&mut self, time: Time) {
self.vec.push(time);
}
/// Checks whether the time input has changed since the last index search.
/// If the time input is sufficiently close to the cached one, the cached index can be
/// reused without repeating the search in [`get_index`]
///
/// [`get_index`]: Self::get_index
fn time_changed(&self, time: Time) -> bool {
self.cache
.map_or(true, |(prev, _)| (time - prev).abs() > Self::EPSILON)
}
/// Find the index that corresponds to the given time in seconds.
///
/// Returns the index of the first time that is greater than or equal to the specified time.
fn get_index(&self, time: Time) -> Option<usize> { | // self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Similar to [`get_index`], but returns the index of the last time that does not exceed
/// the input time. This is useful for making sure the returned time index never goes
/// past the given time, as in [`get_range`]
///
/// [`get_index`]: Self::get_index
/// [`get_range`]: Self::get_range
fn get_index_under(&self, time: Time) -> Option<usize> {
if self.time_changed(time) {
self.vec
.iter()
.position(|&t| t > time)
.map(|idx| idx.saturating_sub(1)) // clamp to 0: `idx - 1` would underflow usize when idx == 0
.map(|index| {
// self.cache = Some((time, index));
index
})
} else {
// unwrap here is ok, since time_changed always ensures cache is not None
Some(self.cache.unwrap().1)
}
}
/// Returns the range of indices that fall within the specified time range
pub fn get_range(&self, start: Time, end: Time) -> Option<RangeInclusive<usize>> {
if start < end {
if let Some(start) = self.get_index(start) {
if let Some(end) = self.get_index_under(end) {
return Some(start..=end);
}
}
}
None
}
pub fn get_range_raw(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.get_range(start, end)
.map(|range| self.vec[range].to_vec())
}
/// Length of the time vector
pub fn len(&self) -> usize {
self.vec.len()
}
}
#[derive(Debug, Default)]
pub struct TimeTable<T> {
time: Timeline,
data: DataSet<T>,
}
impl<T> Into<TimeTable<T>> for TimeSeries<T> {
fn into(self) -> TimeTable<T> {
TimeTable {
time: self.time,
data: vec![self.data],
}
}
}
impl<T: Clone> TimeTable<T> {
#[allow(dead_code)]
pub fn from_timeseries(timeseries: TimeSeries<T>) -> Self {
Self {
time: timeseries.time,
data: vec![timeseries.data],
}
}
}
impl<T: Clone> TimeTable<T> {
pub fn new(time: Vec<Time>, data: Vec<T>) -> Self {
TimeSeries::new(time, data).into()
}
#[allow(dead_code)]
pub fn get_column(&self, column: usize) -> Option<ColumnData<T>> {
self.data.get(column).cloned()
}
pub fn get_at_time(&self, column: usize, time: Time) -> Option<T> {
if let Some(idx) = self.time.get_index(time) {
self.data
.get(column)
.and_then(|vec| vec.get(idx))
.map(|el| el.to_owned())
} else {
None
}
}
pub fn get_time_range(&self, start: Time, end: Time) -> Option<Vec<Time>> {
self.time.get_range_raw(start, end)
}
/// Returns slice of data that is within the time range specified
pub fn get_range(&self, column: usize, start: Time, end: Time) -> Option<Vec<T>> {
if let Some(range) = self.time.get_range(start, end) {
self.data
.get(column)
.map(|vec| vec.get_between(range).to_owned())
} else {
None
}
}
}
#[derive(Debug, Default)]
pub struct TimeSeries<T> {
time: Timeline,
data: ColumnData<T>,
sample_time: Time,
}
impl<T: Clone> TimeSeries<T> {
/// Create a new [`TimeSeries`] from given array of `time` and `data`.
pub fn new(time: impl Into<Vec<Time>>, data: impl Into<Vec<T>>) -> Self {
let time = Timeline::new(time.into());
let data = ColumnData::from_vec(data);
let sample_time = 0.0;
if time.len() != data.len() {
panic!("Size of time and data are different!");
}
Self {
time,
data,
sample_time,
}
}
/// Create an empty [`TimeSeries`]
pub fn empty() -> Self {
Self {
time: Timeline::default(),
data: ColumnData {
data: Vec::new(),
name: None,
},
sample_time: 0.0,
}
}
/// Sets the `sample_time` of the [`TimeSeries`]. If `add` is called with a timestamp
/// smaller than the sum of the last timestamp and `sample_time`, `add` will
/// not push the data into the [`TimeSeries`]. When `sample_time` is zero (the default),
/// `add` discards any data point whose timestamp does not exceed the last timestamp,
/// i.e. it guarantees strictly increasing timestamps.
pub fn with_sample_time(mut self, sample_time: Time) -> Self {
self.sample_time = sample_time;
self
}
pub fn add(&mut self, time: Time, element: T) {
if self.time.vec.is_empty() {
self.time.add(time);
self.data.add(element);
} else if self.sample_time > 0.0 {
if self.time.vec.last().unwrap() + self.sample_time <= time {
self.time.add(time);
self.data.add(element);
}
} else if self.time.vec.last().unwrap() < &time {
self.time.add(time);
self.data.add(element);
}
}
/// Get data element for a given time
pub fn get_at_time(&self, time: Time) -> Option<T> {
self.time
.get_index(time)
.and_then(|idx| self.data.get(idx))
.map(|val| val.to_owned())
}
/// Returns slice of data that is within the time range specified
#[allow(dead_code)]
pub fn get_range(&self, start: Time, end: Time) -> Option<&[T]> {
self.time
.get_range(start, end)
.map(|range| self.data.get_between(range))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::math::{cos, sin};
use crate::Pose;
use std::f32::consts::PI;
fn dummy_pose() -> TimeSeries<Pose> {
let n = 5;
let dt = 0.01;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let pose: Vec<Pose> = t
.iter()
.map(|&t| Pose::new(30.0 * sin(t), 20.0 * cos(t), 2.0 * PI * sin(t)))
.collect();
TimeSeries::new(t, pose)
}
fn dummy_f32() -> TimeSeries<f32> {
let n = 5;
let dt = 1.0;
let t: Vec<f32> = (0..n).map(|n| n as f32 * dt).collect();
let data: Vec<f32> = t.iter().map(|&t| t * 3.0).collect();
TimeSeries::new(t, data)
}
#[test]
fn add_timeseries() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.5, 5.0);
ts.add(0.8, 15.0);
dbg!(&ts);
}
#[test]
fn check_index() {
// dbg!(&ts);
let ts = dummy_pose();
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // finding exactly matching time
assert_eq!(2, ts.time.get_index(0.02).unwrap()); // running again should give same result
assert_eq!(2, ts.time.get_index(0.015).unwrap()); // finding next closest time stamp
}
#[test]
fn check_range() {
let ts = dummy_f32();
assert_eq!(1, ts.time.get_index(1.0).unwrap());
assert_eq!(3, ts.time.get_index(2.1).unwrap());
assert_eq!(3, ts.time.get_index(2.9).unwrap());
assert_eq!(3, ts.time.get_index(3.0).unwrap());
assert_eq!(&[3.0, 6.0], ts.get_range(1.0, 2.9).unwrap());
assert_eq!(&[3.0, 6.0, 9.0], ts.get_range(1.0, 3.0).unwrap());
}
#[test]
fn series_to_table() {
let ts = dummy_f32();
let _table: TimeTable<f32> = ts.into();
}
#[test]
fn check_sample_time() {
let mut ts = TimeSeries::<f32>::empty();
ts.add(0.0, 1.0);
ts.add(0.0, 2.0); // This shouldn't be added
ts.add(0.5, 3.0);
ts.add(0.5, 4.0); // This shouldn't be added
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.5).unwrap());
let mut ts = TimeSeries::<f32>::empty().with_sample_time(0.1);
ts.add(0.0, 1.0);
ts.add(0.05, 2.0); // This shouldn't be added
ts.add(0.1, 3.0);
assert_eq!(0, ts.time.get_index(0.0).unwrap());
assert_eq!(1, ts.time.get_index(0.1).unwrap());
}
}
// TimeTable data format options:
// 1. Generational-arena -> Arena<TimeSeries>
// pros: easy to push and manage TimeSeries
// cons: Dependency, TimeSeries cannot contain different data types
// 2. Vec<Box<dyn TimeSeries>>
// pros: No dependency
// cons: Use of trait object
// 3. ndarray? | if self.time_changed(time) {
self.vec.iter().position(|&t| t >= time).map(|index| { | random_line_split |
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn usage() -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim.mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
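// Example invocations (illustrative; file names are hypothetical):
//
// mdl simulate model.stmx
// mdl simulate --vensim --no-output model.mdl
// mdl convert --to-xmile --output model.xmile model.stmx
// mdl equations --output eqns.tex model.stmx
// mdl debug --reference expected.tsv model.stmx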
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap();
vm.into_results()
}
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
if var.is_stock() {
if let Variable::Stock {
inflows, outflows,..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug | else {
let results = simulate(&project);
if !args.is_no_output {
results.print_tsv();
}
}
}
| {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} | conditional_block |
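// For reference, the `equations` subcommand emits LaTeX along these lines for a
// model with a single stock (illustrative output; variable names hypothetical):
//
// % main
// \begin{align*}
// \mathrm{stock}(t_0) & = 100 \\
// \mathrm{inflow} & = 5 \\
// \mathrm{stock}(t) & = \mathrm{stock}(t - dt) + \mathrm{inflow} \cdot dt
// \end{align*}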
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn usage() -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim.mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
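// The chained `if`/`else if` dispatch above could also be written as a `match`
// (illustrative refactor, not applied):
//
// match subcommand.as_str() {
//     "convert" => args.is_convert = true,
//     "simulate" => {}
//     "equations" => args.is_equations = true,
//     "debug" => args.is_debug = true,
//     other => {
//         eprintln!("error: unknown subcommand {}", other);
//         usage();
//     }
// }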
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap(); |
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
if var.is_stock() {
if let Variable::Stock {
inflows, outflows,..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} else {
let results = simulate(&project);
if !args.is_no_output {
results.print_tsv();
}
}
} | vm.into_results()
} | random_line_split |
main.rs | // Copyright 2021 The Simlin Authors. All rights reserved.
// Use of this source code is governed by the Apache License,
// Version 2.0, that can be found in the LICENSE file.
use std::fs::File;
use std::io::{BufReader, Write};
use std::rc::Rc;
use pico_args::Arguments;
use simlin_compat::engine::builtins::Loc;
use simlin_compat::engine::common::UnitError;
use simlin_compat::engine::datamodel::{Equation, Project as DatamodelProject};
use simlin_compat::engine::{
eprintln, serde, ErrorCode, Project, Results, Simulation, Variable, Vm,
};
use simlin_compat::prost::Message;
use simlin_compat::{load_csv, open_vensim, open_xmile, to_xmile};
const VERSION: &str = "1.0";
const EXIT_FAILURE: i32 = 1;
#[macro_export]
macro_rules! die(
($($arg:tt)*) => { {
use std;
eprintln!($($arg)*);
std::process::exit(EXIT_FAILURE)
} }
);
fn | () -> ! {
let argv0 = std::env::args()
.next()
.unwrap_or_else(|| "<mdl>".to_string());
die!(
concat!(
"mdl {}: Simulate system dynamics models.\n\
\n\
USAGE:\n",
" {} [SUBCOMMAND] [OPTION...] PATH\n",
"\n\
OPTIONS:\n",
" -h, --help show this message\n",
" --vensim model is a Vensim.mdl file\n",
" --to-xmile output should be XMILE not protobuf\n",
" --model-only for conversion, only output model instead of project\n",
" --output FILE path to write output file\n",
" --reference FILE reference TSV for debug subcommand\n",
" --no-output don't print the output (for benchmarking)\n",
"\n\
SUBCOMMANDS:\n",
" simulate Simulate a model and display output\n",
" convert Convert an XMILE or Vensim model to protobuf\n",
" equations Print the equations out\n",
" debug Output model equations interleaved with a reference run\n",
),
VERSION,
argv0
);
}
#[derive(Clone, Default, Debug)]
struct Args {
path: Option<String>,
output: Option<String>,
reference: Option<String>,
is_vensim: bool,
is_to_xmile: bool,
is_convert: bool,
is_model_only: bool,
is_no_output: bool,
is_equations: bool,
is_debug: bool,
}
fn parse_args() -> Result<Args, Box<dyn std::error::Error>> {
let mut parsed = Arguments::from_env();
if parsed.contains(["-h", "--help"]) {
usage();
}
let subcommand = parsed.subcommand()?;
if subcommand.is_none() {
eprintln!("error: subcommand required");
usage();
}
let mut args: Args = Default::default();
let subcommand = subcommand.unwrap();
if subcommand == "convert" {
args.is_convert = true;
} else if subcommand == "simulate" {
} else if subcommand == "equations" {
args.is_equations = true;
} else if subcommand == "debug" {
args.is_debug = true;
} else {
eprintln!("error: unknown subcommand {}", subcommand);
usage();
}
args.output = parsed.value_from_str("--output").ok();
args.reference = parsed.value_from_str("--reference").ok();
args.is_no_output = parsed.contains("--no-output");
args.is_model_only = parsed.contains("--model-only");
args.is_to_xmile = parsed.contains("--to-xmile");
args.is_vensim = parsed.contains("--vensim");
let free_arguments = parsed.finish();
if free_arguments.is_empty() {
eprintln!("error: input path required");
usage();
}
args.path = free_arguments[0].to_str().map(|s| s.to_owned());
Ok(args)
}
fn simulate(project: &DatamodelProject) -> Results {
let project_datamodel = project.clone();
let project = Rc::new(Project::from(project.clone()));
if !project.errors.is_empty() {
for err in project.errors.iter() {
eprintln!("project error: {}", err);
}
}
let mut found_model_error = false;
for (model_name, model) in project.models.iter() {
let model_datamodel = project_datamodel.get_model(model_name);
if model_datamodel.is_none() {
continue;
}
let model_datamodel = model_datamodel.unwrap();
let mut found_var_error = false;
for (ident, errors) in model.get_variable_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
found_var_error = true;
for error in errors {
eprintln!();
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
eprintln!(" {}", eqn);
let space = " ".repeat(error.start as usize);
let underline = "~".repeat((error.end - error.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"error in model '{}' variable '{}': {}",
model_name, ident, error.code
);
}
}
for (ident, errors) in model.get_unit_errors() {
assert!(!errors.is_empty());
let var = model_datamodel.get_variable(&ident).unwrap();
for error in errors {
eprintln!();
let (eqn, loc, details) = match error {
UnitError::DefinitionError(error, details) => {
let details = if let Some(details) = details {
format!("{} -- {}", error.code, details)
} else {
format!("{}", error.code)
};
(
var.get_units(),
Loc::new(error.start.into(), error.end.into()),
details,
)
}
UnitError::ConsistencyError(code, loc, details) => {
let (eqn, loc, code) =
if let Some(Equation::Scalar(eqn)) = var.get_equation() {
(Some(eqn), loc, code)
} else {
(None, loc, code)
};
let details = match details {
Some(details) => format!("{} -- {}", code, details),
None => format!("{}", code),
};
(eqn, loc, details)
}
};
if let Some(eqn) = eqn {
eprintln!(" {}", eqn);
let space = " ".repeat(loc.start as usize);
let underline = "~".repeat((loc.end - loc.start) as usize);
eprintln!(" {}{}", space, underline);
}
eprintln!(
"units error in model '{}' variable '{}': {}",
model_name, ident, details
);
}
}
if let Some(errors) = &model.errors {
for error in errors.iter() {
if error.code == ErrorCode::VariablesHaveErrors && found_var_error {
continue;
}
eprintln!("error in model {}: {}", model_name, error);
found_model_error = true;
}
}
}
let sim = match Simulation::new(&project, "main") {
Ok(sim) => sim,
Err(err) => {
            if !(err.code == ErrorCode::NotSimulatable && found_model_error) {
eprintln!("error: {}", err);
}
std::process::exit(1);
}
};
let compiled = sim.compile().unwrap();
let mut vm = Vm::new(compiled).unwrap();
vm.run_to_end().unwrap();
vm.into_results()
}
fn main() {
let args = match parse_args() {
Ok(args) => args,
Err(err) => {
eprintln!("error: {}", err);
usage();
}
};
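    // parse_args requires an input path, so this stdin fallback only applies
    // when the argument was not valid UTF-8 (assumption based on to_str above).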
let file_path = args.path.unwrap_or_else(|| "/dev/stdin".to_string());
let file = File::open(&file_path).unwrap();
let mut reader = BufReader::new(file);
let project = if args.is_vensim {
open_vensim(&mut reader)
} else {
open_xmile(&mut reader)
};
if project.is_err() {
eprintln!("model '{}' error: {}", &file_path, project.err().unwrap());
return;
};
let project = project.unwrap();
if args.is_equations {
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
let project = Rc::new(Project::from(project));
        for (model_name, model) in project.models.iter().filter(|(_, model)| !model.implicit) {
output_file
.write_fmt(format_args!("% {}\n", model_name))
.unwrap();
output_file
.write_fmt(format_args!("\\begin{{align*}}\n"))
.unwrap();
let var_count = model.variables.len();
for (i, (var_name, var)) in model.variables.iter().enumerate() {
let subscript = if var.is_stock() { "(t_0)" } else { "" };
let var_name = str::replace(var_name, "_", "\\_");
                let continuation = if !var.is_stock() && i == var_count - 1 {
""
} else {
" \\\\"
};
let eqn = var
.ast()
.map(|ast| ast.to_latex())
.unwrap_or_else(|| "\\varnothing".to_owned());
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}{} & = {}{}\n",
var_name, subscript, eqn, continuation
))
.unwrap();
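                // For stocks, also emit the Euler integration step:
                // stock(t) = stock(t - dt) + (inflows - outflows) * dt.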
if var.is_stock() {
if let Variable::Stock {
                        inflows, outflows, ..
} = var
{
let continuation = if i == var_count - 1 { "" } else { " \\\\" };
let use_parens = inflows.len() + outflows.len() > 1;
let mut eqn = inflows
.iter()
.map(|inflow| {
format!("\\mathrm{{{}}}", str::replace(inflow, "_", "\\_"))
})
.collect::<Vec<_>>()
.join(" + ");
                        if !outflows.is_empty() {
eqn = format!(
"{}-{}",
eqn,
outflows
.iter()
.map(|inflow| format!(
"\\mathrm{{{}}}",
str::replace(inflow, "_", "\\_")
))
.collect::<Vec<_>>()
.join(" - ")
);
}
if use_parens {
eqn = format!("({}) ", eqn);
} else {
eqn = format!("{} \\cdot ", eqn);
}
output_file
.write_fmt(format_args!(
"\\mathrm{{{}}}(t) & = \\mathrm{{{}}}(t - dt) + {} dt{}\n",
var_name, var_name, eqn, continuation
))
.unwrap();
}
}
}
output_file
.write_fmt(format_args!("\\end{{align*}}\n"))
.unwrap();
}
} else if args.is_convert {
let pb_project = serde::serialize(&project);
let mut buf: Vec<u8> = if args.is_model_only {
            if pb_project.models.len() != 1 {
die!("--model-only specified, but more than 1 model in this project");
}
let mut buf = Vec::with_capacity(pb_project.models[0].encoded_len());
pb_project.models[0].encode(&mut buf).unwrap();
buf
} else {
let mut buf = Vec::with_capacity(pb_project.encoded_len());
pb_project.encode(&mut buf).unwrap();
buf
};
if args.is_to_xmile {
match to_xmile(&project) {
Ok(s) => {
buf = s.into_bytes();
buf.push(b'\n');
}
Err(err) => {
die!("error converting to XMILE: {}", err);
}
}
}
let mut output_file =
File::create(&args.output.unwrap_or_else(|| "/dev/stdout".to_string())).unwrap();
output_file.write_all(&buf).unwrap();
} else if args.is_debug {
if args.reference.is_none() {
eprintln!("missing required argument --reference FILE");
std::process::exit(1);
}
let ref_path = args.reference.unwrap();
let reference = load_csv(&ref_path, b'\t').unwrap();
let results = simulate(&project);
results.print_tsv_comparison(Some(&reference));
} else {
let results = simulate(&project);
        if !args.is_no_output {
results.print_tsv();
}
}
}
| usage | identifier_name |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
        if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
    pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
        if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) |
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
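            // The root crop rect is effectively unbounded (f32::MAX extents);
            // child widgets presumably narrow it as they draw.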
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
    /// Find the first widget under the cursor, i.e. the last to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
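/// Breadth-first iterator over the widget tree, starting at the root.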
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| {
self.needs_redraw = true;
} | identifier_body |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
        if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
    pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
        if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
    /// Find the first widget under the cursor, i.e. the last to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
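    // Route an event by Target: the root widget, a single widget, an entire
    // subtree, or bubbling up through ancestors until a handler consumes it.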
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn | (root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| new | identifier_name |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
        if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
    pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
        if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
    /// Find the first widget under the cursor, i.e. the last to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
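    /// Enable or disable drawing of widget bounds, then request a redraw.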
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef),
}
pub struct WidgetsUnderCursor {
point: Point,
dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) |
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
}
| {
return Some(widget_ref.clone());
} | conditional_block |
ui.rs | use std::collections::{HashSet, HashMap, VecDeque};
use std::any::{Any, TypeId};
use std::rc::Rc;
use std::cell::RefCell;
use cassowary::Constraint;
use cassowary::strength::*;
use glutin;
use window::Window;
use app::App;
use widget::{WidgetRef, WidgetBuilder};
use layout::{LimnSolver, LayoutChanged, LayoutVars, ExactFrame};
use layout::constraint::*;
use geometry::{Point, Rect, Size};
use resources::WidgetId;
use event::Target;
use render::WebRenderContext;
/// If true, the constraint that matches the root layout size to the window size
/// is required. This can be useful for debugging but can result in panics from resizing the window.
const WINDOW_CONSTRAINT_REQUIRED: bool = false;
pub struct Ui {
pub(crate) root: WidgetRef,
widget_map: HashMap<WidgetId, WidgetRef>,
pub(crate) solver: LimnSolver,
pub(crate) render: WebRenderContext,
needs_redraw: bool,
should_close: bool,
debug_draw_bounds: bool,
window: Rc<RefCell<Window>>,
window_constraints: Vec<Constraint>,
}
impl Ui {
pub(super) fn new(mut window: Window, events_loop: &glutin::EventsLoop) -> Self {
let mut root = WidgetBuilder::new("window");
root.layout().set_container(ExactFrame);
root.layout().add(top_left(Point::zero()));
        if !WINDOW_CONSTRAINT_REQUIRED {
let mut root_layout = root.layout();
root_layout.edit_right().strength(REQUIRED - 1.0);
root_layout.edit_bottom().strength(REQUIRED - 1.0);
}
let render = WebRenderContext::new(&mut window, events_loop);
Ui {
widget_map: HashMap::new(),
root: root.into(),
solver: LimnSolver::new(),
render: render,
needs_redraw: true,
should_close: false,
debug_draw_bounds: false,
window: Rc::new(RefCell::new(window)),
window_constraints: Vec::new(),
}
}
pub fn get_widget(&self, widget_id: WidgetId) -> Option<WidgetRef> {
self.widget_map.get(&widget_id).map(|widget| widget.clone())
}
pub fn get_root(&self) -> WidgetRef {
self.root.clone()
}
    pub fn event<T: 'static>(&self, data: T) {
self.get_root().event(data);
}
pub fn close(&mut self) {
self.should_close = true;
}
pub(super) fn should_close(&self) -> bool {
self.should_close
}
pub(super) fn resize_window_to_fit(&mut self) {
let window_dims = self.get_root_dims();
self.window.borrow_mut().resize(window_dims.width as u32, window_dims.height as u32);
}
pub fn get_root_dims(&self) -> Size {
let root = self.get_root();
let mut dims = root.bounds().size;
// use min size to prevent window size from being set to 0 (X crashes)
dims.width = f32::max(100.0, dims.width);
dims.height = f32::max(100.0, dims.height);
dims
}
pub(super) fn window_resized(&mut self, window_dims: Size) {
let window_size = self.window.borrow_mut().size_u32();
self.render.window_resized(window_size);
let mut root = self.get_root();
if WINDOW_CONSTRAINT_REQUIRED {
let window_constraints = root.layout().create_constraint(size(window_dims));
{
let window_constraints = window_constraints.clone();
root.update_layout(|layout| {
for constraint in self.window_constraints.drain(..) {
layout.remove_constraint(constraint);
}
layout.add(window_constraints);
});
}
self.window_constraints = window_constraints;
} else {
root.update_layout(|layout| {
layout.edit_right().set(window_dims.width);
layout.edit_bottom().set(window_dims.height);
});
}
self.needs_redraw = true;
}
pub fn check_layout_changes(&mut self) {
let changes = self.solver.fetch_changes();
debug!("layout has {} changes", changes.len());
        if !changes.is_empty() {
self.event(LayoutChanged(changes));
}
}
pub fn redraw(&mut self) {
self.needs_redraw = true;
}
pub fn needs_redraw(&self) -> bool {
self.needs_redraw
}
pub(super) fn draw_if_needed(&mut self) {
if self.needs_redraw {
self.draw();
self.needs_redraw = false;
}
}
fn draw(&mut self) {
let window_size = self.window.borrow_mut().size_f32();
let (builder, resources) = {
let mut renderer = self.render.render_builder(window_size);
let crop_to = Rect::new(Point::zero(), Size::new(::std::f32::MAX, ::std::f32::MAX));
self.root.widget_mut().draw(crop_to, &mut renderer);
if self.debug_draw_bounds {
self.root.widget_mut().draw_debug(&mut renderer);
}
(renderer.builder, renderer.resources)
};
self.render.set_display_list(builder, resources, window_size);
self.render.generate_frame();
}
// Call after drawing
pub(super) fn update(&mut self) {
self.render.update(self.window.borrow_mut().size_u32());
let window = self.window.borrow_mut();
window.swap_buffers();
}
pub fn widgets_bfs(&self) -> WidgetsBfs {
WidgetsBfs::new(self.get_root())
}
pub fn widgets_under_cursor(&mut self, point: Point) -> WidgetsUnderCursor {
WidgetsUnderCursor::new(point, self.get_root())
}
    /// Find the first widget under the cursor, i.e. the last to be drawn that is under the cursor
pub fn widget_under_cursor(&mut self, point: Point) -> Option<WidgetRef> {
self.widgets_under_cursor(point).next()
}
fn handle_widget_event(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) -> bool {
let handled = widget_ref.trigger_event(self, type_id, data);
if widget_ref.has_updated() {
self.needs_redraw = true;
widget_ref.set_updated(false);
}
handled
}
pub(super) fn handle_event(&mut self, address: Target, type_id: TypeId, data: &Any) {
match address {
Target::Root => {
let root = self.get_root();
self.handle_widget_event(root, type_id, data);
}
Target::Widget(widget_ref) => {
self.handle_widget_event(widget_ref, type_id, data);
}
Target::SubTree(widget_ref) => {
self.handle_event_subtree(widget_ref, type_id, data);
}
Target::BubbleUp(widget_ref) => {
let mut maybe_widget_ref = Some(widget_ref);
while let Some(widget_ref) = maybe_widget_ref {
if self.handle_widget_event(widget_ref.clone(), type_id, data) {
break;
}
maybe_widget_ref = widget_ref.parent();
}
}
}
}
fn handle_event_subtree(&mut self, widget_ref: WidgetRef, type_id: TypeId, data: &Any) {
self.handle_widget_event(widget_ref.clone(), type_id, data);
let children = &widget_ref.children();
for child in children {
self.handle_event_subtree(child.clone(), type_id, data);
}
}
pub fn set_debug_draw_bounds(&mut self, debug_draw_bounds: bool) {
self.debug_draw_bounds = debug_draw_bounds;
self.redraw();
}
pub fn debug_widget_positions(&self) {
println!("WIDGET POSITIONS");
for widget_ref in self.widgets_bfs() {
let bounds = widget_ref.bounds();
let name = widget_ref.name();
println!("{:?} {:?}", name, bounds);
}
}
}
#[derive(Clone)]
pub struct RegisterWidget(pub WidgetRef);
#[derive(Clone)]
pub struct RemoveWidget(pub WidgetRef);
impl App {
pub fn add_ui_handlers(&mut self) {
self.add_handler_fn(|event: &RegisterWidget, args| {
let event = event.clone();
let RegisterWidget(widget_ref) = event;
args.ui.widget_map.insert(widget_ref.id(), widget_ref.clone());
});
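        // Tear down the widget's layout and flush any resulting layout
        // changes before removing it from the widget map.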
self.add_handler_fn(|event: &RemoveWidget, args| {
let event = event.clone();
let RemoveWidget(widget_ref) = event;
args.ui.solver.remove_layout(widget_ref.id().0);
args.ui.check_layout_changes();
args.ui.widget_map.remove(&widget_ref.id());
});
}
}
pub struct WidgetAttachedEvent;
pub struct WidgetDetachedEvent;
pub struct ChildAttachedEvent(pub WidgetId, pub LayoutVars);
pub enum ChildrenUpdatedEvent {
Added(WidgetRef),
Removed(WidgetRef), | dfs: WidgetsDfsPostReverse,
}
impl WidgetsUnderCursor {
fn new(point: Point, root: WidgetRef) -> Self {
WidgetsUnderCursor {
point: point,
dfs: WidgetsDfsPostReverse::new(root),
}
}
}
impl Iterator for WidgetsUnderCursor {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
for widget_ref in self.dfs.by_ref() {
let widget = &widget_ref.widget();
if widget.is_under_cursor(self.point) {
return Some(widget_ref.clone());
}
}
None
}
}
// Iterates in reverse of draw order, that is, depth first post order,
// with siblings in reverse of insertion order
struct WidgetsDfsPostReverse {
stack: Vec<WidgetRef>,
discovered: HashSet<WidgetRef>,
finished: HashSet<WidgetRef>,
}
impl WidgetsDfsPostReverse {
fn new(root: WidgetRef) -> Self {
WidgetsDfsPostReverse {
stack: vec![root],
discovered: HashSet::new(),
finished: HashSet::new(),
}
}
}
impl Iterator for WidgetsDfsPostReverse {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
while let Some(widget_ref) = self.stack.last().cloned() {
if self.discovered.insert(widget_ref.clone()) {
for child in &widget_ref.children() {
self.stack.push(child.clone());
}
} else {
self.stack.pop();
if self.finished.insert(widget_ref.clone()) {
return Some(widget_ref.clone());
}
}
}
None
}
}
pub struct WidgetsBfs {
queue: VecDeque<WidgetRef>,
}
impl WidgetsBfs {
fn new(root: WidgetRef) -> Self {
let mut queue = VecDeque::new();
queue.push_front(root);
WidgetsBfs { queue: queue }
}
}
impl Iterator for WidgetsBfs {
type Item = WidgetRef;
fn next(&mut self) -> Option<WidgetRef> {
if let Some(widget_ref) = self.queue.pop_front() {
for child in &widget_ref.children() {
self.queue.push_back(child.clone());
}
Some(widget_ref)
} else {
None
}
}
} | }
pub struct WidgetsUnderCursor {
point: Point, | random_line_split |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
    if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
    if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> |
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
    } else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
            if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
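    // `collect()` builds the Merkle root from the leaf hashes, relying on the
    // `FromIterator` impl of zebra's merkle root type.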
let merkle_root = transaction_hashes.iter().cloned().collect();
    if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
    if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
| {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
} | identifier_body |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
    if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
    if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
    } else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn | (
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
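// Illustration (assumed values): with a 3.125 ZEC block subsidy and 0.01 ZEC
// of fees, `right` is 3.135 ZEC, the most the coinbase may claim here.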
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
| miner_fees_are_valid | identifier_name |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> { | //
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else {
// Future halving, with no founders reward or funding streams
Ok(())
}
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
} | // # Consensus
//
// > `solution` MUST represent a valid Equihash solution. | random_line_split |
check.rs | //! Consensus check functions
use std::{collections::HashSet, sync::Arc};
use chrono::{DateTime, Utc};
use zebra_chain::{
amount::{Amount, Error as AmountError, NonNegative},
block::{Block, Hash, Header, Height},
parameters::{Network, NetworkUpgrade},
transaction,
work::{difficulty::ExpandedDifficulty, equihash},
};
use crate::{error::*, parameters::SLOW_START_INTERVAL};
use super::subsidy;
/// Checks if there is exactly one coinbase transaction in `Block`,
/// and if that coinbase transaction is the first transaction in the block.
/// Returns the coinbase transaction if successful.
///
/// > A transaction that has a single transparent input with a null prevout field,
/// > is called a coinbase transaction. Every block has a single coinbase
/// > transaction as the first transaction in the block.
///
/// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
pub fn coinbase_is_first(block: &Block) -> Result<Arc<transaction::Transaction>, BlockError> {
// # Consensus
//
// > A block MUST have at least one transaction
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
let first = block
.transactions
.get(0)
.ok_or(BlockError::NoTransactions)?;
// > The first transaction in a block MUST be a coinbase transaction,
// > and subsequent transactions MUST NOT be coinbase transactions.
//
// <https://zips.z.cash/protocol/protocol.pdf#blockheader>
//
// > A transaction that has a single transparent input with a null prevout
// > field, is called a coinbase transaction.
//
// <https://zips.z.cash/protocol/protocol.pdf#coinbasetransactions>
let mut rest = block.transactions.iter().skip(1);
if !first.is_coinbase() {
Err(TransactionError::CoinbasePosition)?;
}
// > A transparent input in a non-coinbase transaction MUST NOT have a null prevout
//
// <https://zips.z.cash/protocol/protocol.pdf#txnconsensus>
if !rest.all(|tx| tx.is_valid_non_coinbase()) {
Err(TransactionError::CoinbaseAfterFirst)?;
}
Ok(first.clone())
}
/// Returns `Ok(ExpandedDifficulty)` if the `difficulty_threshold` of `header` is at least as difficult as
/// the target difficulty limit for `network` (PoWLimit)
///
/// If the header difficulty threshold is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_threshold_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<ExpandedDifficulty, BlockError> {
let difficulty_threshold = header
.difficulty_threshold
.to_expanded()
.ok_or(BlockError::InvalidDifficulty(*height, *hash))?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// The PowLimit check is part of `Threshold()` in the spec, but it doesn't
// actually depend on any previous blocks.
if difficulty_threshold > ExpandedDifficulty::target_difficulty_limit(network) {
Err(BlockError::TargetDifficultyLimit(
*height,
*hash,
difficulty_threshold,
network,
ExpandedDifficulty::target_difficulty_limit(network),
))?;
}
Ok(difficulty_threshold)
}
/// Returns `Ok(())` if `hash` passes:
/// - the target difficulty limit for `network` (PoWLimit), and
/// - the difficulty filter,
/// based on the fields in `header`.
///
/// If the block is invalid, returns an error containing `height` and `hash`.
pub fn difficulty_is_valid(
header: &Header,
network: Network,
height: &Height,
hash: &Hash,
) -> Result<(), BlockError> {
let difficulty_threshold = difficulty_threshold_is_valid(header, network, height, hash)?;
// Note: the comparison in this function is a u256 integer comparison, like
// zcashd and bitcoin. Greater values represent *less* work.
// # Consensus
//
// > The block MUST pass the difficulty filter.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
//
// The difficulty filter is also context-free.
if hash > &difficulty_threshold {
Err(BlockError::DifficultyFilter(
*height,
*hash,
difficulty_threshold,
network,
))?;
}
Ok(())
}
/// Returns `Ok(())` if the `EquihashSolution` is valid for `header`
pub fn equihash_solution_is_valid(header: &Header) -> Result<(), equihash::Error> {
// # Consensus
//
// > `solution` MUST represent a valid Equihash solution.
//
// https://zips.z.cash/protocol/protocol.pdf#blockheader
header.solution.check(header)
}
/// Returns `Ok(())` if the block subsidy in `block` is valid for `network`
///
/// [3.9]: https://zips.z.cash/protocol/protocol.pdf#subsidyconcepts
pub fn subsidy_is_valid(block: &Block, network: Network) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
// Validate funding streams
let Some(halving_div) = subsidy::general::halving_divisor(height, network) else {
// Far future halving, with no founders reward or funding streams
return Ok(());
};
let canopy_activation_height = NetworkUpgrade::Canopy
.activation_height(network)
.expect("Canopy activation height is known");
if height < SLOW_START_INTERVAL {
unreachable!(
"unsupported block height: callers should handle blocks below {:?}",
SLOW_START_INTERVAL
)
} else if halving_div.count_ones() != 1 {
unreachable!("invalid halving divisor: the halving divisor must be a non-zero power of two")
} else if height < canopy_activation_height {
// Founders rewards are paid up to Canopy activation, on both mainnet and testnet.
// But we checkpoint in Canopy so founders reward does not apply for Zebra.
unreachable!("we cannot verify consensus rules before Canopy activation");
} else if halving_div < 4 {
// Funding streams are paid from Canopy activation to the second halving
// Note: Canopy activation is at the first halving on mainnet, but not on testnet
// ZIP-1014 only applies to mainnet, ZIP-214 contains the specific rules for testnet
// funding stream amount values
let funding_streams = subsidy::funding_streams::funding_stream_values(height, network)
.expect("We always expect a funding stream hashmap response even if empty");
// # Consensus
//
// > [Canopy onward] The coinbase transaction at block height `height`
// > MUST contain at least one output per funding stream `fs` active at `height`,
// > that pays `fs.Value(height)` zatoshi in the prescribed way to the stream's
// > recipient address represented by `fs.AddressList[fs.AddressIndex(height)]
//
// https://zips.z.cash/protocol/protocol.pdf#fundingstreams
for (receiver, expected_amount) in funding_streams {
let address =
subsidy::funding_streams::funding_stream_address(height, network, receiver);
let has_expected_output =
subsidy::funding_streams::filter_outputs_by_address(coinbase, address)
.iter()
.map(zebra_chain::transparent::Output::value)
.any(|value| value == expected_amount);
if !has_expected_output {
Err(SubsidyError::FundingStreamNotFound)?;
}
}
Ok(())
} else |
}
/// Returns `Ok(())` if the miner fees consensus rule is valid.
///
/// [7.1.2]: https://zips.z.cash/protocol/protocol.pdf#txnconsensus
pub fn miner_fees_are_valid(
block: &Block,
network: Network,
block_miner_fees: Amount<NonNegative>,
) -> Result<(), BlockError> {
let height = block.coinbase_height().ok_or(SubsidyError::NoCoinbase)?;
let coinbase = block.transactions.get(0).ok_or(SubsidyError::NoCoinbase)?;
let transparent_value_balance: Amount = subsidy::general::output_amounts(coinbase)
.iter()
.sum::<Result<Amount<NonNegative>, AmountError>>()
.map_err(|_| SubsidyError::SumOverflow)?
.constrain()
.expect("positive value always fit in `NegativeAllowed`");
let sapling_value_balance = coinbase.sapling_value_balance().sapling_amount();
let orchard_value_balance = coinbase.orchard_value_balance().orchard_amount();
let block_subsidy = subsidy::general::block_subsidy(height, network)
.expect("a valid block subsidy for this height and network");
// # Consensus
//
// > The total value in zatoshi of transparent outputs from a coinbase transaction,
// > minus vbalanceSapling, minus vbalanceOrchard, MUST NOT be greater than the value
// > in zatoshi of block subsidy plus the transaction fees paid by transactions in this block.
//
// https://zips.z.cash/protocol/protocol.pdf#txnconsensus
let left = (transparent_value_balance - sapling_value_balance - orchard_value_balance)
.map_err(|_| SubsidyError::SumOverflow)?;
let right = (block_subsidy + block_miner_fees).map_err(|_| SubsidyError::SumOverflow)?;
if left > right {
Err(SubsidyError::InvalidMinerFees)?;
}
Ok(())
}
/// Returns `Ok(())` if `header.time` is less than or equal to
/// 2 hours in the future, according to the node's local clock (`now`).
///
/// This is a non-deterministic rule, as clocks vary over time, and
/// between different nodes.
///
/// "In addition, a full validator MUST NOT accept blocks with nTime
/// more than two hours in the future according to its clock. This
/// is not strictly a consensus rule because it is nondeterministic,
/// and clock time varies between nodes. Also note that a block that
/// is rejected by this rule at a given point in time may later be
/// accepted." [§7.5][7.5]
///
/// [7.5]: https://zips.z.cash/protocol/protocol.pdf#blockheader
///
/// If the header time is invalid, returns an error containing `height` and `hash`.
pub fn time_is_valid_at(
header: &Header,
now: DateTime<Utc>,
height: &Height,
hash: &Hash,
) -> Result<(), zebra_chain::block::BlockTimeError> {
header.time_is_valid_at(now, height, hash)
}
/// Check Merkle root validity.
///
/// `transaction_hashes` is a precomputed list of transaction hashes.
///
/// # Consensus rules:
///
/// - A SHA-256d hash in internal byte order. The merkle root is derived from the
/// hashes of all transactions included in this block, ensuring that none of
/// those transactions can be modified without modifying the header. [7.6]
///
/// # Panics
///
/// - If block does not have a coinbase transaction.
///
/// [ZIP-244]: https://zips.z.cash/zip-0244
/// [7.1]: https://zips.z.cash/protocol/nu5.pdf#txnencodingandconsensus
/// [7.6]: https://zips.z.cash/protocol/nu5.pdf#blockheader
pub fn merkle_root_validity(
network: Network,
block: &Block,
transaction_hashes: &[transaction::Hash],
) -> Result<(), BlockError> {
// TODO: deduplicate zebra-chain and zebra-consensus errors (#2908)
block
.check_transaction_network_upgrade_consistency(network)
.map_err(|_| BlockError::WrongTransactionConsensusBranchId)?;
let merkle_root = transaction_hashes.iter().cloned().collect();
if block.header.merkle_root != merkle_root {
return Err(BlockError::BadMerkleRoot {
actual: merkle_root,
expected: block.header.merkle_root,
});
}
// Bitcoin's transaction Merkle trees are malleable, allowing blocks with
// duplicate transactions to have the same Merkle root as blocks without
// duplicate transactions.
//
// Collecting into a HashSet deduplicates, so this checks that there are no
// duplicate transaction hashes, preventing Merkle root malleability.
//
// ## Full Block Validation
//
// Duplicate transactions should cause a block to be
// rejected, as duplicate transactions imply that the block contains a
// double-spend. As a defense-in-depth, however, we also check that there
// are no duplicate transaction hashes.
//
// ## Checkpoint Validation
//
// To prevent malleability (CVE-2012-2459), we also need to check
// whether the transaction hashes are unique.
if transaction_hashes.len() != transaction_hashes.iter().collect::<HashSet<_>>().len() {
return Err(BlockError::DuplicateTransaction);
}
Ok(())
}
| {
// Future halving, with no founders reward or funding streams
Ok(())
} | conditional_block |
alias.rs | err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
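// Decides whether binding `b` can be silently copied to resolve an aliasing
// conflict: explicit references are never copied; other bindings are copied
// only if their type allows an implicit copy, with a warning when that copy
// looks expensive. Returns true when no copy is possible.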
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
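// Builds one binding per call argument, rooted like the argument expression,
// then rejects calls where a closure might alias an unsafely-rooted argument,
// where two arguments may alias each other, or where a root variable is also
// passed by mutable reference.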
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
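// Each arm re-roots its pattern bindings in the matched expression. The
// invalidation set is reset per arm and the results are merged afterwards,
// since any one arm may run.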
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
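// Conservative reachability test: could a value of type `haystack` contain
// the type named by `needle` (through tags, boxes, records, tuples, or
// closures)? `mut` records whether mutable storage was traversed on the way.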
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
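// Collects every name bound by a pattern, tagging each with the mutability
// context it was found under, so aliases rooted in the pattern can be checked.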
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) { | let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set); | random_line_split |
|
alias.rs | let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) {
let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set);
}
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn unsafe_set(from: option::t<unsafe_ty>) -> [unsafe_ty] {
alt from { some(t) { [t] } _ { [] } }
}
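// Linear scan of the invalidation list for an entry about `id`.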
fn find_invalid(id: node_id, lst: list<@invalid>)
-> option::t<@invalid> {
let cur = lst;
while true {
alt cur {
list::nil. { break; }
list::cons(head, tail) {
if head.node_id == id { ret some(head); }
cur = *tail;
}
}
}
ret none;
}
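// Merges the invalidations added to `src` since `stop` into `dest`, skipping
// entries that are already present.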
fn append_invalid(dest: list<@invalid>, src: list<@invalid>,
stop: list<@invalid>) -> list<@invalid> {
let cur = src, dest = dest;
while cur != stop {
alt cur {
list::cons(head, tail) {
if is_none(find_invalid(head.node_id, dest)) {
dest = list::cons(head, @dest);
}
cur = *tail;
}
}
}
ret dest;
}
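// Keeps only the invalidations that still refer to a binding in `bs`.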
fn filter_invalid(src: list<@invalid>, bs: [binding]) -> list<@invalid> {
let out = list::nil, cur = src;
while cur != list::nil {
alt cur {
list::cons(head, tail) {
let p = vec::position_pred(bs, {|b| b.node_id == head.node_id});
if !is_none(p) { out = list::cons(head, @out); }
cur = *tail;
}
}
}
ret out;
}
fn err(cx: ctx, sp: span, err: str) {
if !cx.silent || !cx.tcx.sess.has_errors() | {
cx.tcx.sess.span_err(sp, err);
} | conditional_block |
|
alias.rs | err(*cx, sp, "can not pass a dynamically-sized type by value");
}
}
// Blocks need to obey any restrictions from the enclosing scope, and may
// be called multiple times.
let proto = ty::ty_fn_proto(cx.tcx, fty);
if proto == ast::proto_block {
check_loop(*cx, sc) {|| v.visit_block(body, sc, v);}
} else {
let sc = {bs: [], invalid: @mutable list::nil};
v.visit_block(body, sc, v);
}
}
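// Expression dispatch: calls, alt, for, assignments, swaps, moves, if, and
// loops each get alias-specific checking; everything else falls through to
// the default visitor.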
fn visit_expr(cx: @ctx, ex: @ast::expr, sc: scope, v: vt<scope>) {
let handled = true;
alt ex.node {
ast::expr_call(f, args, _) {
check_call(*cx, sc, f, args);
handled = false;
}
ast::expr_alt(input, arms) { check_alt(*cx, input, arms, sc, v); }
ast::expr_for(decl, seq, blk) {
v.visit_expr(seq, sc, v);
check_loop(*cx, sc) {|| check_for(*cx, decl, seq, blk, sc, v); }
}
ast::expr_path(pt) {
check_var(*cx, ex, pt, ex.id, false, sc);
handled = false;
}
ast::expr_swap(lhs, rhs) {
check_lval(cx, lhs, sc, v);
check_lval(cx, rhs, sc, v);
handled = false;
}
ast::expr_move(dest, src) {
check_assign(cx, dest, src, sc, v);
check_lval(cx, src, sc, v);
}
ast::expr_assign(dest, src) | ast::expr_assign_op(_, dest, src) {
check_assign(cx, dest, src, sc, v);
}
ast::expr_if(c, then, els) { check_if(c, then, els, sc, v); }
ast::expr_while(_, _) | ast::expr_do_while(_, _) {
check_loop(*cx, sc) {|| visit::visit_expr(ex, sc, v); }
}
_ { handled = false; }
}
if !handled { visit::visit_expr(ex, sc, v); }
}
fn visit_block(cx: @ctx, b: ast::blk, sc: scope, v: vt<scope>) {
let bs = sc.bs, sc = sc;
for stmt in b.node.stmts {
alt stmt.node {
ast::stmt_decl(@{node: ast::decl_item(it), _}, _) {
v.visit_item(it, sc, v);
}
ast::stmt_decl(@{node: ast::decl_local(locs), _}, _) {
for (st, loc) in locs {
if st == ast::let_ref {
add_bindings_for_let(*cx, bs, loc);
sc = {bs: bs with sc};
}
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
check_lval(cx, init.expr, sc, v);
}
}
none. { }
}
}
}
ast::stmt_expr(ex, _) | ast::stmt_semi(ex, _) {
v.visit_expr(ex, sc, v);
}
}
}
visit::visit_expr_opt(b.node.expr, sc, v);
}
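// A by-reference (`let ref`) binding must alias a named, initialized
// location: moving into it or rooting it in a temporary is an error, and the
// binding itself is never implicitly copied.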
fn add_bindings_for_let(cx: ctx, &bs: [binding], loc: @ast::local) {
alt loc.node.init {
some(init) {
if init.op == ast::init_move {
err(cx, loc.span, "can not move into a by-reference binding");
}
let root = expr_root(cx, init.expr, false);
let root_var = path_def_id(cx, root.ex);
if is_none(root_var) {
err(cx, loc.span, "a reference binding can't be \
rooted in a temporary");
}
for proot in pattern_roots(cx.tcx, root.mut, loc.node.pat) {
let bnd = mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut));
// Don't implicitly copy explicit references
bnd.copied = not_allowed;
bs += [bnd];
}
}
_ {
err(cx, loc.span, "by-reference bindings must be initialized");
}
}
}
fn cant_copy(cx: ctx, b: binding) -> bool {
alt b.copied {
not_allowed. { ret true; }
copied. { ret false; }
not_copied. {}
}
let ty = ty::node_id_to_type(cx.tcx, b.node_id);
if ty::type_allows_implicit_copy(cx.tcx, ty) {
b.copied = copied;
cx.copy_map.insert(b.node_id, ());
if copy_is_expensive(cx.tcx, ty) {
cx.tcx.sess.span_warn(b.span,
"inserting an implicit copy for type " +
util::ppaux::ty_to_str(cx.tcx, ty));
}
ret false;
} else { ret true; }
}
fn check_call(cx: ctx, sc: scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
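// Editor's note: check_alt above saves the invalidation list, lets every arm
// start from that same saved state, then unions whatever each arm added.
// Hedged sketch of that save/restore/merge pattern using Vec instead of the
// cons list in the original (additions appended here, prepended there).
fn merge_arm_invalidations<T: Clone>(orig: &[T], arms: &[Vec<T>]) -> Vec<T> {
    let mut all: Vec<T> = orig.to_vec();
    for arm in arms {
        // entries past the shared prefix are what this arm invalidated
        all.extend(arm.iter().skip(orig.len()).cloned());
    }
    all
}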
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) | }
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
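// Editor's note: hedged restatement of check_loop's rule above, with counts
// standing in for the invalidation lists: if one scan of the loop body grew
// the invalid set, the body is scanned once more (with errors silenced in
// the original) so early statements are also tested against aliases that
// only become invalid later in the body.
fn rescan_loop_body_if_grown(before: usize, after: usize, mut rescan: impl FnMut()) {
    if before < after {
        rescan(); // corresponds to the cx.silent re-run above
    }
}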
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty | {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
} | identifier_body |
alias.rs | scope, f: @ast::expr, args: [@ast::expr])
-> [binding] {
let fty = ty::expr_ty(cx.tcx, f);
let arg_ts = ty::ty_fn_args(cx.tcx, fty);
let mut_roots: [{arg: uint, node: node_id}] = [];
let bindings = [];
let i = 0u;
for arg_t: ty::arg in arg_ts {
let arg = args[i];
let root = expr_root(cx, arg, false);
if arg_t.mode == ast::by_mut_ref {
alt path_def(cx, arg) {
some(def) {
let dnum = ast_util::def_id_of_def(def).node;
mut_roots += [{arg: i, node: dnum}];
}
_ { }
}
}
let root_var = path_def_id(cx, root.ex);
bindings += [@{node_id: arg.id,
span: arg.span,
root_var: root_var,
local_id: 0u,
unsafe_tys: unsafe_set(root.mut),
mutable copied: alt arg_t.mode {
ast::by_move. | ast::by_copy. { copied }
ast::by_mut_ref. { not_allowed }
_ { not_copied }
}}];
i += 1u;
}
let f_may_close =
alt f.node {
ast::expr_path(_) { def_is_local(cx.tcx.def_map.get(f.id)) }
_ { true }
};
if f_may_close {
let i = 0u;
for b in bindings {
let unsfe = vec::len(b.unsafe_tys) > 0u;
alt b.root_var {
some(rid) {
for o in sc.bs {
if o.node_id == rid && vec::len(o.unsafe_tys) > 0u {
unsfe = true; break;
}
}
}
_ {}
}
if unsfe && cant_copy(cx, b) {
err(cx, f.span, #fmt["function may alias with argument \
%u, which is not immutably rooted", i]);
}
i += 1u;
}
}
let j = 0u;
for b in bindings {
for unsafe_ty in b.unsafe_tys {
let i = 0u;
for arg_t: ty::arg in arg_ts {
let mut_alias = arg_t.mode == ast::by_mut_ref;
if i != j &&
ty_can_unsafely_include(cx, unsafe_ty, arg_t.ty,
mut_alias) &&
cant_copy(cx, b) {
err(cx, args[i].span,
#fmt["argument %u may alias with argument %u, \
which is not immutably rooted", i, j]);
}
i += 1u;
}
}
j += 1u;
}
// Ensure we're not passing a root by mutable alias.
for {node: node, arg: arg} in mut_roots {
let i = 0u;
for b in bindings {
if i != arg {
alt b.root_var {
some(root) {
if node == root && cant_copy(cx, b) {
err(cx, args[arg].span,
"passing a mutable reference to a \
variable that roots another reference");
break;
}
}
none. { }
}
}
i += 1u;
}
}
ret bindings;
}
fn check_alt(cx: ctx, input: @ast::expr, arms: [ast::arm], sc: scope,
v: vt<scope>) {
v.visit_expr(input, sc, v);
let orig_invalid = *sc.invalid;
let all_invalid = orig_invalid;
let root = expr_root(cx, input, true);
for a: ast::arm in arms {
let new_bs = sc.bs;
let root_var = path_def_id(cx, root.ex);
let pat_id_map = ast_util::pat_id_map(a.pats[0]);
type info = {
id: node_id,
mutable unsafe_tys: [unsafe_ty],
span: span};
let binding_info: [info] = [];
for pat in a.pats {
for proot in pattern_roots(cx.tcx, root.mut, pat) {
let canon_id = pat_id_map.get(proot.name);
alt vec::find(binding_info, {|x| x.id == canon_id}) {
some(s) { s.unsafe_tys += unsafe_set(proot.mut); }
none. {
binding_info += [
{id: canon_id,
mutable unsafe_tys: unsafe_set(proot.mut),
span: proot.span}];
}
}
}
}
for info in binding_info {
new_bs += [mk_binding(cx, info.id, info.span, root_var,
copy info.unsafe_tys)];
}
*sc.invalid = orig_invalid;
visit::visit_arm(a, {bs: new_bs with sc}, v);
all_invalid = append_invalid(all_invalid, *sc.invalid, orig_invalid);
}
*sc.invalid = all_invalid;
}
fn check_for(cx: ctx, local: @ast::local, seq: @ast::expr, blk: ast::blk,
sc: scope, v: vt<scope>) {
let root = expr_root(cx, seq, false);
// If this is a mutable vector, don't allow it to be touched.
let seq_t = ty::expr_ty(cx.tcx, seq);
let cur_mut = root.mut;
alt ty::struct(cx.tcx, seq_t) {
ty::ty_vec(mt) {
if mt.mut != ast::imm {
cur_mut = some(contains(seq_t));
}
}
_ {}
}
let root_var = path_def_id(cx, root.ex);
let new_bs = sc.bs;
for proot in pattern_roots(cx.tcx, cur_mut, local.node.pat) {
new_bs += [mk_binding(cx, proot.id, proot.span, root_var,
unsafe_set(proot.mut))];
}
visit::visit_block(blk, {bs: new_bs with sc}, v);
}
fn check_var(cx: ctx, ex: @ast::expr, p: @ast::path, id: ast::node_id,
assign: bool, sc: scope) {
let def = cx.tcx.def_map.get(id);
if !def_is_local(def) { ret; }
let my_defnum = ast_util::def_id_of_def(def).node;
let my_local_id = local_id_of_node(cx, my_defnum);
let var_t = ty::expr_ty(cx.tcx, ex);
for b in sc.bs {
// excludes variables introduced since the alias was made
if my_local_id < b.local_id {
for unsafe_ty in b.unsafe_tys {
if ty_can_unsafely_include(cx, unsafe_ty, var_t, assign) {
let inv = @{reason: val_taken, node_id: b.node_id,
sp: ex.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
} else if b.node_id == my_defnum {
test_scope(cx, sc, b, p);
}
}
}
fn check_lval(cx: @ctx, dest: @ast::expr, sc: scope, v: vt<scope>) {
alt dest.node {
ast::expr_path(p) {
let def = cx.tcx.def_map.get(dest.id);
let dnum = ast_util::def_id_of_def(def).node;
for b in sc.bs {
if b.root_var == some(dnum) {
let inv = @{reason: overwritten, node_id: b.node_id,
sp: dest.span, path: p};
*sc.invalid = list::cons(inv, @*sc.invalid);
}
}
}
_ { visit_expr(cx, dest, sc, v); }
}
}
fn check_assign(cx: @ctx, dest: @ast::expr, src: @ast::expr, sc: scope,
v: vt<scope>) {
visit_expr(cx, src, sc, v);
check_lval(cx, dest, sc, v);
}
fn check_if(c: @ast::expr, then: ast::blk, els: option::t<@ast::expr>,
sc: scope, v: vt<scope>) {
v.visit_expr(c, sc, v);
let orig_invalid = *sc.invalid;
v.visit_block(then, sc, v);
let then_invalid = *sc.invalid;
*sc.invalid = orig_invalid;
visit::visit_expr_opt(els, sc, v);
*sc.invalid = append_invalid(*sc.invalid, then_invalid, orig_invalid);
}
fn check_loop(cx: ctx, sc: scope, checker: block()) {
let orig_invalid = filter_invalid(*sc.invalid, sc.bs);
checker();
let new_invalid = filter_invalid(*sc.invalid, sc.bs);
// Have to check contents of loop again if it invalidated an alias
if list::len(orig_invalid) < list::len(new_invalid) {
let old_silent = cx.silent;
cx.silent = true;
checker();
cx.silent = old_silent;
}
*sc.invalid = new_invalid;
}
fn test_scope(cx: ctx, sc: scope, b: binding, p: @ast::path) {
let prob = find_invalid(b.node_id, *sc.invalid);
alt b.root_var {
some(dn) {
for other in sc.bs {
if !is_none(prob) { break; }
if other.node_id == dn {
prob = find_invalid(other.node_id, *sc.invalid);
}
}
}
_ {}
}
if !is_none(prob) && cant_copy(cx, b) {
let i = option::get(prob);
let msg = alt i.reason {
overwritten. { "overwriting " + ast_util::path_name(i.path) }
val_taken. { "taking the value of " + ast_util::path_name(i.path) }
};
err(cx, i.sp, msg + " will invalidate reference " +
ast_util::path_name(p) + ", which is still used");
}
}
fn path_def(cx: ctx, ex: @ast::expr) -> option::t<ast::def> {
ret alt ex.node {
ast::expr_path(_) { some(cx.tcx.def_map.get(ex.id)) }
_ { none }
}
}
fn path_def_id(cx: ctx, ex: @ast::expr) -> option::t<ast::node_id> {
alt ex.node {
ast::expr_path(_) {
ret some(ast_util::def_id_of_def(cx.tcx.def_map.get(ex.id)).node);
}
_ { ret none; }
}
}
fn ty_can_unsafely_include(cx: ctx, needle: unsafe_ty, haystack: ty::t,
mut: bool) -> bool {
fn get_mut(cur: bool, mt: ty::mt) -> bool {
ret cur || mt.mut != ast::imm;
}
fn helper(tcx: ty::ctxt, needle: unsafe_ty, haystack: ty::t, mut: bool)
-> bool {
if alt needle {
contains(ty) { ty == haystack }
mut_contains(ty) { mut && ty == haystack }
} { ret true; }
alt ty::struct(tcx, haystack) {
ty::ty_tag(_, ts) {
for t: ty::t in ts {
if helper(tcx, needle, t, mut) { ret true; }
}
ret false;
}
ty::ty_box(mt) | ty::ty_ptr(mt) | ty::ty_uniq(mt) {
ret helper(tcx, needle, mt.ty, get_mut(mut, mt));
}
ty::ty_rec(fields) {
for f: ty::field in fields {
if helper(tcx, needle, f.mt.ty, get_mut(mut, f.mt)) {
ret true;
}
}
ret false;
}
ty::ty_tup(ts) {
for t in ts { if helper(tcx, needle, t, mut) { ret true; } }
ret false;
}
ty::ty_fn({proto: ast::proto_bare., _}) { ret false; }
// These may contain anything.
ty::ty_fn(_) | ty::ty_obj(_) { ret true; }
// A type param may include everything, but can only be
// treated as opaque downstream, and is thus safe unless we
// saw mutable fields, in which case the whole thing can be
// overwritten.
ty::ty_param(_, _) { ret mut; }
_ { ret false; }
}
}
ret helper(cx.tcx, needle, haystack, mut);
}
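// Editor's note: an invented, self-contained analogue of the traversal above:
// can a value of type `haystack` reach the needle type, tracking whether any
// mutable edge was crossed on the way? TyNode and the bool edge flag are
// assumptions for the sketch, not rustc types.
enum TyNode {
    Leaf(u32),
    Ref { mutable: bool, inner: Box<TyNode> },
    Tup(Vec<TyNode>),
}

fn can_unsafely_include(needle: u32, haystack: &TyNode, saw_mut: bool) -> bool {
    match haystack {
        TyNode::Leaf(id) => saw_mut && *id == needle,
        TyNode::Ref { mutable, inner } => {
            can_unsafely_include(needle, inner, saw_mut || *mutable)
        }
        TyNode::Tup(ts) => ts.iter().any(|t| can_unsafely_include(needle, t, saw_mut)),
    }
}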
fn def_is_local(d: ast::def) -> bool {
alt d {
ast::def_local(_, _) | ast::def_arg(_, _) | ast::def_binding(_) |
ast::def_upvar(_, _, _) | ast::def_self(_) |
ast::def_obj_field(_, _) { true }
_ { false }
}
}
fn local_id_of_node(cx: ctx, id: node_id) -> uint {
alt cx.tcx.items.find(id) {
some(ast_map::node_arg(_, id)) | some(ast_map::node_local(id)) { id }
_ { 0u }
}
}
// Heuristic, somewhat random way to decide whether to warn when inserting an
// implicit copy.
fn copy_is_expensive(tcx: ty::ctxt, ty: ty::t) -> bool {
fn score_ty(tcx: ty::ctxt, ty: ty::t) -> uint {
ret alt ty::struct(tcx, ty) {
ty::ty_nil. | ty::ty_bot. | ty::ty_bool. | ty::ty_int(_) |
ty::ty_uint(_) | ty::ty_float(_) | ty::ty_type. | ty::ty_native(_) |
ty::ty_ptr(_) { 1u }
ty::ty_box(_) { 3u }
ty::ty_constr(t, _) | ty::ty_res(_, t, _) { score_ty(tcx, t) }
ty::ty_fn(_) | ty::ty_native_fn(_, _) |
ty::ty_obj(_) { 4u }
ty::ty_str. | ty::ty_vec(_) | ty::ty_param(_, _) { 50u }
ty::ty_uniq(mt) { 1u + score_ty(tcx, mt.ty) }
ty::ty_tag(_, ts) | ty::ty_tup(ts) {
let sum = 0u;
for t in ts { sum += score_ty(tcx, t); }
sum
}
ty::ty_rec(fs) {
let sum = 0u;
for f in fs { sum += score_ty(tcx, f.mt.ty); }
sum
}
};
}
ret score_ty(tcx, ty) > 8u;
}
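// Editor's note: the scoring heuristic above, restated over an invented
// CostTy enum so it runs stand-alone. The weights (1 for scalars, 3 for
// boxes, 50 for strings/vectors, summed over aggregates) and the threshold
// of 8 are taken directly from copy_is_expensive above.
enum CostTy { Scalar, Boxed, Seq, Rec(Vec<CostTy>) }

fn score(t: &CostTy) -> u32 {
    match t {
        CostTy::Scalar => 1,
        CostTy::Boxed => 3,
        CostTy::Seq => 50,
        CostTy::Rec(fields) => fields.iter().map(score).sum(),
    }
}

fn toy_copy_is_expensive(t: &CostTy) -> bool { score(t) > 8 }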
type pattern_root = {id: node_id,
name: ident,
mut: option::t<unsafe_ty>,
span: span};
fn pattern_roots(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat)
-> [pattern_root] {
fn walk(tcx: ty::ctxt, mut: option::t<unsafe_ty>, pat: @ast::pat,
&set: [pattern_root]) {
alt pat.node {
ast::pat_wild. | ast::pat_lit(_) | ast::pat_range(_, _) {}
ast::pat_bind(nm, sub) {
set += [{id: pat.id, name: nm, mut: mut, span: pat.span}];
alt sub { some(p) { walk(tcx, mut, p, set); } _ {} }
}
ast::pat_tag(_, ps) | ast::pat_tup(ps) {
for p in ps { walk(tcx, mut, p, set); }
}
ast::pat_rec(fs, _) {
let ty = ty::node_id_to_type(tcx, pat.id);
for f in fs {
let m = ty::get_field(tcx, ty, f.ident).mt.mut != ast::imm;
walk(tcx, m ? some(contains(ty)) : mut, f.pat, set);
}
}
ast::pat_box(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_box(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
ast::pat_uniq(p) {
let ty = ty::node_id_to_type(tcx, pat.id);
let m = alt ty::struct(tcx, ty) {
ty::ty_uniq(mt) { mt.mut != ast::imm }
};
walk(tcx, m ? some(contains(ty)) : mut, p, set);
}
}
}
let set = [];
walk(tcx, mut, pat, set);
ret set;
}
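// Editor's note: hedged sketch of pattern_roots' recursive walk, with an
// invented Pat enum in place of ast::pat and only the wildcard, binding and
// tuple cases shown; every binding found becomes a root in the output.
enum Pat { Wild, Bind(String, Option<Box<Pat>>), Tup(Vec<Pat>) }

fn collect_roots(p: &Pat, out: &mut Vec<String>) {
    match p {
        Pat::Wild => {}
        Pat::Bind(name, sub) => {
            out.push(name.clone());
            if let Some(inner) = sub {
                collect_roots(inner, out);
            }
        }
        Pat::Tup(elems) => {
            for e in elems {
                collect_roots(e, out);
            }
        }
    }
}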
// Wraps the expr_root in mut.rs to also handle roots that exist through
// return-by-reference
fn expr_root(cx: ctx, ex: @ast::expr, autoderef: bool)
-> {ex: @ast::expr, mut: option::t<unsafe_ty>} {
let base_root = mut::expr_root(cx.tcx, ex, autoderef);
let unsafe_ty = none;
for d in *base_root.ds {
if d.mut { unsafe_ty = some(contains(d.outer_t)); break; }
}
alt base_root.ex.node {
ast::expr_path(_) {
alt cx.tcx.def_map.get(base_root.ex.id) {
ast::def_obj_field(_, ast::mut.) {
unsafe_ty = some(mut_contains(ty::expr_ty(cx.tcx, base_root.ex)));
}
_ {}
}
}
_ {}
}
ret {ex: base_root.ex, mut: unsafe_ty};
}
fn | unsafe_set | identifier_name |
|
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() |
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
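// Editor's note: an equivalent, purely illustrative way (not from nbted) to
// express the action_count bookkeeping in run_cmdline above:
fn more_than_one_action(flags: &[bool]) -> bool {
    flags.iter().filter(|&&f| f).count() > 1
}
// usage sketch: more_than_one_action(&[is_print, is_reverse, is_edit])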
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
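// Editor's note: the $VISUAL -> $EDITOR fallback in open_editor above can be
// folded into one expression; a std-only sketch, not the nbted code itself:
fn find_editor() -> Option<String> {
    std::env::var("VISUAL").or_else(|_| std::env::var("EDITOR")).ok()
}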
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
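// Editor's note: print, reverse and edit above all map a failed stdout write
// (e.g. a broken pipe when piped into `head`) to exit status 1 instead of an
// error report; that shared shape, as a hedged helper:
fn exit_code_for_stdout_write(res: std::io::Result<()>) -> i32 {
    match res {
        Ok(()) => 0,
        Err(_) => 1,
    }
}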
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
} | identifier_body |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format | Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
} | * to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() { | random_line_split |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit | else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn open_editor(tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| {
edit(&input, &output)
} | conditional_block |
main.rs | #[macro_use]
extern crate failure;
use nbted::unstable::{data, read, string_read, string_write, write};
use nbted::Result;
use std::env;
use std::fs::File;
use std::io;
use std::io::{BufReader, BufWriter};
use std::path::Path;
use std::process::exit;
use std::process::Command;
use getopts::Options;
use tempdir::TempDir;
use failure::ResultExt;
fn main() {
match run_cmdline() {
Ok(ret) => {
exit(ret);
}
Err(e) => {
eprintln!("{}", e.backtrace());
eprintln!("Error: {}", e);
for e in e.iter_chain().skip(1) {
eprintln!(" caused by: {}", e);
}
eprintln!("For help, run with --help or read the manpage.");
exit(1);
}
}
}
/// Main entrypoint for program.
///
/// Returns an integer representing the program's exit status.
fn run_cmdline() -> Result<i32> {
let args: Vec<String> = env::args().collect();
let mut opts = Options::new();
let _: &Options = opts.optflagopt("e", "edit", "edit a NBT file with your $EDITOR.
If [FILE] is specified, then that file is edited in place, but specifying --input and/or --output will override the input/output.
If no file is specified, default to read from --input and writing to --output.", "FILE");
let _: &Options = opts.optflagopt("p", "print", "print NBT file to text format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optflagopt("r", "reverse", "reverse a file in text format to NBT format. Adding an argument to this is the same as specifying --input", "FILE");
let _: &Options = opts.optopt(
"i",
"input",
"specify the input file, defaults to stdin",
"FILE",
);
let _: &Options = opts.optopt(
"o",
"output",
"specify the output file, defaults to stdout",
"FILE",
);
let _: &Options = opts.optflag("", "man", "print the nbted man page source and exit");
let _: &Options = opts.optflag("h", "help", "print the help menu and exit");
let _: &Options = opts.optflag("", "version", "print program version and exit");
let matches = opts.parse(&args[1..]).context("error parsing options")?;
if matches.opt_present("h") {
let brief = "Usage: nbted [options] FILE";
print!("{}", opts.usage(&brief));
println!("\nThe default action, taken if no action is explicitly selected, is to --edit.");
println!(
"\nFor detailed usage information, read the nbted man page. If the nbted man page\
\nwas not installed on your system, such as if you installed using `cargo install`,\
\nthen you can use `nbted --man | nroff -man | less` to read the nbted man page."
);
return Ok(0);
}
if matches.opt_present("version") {
println!(
"{} {} {}",
env!("CARGO_PKG_NAME"),
env!("CARGO_PKG_VERSION"),
/* See build.rs for the git-revision.txt file */
include!(concat!(env!("OUT_DIR"), "/git-revision.txt"))
);
println!("https://github.com/C4K3/nbted");
return Ok(0);
}
if matches.opt_present("man") {
print!(include_str!("../nbted.1"));
return Ok(0);
}
let is_print: bool = matches.opt_present("print");
let is_reverse: bool = matches.opt_present("reverse");
let is_edit: bool = if matches.opt_present("edit") {
true
} else {
/* If edit is not explicitly defined, it is the default action and is
* selected if no other action is specified */
!(is_reverse || is_print)
};
/* Hopefully this is a simpler way of ensuring that only one action can be
* taken than having a long logical expression */
let mut action_count = 0;
if is_print {
action_count += 1;
}
if is_reverse {
action_count += 1;
}
if is_edit {
action_count += 1;
}
if action_count > 1 {
bail!("You can only specify one action at a time.");
}
/* Figure out the input file, by trying to read the arguments for all of
* --input, --edit, --print and --reverse, prioritizing --input over the
* other arguments, if none of the arguments are specified but there is a
* free argument, use that, else we finally default to - (stdin) */
let input = if let Some(x) = matches.opt_str("input") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if let Some(x) = matches.opt_str("print") {
x
} else if let Some(x) = matches.opt_str("reverse") {
x
} else if matches.free.len() == 1 {
matches.free[0].clone()
} else {
/* stdin */
"-".to_string()
};
let output = if let Some(x) = matches.opt_str("output") {
x
} else if let Some(x) = matches.opt_str("edit") {
x
} else if is_edit && matches.free.len() == 1 {
/* Only want to default to the free argument if we're editing
* (DO NOT WRITE BACK TO THE READ FILE UNLESS EDITING!) */
matches.free[0].clone()
} else {
/* stdout */
"-".to_string()
};
if matches.free.len() > 1 {
bail!("nbted was given multiple arguments, but only supports editing one file at a time.");
}
if is_print {
print(&input, &output)
} else if is_reverse {
reverse(&input, &output)
} else if is_edit {
edit(&input, &output)
} else {
bail!("Internal error: No action selected. (Please report this.)");
}
}
/// When the user wants to edit a specific file in place
///
/// Returns an integer representing the program's exit status.
fn edit(input: &str, output: &str) -> Result<i32> {
/* First we read the NBT data from the input */
let nbt = if input == "-" {
// let mut f = BufReader::new(io::stdin());
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context("Unable to parse any NBT files from stdin")?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we create a temporary file and write the NBT data in text format
* to the temporary file */
let tmpdir = TempDir::new("nbted").context("Unable to create temporary directory")?;
let tmp = match Path::new(input).file_name() {
Some(x) => {
let mut x = x.to_os_string();
x.push(".txt");
x
}
None => bail!("Error reading file name"),
};
let tmp_path = tmpdir.path().join(tmp);
{
let mut f = File::create(&tmp_path).context("Unable to create temporary file")?;
string_write::write_file(&mut f, &nbt).context("Unable to write temporary file")?;
f.sync_all().context("Unable to synchronize file")?;
}
let new_nbt = {
let mut new_nbt = open_editor(&tmp_path);
while let Err(e) = new_nbt {
eprintln!("Unable to parse edited file");
for e in e.iter_chain() {
eprintln!(" caused by: {}", e);
}
eprintln!("Do you want to open the file for editing again? (y/N)");
let mut line = String::new();
let _: usize = io::stdin()
.read_line(&mut line)
.context("Error reading from stdin. Nothing was changed")?;
if line.trim() == "y" {
new_nbt = open_editor(&tmp_path);
} else {
eprintln!("Exiting... File is unchanged.");
return Ok(0);
}
}
new_nbt.expect("new_nbt was Error")
};
if nbt == new_nbt {
eprintln!("No changes, will do nothing.");
return Ok(0);
}
/* And finally we write the edited nbt (new_nbt) into the output file */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &new_nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &new_nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
eprintln!("File edited successfully.");
Ok(0)
}
/// Open the user's $EDITOR on the temporary file, wait until the editor is
/// closed again, read the temporary file and attempt to parse it into NBT,
/// returning the result.
fn | (tmp_path: &Path) -> Result<data::NBTFile> {
let editor = match env::var("VISUAL") {
Ok(x) => x,
Err(_) => match env::var("EDITOR") {
Ok(x) => x,
Err(_) => bail!("Unable to find $EDITOR"),
},
};
let mut cmd = Command::new(editor);
let _: &mut Command = cmd.arg(&tmp_path.as_os_str());
let mut cmd = cmd.spawn().context("Error opening editor")?;
match cmd.wait().context("Error executing editor")? {
x if x.success() => (),
_ => bail!("Editor did not exit correctly"),
}
/* Then we parse the text format in the temporary file into NBT */
let mut f = File::open(&tmp_path).context(format_err!(
"Unable to read temporary file. Nothing was changed."
))?;
string_read::read_file(&mut f)
}
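/* A more compact sketch of the $VISUAL -> $EDITOR fallback used above
 * (equivalent semantics, shown for illustration only):
 *
 * let editor = env::var("VISUAL")
 *     .or_else(|_| env::var("EDITOR"))
 *     .map_err(|_| format_err!("Unable to find $EDITOR"))?;
 */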
/// When the user wants to print an NBT file to text format
fn print(input: &str, output: &str) -> Result<i32> {
/* First we read a NBTFile from the input */
let nbt = if input == "-" {
let f = io::stdin();
let mut f = f.lock();
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
} else {
let path: &Path = Path::new(input);
let f = File::open(path).context(format_err!("Unable to open file {}", input))?;
let mut f = BufReader::new(f);
read::read_file(&mut f).context(format_err!(
"Unable to parse {}, are you sure it's an NBT file?",
input
))?
};
/* Then we write the NBTFile to the output in text format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match string_write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed.",
output
))?;
let mut f = BufWriter::new(f);
string_write::write_file(&mut f, &nbt).context(
format_err!("Error writing NBT file {}. State of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
/// When the user wants to convert a text format file into an NBT file
///
/// Returns an integer representing the program's exit status.
fn reverse(input: &str, output: &str) -> Result<i32> {
/* First we read the input file in the text format */
let path: &Path = Path::new(input);
let mut f = File::open(&path).context(format_err!("Unable to read text file {}", input))?;
let nbt = string_read::read_file(&mut f)
.context(format_err!("Unable to parse text file {}", input))?;
/* Then we write the parsed NBT to the output file in NBT format */
if output == "-" {
let f = io::stdout();
let mut f = f.lock();
/* If we get an error writing to stdout, we want to just silently exit
* with exit code 1. (It can generally be assumed that nbted will not
* error in serializing the data, so any error here would be because of
* writing to stdout) */
match write::write_file(&mut f, &nbt) {
Ok(()) => (),
Err(_) => return Ok(1),
}
} else {
let path: &Path = Path::new(output);
let f = File::create(&path).context(format_err!(
"Unable to write to output NBT file {}. Nothing was changed",
output
))?;
let mut f = BufWriter::new(f);
write::write_file(&mut f, &nbt).context(
format_err!("error writing to NBT FILE {}, state of NBT file is unknown, consider restoring it from a backup.",
output))?;
}
Ok(0)
}
| open_editor | identifier_name |
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
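// Sketch only (`CountingCallback` is not part of this module): the smallest
// possible `Callback` implementor, of the shape expected by
// `pausable_idle_callback` and `remote_callback` below.
// struct CountingCallback { hits: uint }
// impl Callback for CountingCallback {
//     fn call(&mut self) { self.hits += 1; }
// }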
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported by
/// libuv (it does translation to windows under the hood).
pub struct FileOpenConfig {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
|
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>;
fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess {
fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {}
| {
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
} | identifier_body |
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported by
/// libuv (it does translation to windows under the hood).
pub struct FileOpenConfig {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
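// Usage sketch (assumes some in-scope `io: &mut IoFactory` and a raw fd):
// wrap stdin as a file stream without closing fd 0 when the stream drops.
// let stdin_stream = io.fs_from_raw_fd(0, DontClose);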
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
{
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
}
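// Usage sketch: I/O entry points can funnel through `maybe_raise` so callers
// see an `IoUnavailable` error instead of a crash when no local I/O exists.
// fn sleep_ms(ms: u64) -> IoResult<()> {
//     LocalIo::maybe_raise(|io| io.timer_init().map(|mut t| t.sleep(ms)))
// }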
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
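// Timer sketch (hypothetical `io: &mut IoFactory`): `oneshot` hands back a
// Receiver, so a caller can block on it or select over several channels.
// let mut timer = io.timer_init().unwrap();
// let fired: Receiver<()> = timer.oneshot(250); // fires once after 250 ms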
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>; | fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {} | fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess { | random_line_split |
rtio.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use c_str::CString;
use cast;
use comm::{Sender, Receiver};
use libc::c_int;
use libc;
use kinds::Send;
use ops::Drop;
use option::{Option, Some, None};
use path::Path;
use result::Err;
use rt::local::Local;
use rt::task::Task;
use vec::Vec;
use ai = io::net::addrinfo;
use io;
use io::IoResult;
use io::net::ip::{IpAddr, SocketAddr};
use io::process::{ProcessConfig, ProcessExit};
use io::signal::Signum;
use io::{FileMode, FileAccess, FileStat, FilePermission};
use io::{SeekStyle};
pub trait Callback {
fn call(&mut self);
}
pub trait EventLoop {
fn run(&mut self);
fn callback(&mut self, arg: proc():Send);
fn pausable_idle_callback(&mut self,
~Callback:Send) -> ~PausableIdleCallback:Send;
fn remote_callback(&mut self, ~Callback:Send) -> ~RemoteCallback:Send;
/// The asynchronous I/O services. Not all event loops may provide one.
fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory>;
fn has_active_io(&self) -> bool;
}
pub trait RemoteCallback {
/// Trigger the remote callback. Note that the number of times the
/// callback is run is not guaranteed. All that is guaranteed is
/// that, after calling 'fire', the callback will be called at
/// least once, but multiple callbacks may be coalesced and
/// callbacks may be called more often than requested. Destruction also
/// triggers the callback.
fn fire(&mut self);
}
/// Data needed to make a successful open(2) call
/// Using unix flag conventions for now, which happens to also be what's supported by
/// libuv (it does translation to windows under the hood).
pub struct | {
/// Path to file to be opened
pub path: Path,
/// Flags for file access mode (as per open(2))
pub flags: int,
/// File creation mode, ignored unless O_CREAT is passed as part of flags
pub mode: int
}
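// Illustrative values only (Linux open(2) constants, not from this source):
// let cfg = FileOpenConfig {
//     path: Path::new("out.log"), // hypothetical path
//     flags: 0o101,               // O_WRONLY | O_CREAT
//     mode: 0o644,                // rw-r--r--, honored because O_CREAT is set
// };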
/// Description of what to do when a file handle is closed
pub enum CloseBehavior {
/// Do not close this handle when the object is destroyed
DontClose,
/// Synchronously close the handle, meaning that the task will block when
/// the handle is destroyed until it has been fully closed.
CloseSynchronously,
/// Asynchronously closes a handle, meaning that the task will *not* block
/// when the handle is destroyed, but the handle will still get deallocated
/// and cleaned up (but this will happen asynchronously on the local event
/// loop).
CloseAsynchronously,
}
pub struct LocalIo<'a> {
factory: &'a mut IoFactory,
}
#[unsafe_destructor]
impl<'a> Drop for LocalIo<'a> {
fn drop(&mut self) {
// FIXME(pcwalton): Do nothing here for now, but eventually we may want
// something. For now this serves to make `LocalIo` noncopyable.
}
}
impl<'a> LocalIo<'a> {
/// Returns the local I/O: either the local scheduler's I/O services or
/// the native I/O services.
pub fn borrow() -> Option<LocalIo> {
// FIXME(#11053): bad
//
// This is currently very unsafely implemented. We don't actually
// *take* the local I/O so there's a very real possibility that we
// can have two borrows at once. Currently there is not a clear way
// to actually borrow the local I/O factory safely because even if
// ownership were transferred down to the functions that the I/O
// factory implements it's just too much of a pain to know when to
// relinquish ownership back into the local task (but that would be
// the safe way of implementing this function).
//
// In order to get around this, we just transmute a copy out of the task
// in order to have what is likely a static lifetime (bad).
let mut t: ~Task = Local::take();
let ret = t.local_io().map(|t| {
unsafe { cast::transmute_copy(&t) }
});
Local::put(t);
return ret;
}
pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> IoResult<T>)
-> IoResult<T>
{
match LocalIo::borrow() {
None => Err(io::standard_error(io::IoUnavailable)),
Some(mut io) => f(io.get()),
}
}
pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
LocalIo { factory: io }
}
/// Returns the underlying I/O factory as a trait reference.
#[inline]
pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
// FIXME(pcwalton): I think this is actually sound? Could borrow check
// allow this safely?
unsafe {
cast::transmute_copy(&self.factory)
}
}
}
pub trait IoFactory {
// networking
fn tcp_connect(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpStream:Send>;
fn tcp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioTcpListener:Send>;
fn udp_bind(&mut self, addr: SocketAddr) -> IoResult<~RtioUdpSocket:Send>;
fn unix_bind(&mut self, path: &CString)
-> IoResult<~RtioUnixListener:Send>;
fn unix_connect(&mut self, path: &CString) -> IoResult<~RtioPipe:Send>;
fn get_host_addresses(&mut self, host: Option<&str>, servname: Option<&str>,
hint: Option<ai::Hint>) -> IoResult<~[ai::Info]>;
// filesystem operations
fn fs_from_raw_fd(&mut self, fd: c_int, close: CloseBehavior)
-> ~RtioFileStream:Send;
fn fs_open(&mut self, path: &CString, fm: FileMode, fa: FileAccess)
-> IoResult<~RtioFileStream:Send>;
fn fs_unlink(&mut self, path: &CString) -> IoResult<()>;
fn fs_stat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_mkdir(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_chmod(&mut self, path: &CString,
mode: FilePermission) -> IoResult<()>;
fn fs_rmdir(&mut self, path: &CString) -> IoResult<()>;
fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()>;
fn fs_readdir(&mut self, path: &CString, flags: c_int) ->
IoResult<Vec<Path>>;
fn fs_lstat(&mut self, path: &CString) -> IoResult<FileStat>;
fn fs_chown(&mut self, path: &CString, uid: int, gid: int) ->
IoResult<()>;
fn fs_readlink(&mut self, path: &CString) -> IoResult<Path>;
fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()>;
fn fs_utime(&mut self, src: &CString, atime: u64, mtime: u64) ->
IoResult<()>;
// misc
fn timer_init(&mut self) -> IoResult<~RtioTimer:Send>;
fn spawn(&mut self, config: ProcessConfig)
-> IoResult<(~RtioProcess:Send, ~[Option<~RtioPipe:Send>])>;
fn kill(&mut self, pid: libc::pid_t, signal: int) -> IoResult<()>;
fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe:Send>;
fn tty_open(&mut self, fd: c_int, readable: bool)
-> IoResult<~RtioTTY:Send>;
fn signal(&mut self, signal: Signum, channel: Sender<Signum>)
-> IoResult<~RtioSignal:Send>;
}
pub trait RtioTcpListener : RtioSocket {
fn listen(~self) -> IoResult<~RtioTcpAcceptor:Send>;
}
pub trait RtioTcpAcceptor : RtioSocket {
fn accept(&mut self) -> IoResult<~RtioTcpStream:Send>;
fn accept_simultaneously(&mut self) -> IoResult<()>;
fn dont_accept_simultaneously(&mut self) -> IoResult<()>;
}
pub trait RtioTcpStream : RtioSocket {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn peer_name(&mut self) -> IoResult<SocketAddr>;
fn control_congestion(&mut self) -> IoResult<()>;
fn nodelay(&mut self) -> IoResult<()>;
fn keepalive(&mut self, delay_in_seconds: uint) -> IoResult<()>;
fn letdie(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioTcpStream:Send;
fn close_write(&mut self) -> IoResult<()>;
}
pub trait RtioSocket {
fn socket_name(&mut self) -> IoResult<SocketAddr>;
}
pub trait RtioUdpSocket : RtioSocket {
fn recvfrom(&mut self, buf: &mut [u8]) -> IoResult<(uint, SocketAddr)>;
fn sendto(&mut self, buf: &[u8], dst: SocketAddr) -> IoResult<()>;
fn join_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn leave_multicast(&mut self, multi: IpAddr) -> IoResult<()>;
fn loop_multicast_locally(&mut self) -> IoResult<()>;
fn dont_loop_multicast_locally(&mut self) -> IoResult<()>;
fn multicast_time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn time_to_live(&mut self, ttl: int) -> IoResult<()>;
fn hear_broadcasts(&mut self) -> IoResult<()>;
fn ignore_broadcasts(&mut self) -> IoResult<()>;
fn clone(&self) -> ~RtioUdpSocket:Send;
}
pub trait RtioTimer {
fn sleep(&mut self, msecs: u64);
fn oneshot(&mut self, msecs: u64) -> Receiver<()>;
fn period(&mut self, msecs: u64) -> Receiver<()>;
}
pub trait RtioFileStream {
fn read(&mut self, buf: &mut [u8]) -> IoResult<int>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn pread(&mut self, buf: &mut [u8], offset: u64) -> IoResult<int>;
fn pwrite(&mut self, buf: &[u8], offset: u64) -> IoResult<()>;
fn seek(&mut self, pos: i64, whence: SeekStyle) -> IoResult<u64>;
fn tell(&self) -> IoResult<u64>;
fn fsync(&mut self) -> IoResult<()>;
fn datasync(&mut self) -> IoResult<()>;
fn truncate(&mut self, offset: i64) -> IoResult<()>;
}
pub trait RtioProcess {
fn id(&self) -> libc::pid_t;
fn kill(&mut self, signal: int) -> IoResult<()>;
fn wait(&mut self) -> ProcessExit;
}
pub trait RtioPipe {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn clone(&self) -> ~RtioPipe:Send;
}
pub trait RtioUnixListener {
fn listen(~self) -> IoResult<~RtioUnixAcceptor:Send>;
}
pub trait RtioUnixAcceptor {
fn accept(&mut self) -> IoResult<~RtioPipe:Send>;
}
pub trait RtioTTY {
fn read(&mut self, buf: &mut [u8]) -> IoResult<uint>;
fn write(&mut self, buf: &[u8]) -> IoResult<()>;
fn set_raw(&mut self, raw: bool) -> IoResult<()>;
fn get_winsize(&mut self) -> IoResult<(int, int)>;
fn isatty(&self) -> bool;
}
pub trait PausableIdleCallback {
fn pause(&mut self);
fn resume(&mut self);
}
pub trait RtioSignal {}
| FileOpenConfig | identifier_name |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) | batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
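// Gauge arithmetic sketch: with phase.duration = 30s and time_left = 12s,
// percent = 12 / 30 = 0.4, so `to_partial_polygon(0.4)` sweeps 40% of the
// yellow circle drawn between the red (top) and green (bottom) lamps.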
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() { | identifier_body |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
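// Label round-trip sketch: `make_diagram` registers buttons as "phase 1",
// "phase 2", ..., so `x["phase ".len()..].parse::<usize>()` recovers the
// 1-based label and the `- 1` maps it back to a 0-based phase index.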
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
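// Usage sketch (assumes `i`, `ui`, `ctx`, `menu`, and a `GfxCtx` `g`):
// let mut diagram = TrafficSignalDiagram::new(i, 0, &ui, ctx);
// diagram.event(ctx, &mut ui, &mut menu); // keyboard + click phase changes
// diagram.draw(g);                        // draws the scrollable column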
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i) | .iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
} | .roads | random_line_split |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => |
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn event(&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
} | conditional_block |
traffic_signal.rs | use crate::options::TrafficSignalStyle;
use crate::render::{DrawCtx, DrawTurnGroup, BIG_ARROW_THICKNESS};
use crate::ui::UI;
use ezgui::{
Button, Color, Composite, DrawBoth, EventCtx, GeomBatch, GfxCtx, Line, ManagedWidget,
ModalMenu, Outcome, Text,
};
use geom::{Circle, Distance, Duration, Polygon};
use map_model::{IntersectionID, Phase, TurnPriority};
use std::collections::BTreeSet;
// Only draws a box when time_left is present
pub fn draw_signal_phase(
phase: &Phase,
i: IntersectionID,
time_left: Option<Duration>,
batch: &mut GeomBatch,
ctx: &DrawCtx,
) {
let protected_color = ctx
.cs
.get_def("turn protected by traffic signal", Color::GREEN);
let yield_color = ctx.cs.get_def(
"turn that can yield by traffic signal",
Color::rgba(255, 105, 180, 0.8),
);
let signal = ctx.map.get_traffic_signal(i);
for (id, crosswalk) in &ctx.draw_map.get_i(i).crosswalks {
if phase.get_priority_of_turn(*id, signal) == TurnPriority::Protected {
batch.append(crosswalk.clone());
}
}
match ctx.opts.traffic_signal_style {
TrafficSignalStyle::GroupArrows => {
for g in &phase.protected_groups {
if g.crosswalk.is_none() {
batch.push(
protected_color,
signal.turn_groups[g]
.geom
.make_arrow(BIG_ARROW_THICKNESS * 2.0)
.unwrap(),
);
}
}
for g in &phase.yield_groups {
if g.crosswalk.is_none() {
batch.extend(
yield_color,
signal.turn_groups[g]
.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
}
}
TrafficSignalStyle::Icons => {
for g in DrawTurnGroup::for_i(i, ctx.map) {
batch.push(ctx.cs.get("turn block background"), g.block.clone());
let arrow_color = match phase.get_priority_of_group(g.id) {
TurnPriority::Protected => ctx.cs.get("turn protected by traffic signal"),
TurnPriority::Yield => ctx
.cs
.get("turn that can yield by traffic signal")
.alpha(1.0),
TurnPriority::Banned => ctx.cs.get("turn not in current phase"),
};
batch.push(arrow_color, g.arrow.clone());
}
}
TrafficSignalStyle::IndividualTurnArrows => {
for turn in ctx.map.get_turns_in_intersection(i) {
if turn.between_sidewalks() {
continue;
}
match phase.get_priority_of_turn(turn.id, signal) {
TurnPriority::Protected => {
batch.push(
protected_color,
turn.geom.make_arrow(BIG_ARROW_THICKNESS * 2.0).unwrap(),
);
}
TurnPriority::Yield => {
batch.extend(
yield_color,
turn.geom
.make_arrow_outline(
BIG_ARROW_THICKNESS * 2.0,
BIG_ARROW_THICKNESS / 2.0,
)
.unwrap(),
);
}
TurnPriority::Banned => {}
}
}
}
}
if time_left.is_none() {
return;
}
let radius = Distance::meters(0.5);
let box_width = (2.5 * radius).inner_meters();
let box_height = (6.5 * radius).inner_meters();
let center = ctx.map.get_i(i).polygon.center();
let top_left = center.offset(-box_width / 2.0, -box_height / 2.0);
let percent = time_left.unwrap() / phase.duration;
// TODO Tune colors.
batch.push(
ctx.cs.get_def("traffic signal box", Color::grey(0.5)),
Polygon::rectangle(box_width, box_height).translate(top_left.x(), top_left.y()),
);
batch.push(
Color::RED,
Circle::new(center.offset(0.0, -2.0 * radius.inner_meters()), radius).to_polygon(),
);
batch.push(Color::grey(0.4), Circle::new(center, radius).to_polygon());
batch.push(
Color::YELLOW,
Circle::new(center, radius).to_partial_polygon(percent),
);
batch.push(
Color::GREEN,
Circle::new(center.offset(0.0, 2.0 * radius.inner_meters()), radius).to_polygon(),
);
}
pub struct TrafficSignalDiagram {
pub i: IntersectionID,
composite: Composite,
current_phase: usize,
}
impl TrafficSignalDiagram {
pub fn new(
i: IntersectionID,
current_phase: usize,
ui: &UI,
ctx: &EventCtx,
) -> TrafficSignalDiagram {
TrafficSignalDiagram {
i,
composite: make_diagram(i, current_phase, ui, ctx),
current_phase,
}
}
pub fn | (&mut self, ctx: &mut EventCtx, ui: &mut UI, menu: &mut ModalMenu) {
if self.current_phase != 0 && menu.action("select previous phase") {
self.change_phase(self.current_phase - 1, ui, ctx);
}
if self.current_phase != ui.primary.map.get_traffic_signal(self.i).phases.len() - 1
&& menu.action("select next phase")
{
self.change_phase(self.current_phase + 1, ui, ctx);
}
match self.composite.event(ctx) {
Some(Outcome::Clicked(x)) => {
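// Button labels are of the form "phase 3"; strip the "phase " prefix and
// convert the 1-based label back to a 0-based index.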
self.change_phase(x["phase ".len()..].parse::<usize>().unwrap() - 1, ui, ctx);
}
None => {}
}
}
fn change_phase(&mut self, idx: usize, ui: &UI, ctx: &EventCtx) {
if self.current_phase != idx {
let preserve_scroll = self.composite.preserve_scroll(ctx);
self.current_phase = idx;
self.composite = make_diagram(self.i, self.current_phase, ui, ctx);
self.composite.restore_scroll(ctx, preserve_scroll);
}
}
pub fn current_phase(&self) -> usize {
self.current_phase
}
pub fn draw(&self, g: &mut GfxCtx) {
self.composite.draw(g);
}
}
fn make_diagram(i: IntersectionID, selected: usize, ui: &UI, ctx: &EventCtx) -> Composite {
// Slightly inaccurate -- the turn rendering may slightly exceed the intersection polygon --
// but this is close enough.
let bounds = ui.primary.map.get_i(i).polygon.get_bounds();
// Pick a zoom so that we fit some percentage of the screen
let zoom = 0.2 * ctx.canvas.window_width / (bounds.max_x - bounds.min_x);
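// Illustrative numbers (not from the original source): a 1920px-wide window and a
// 100m-wide intersection give zoom = 0.2 * 1920 / 100 = 3.84.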
let bbox = Polygon::rectangle(
zoom * (bounds.max_x - bounds.min_x),
zoom * (bounds.max_y - bounds.min_y),
);
let signal = ui.primary.map.get_traffic_signal(i);
let mut col = vec![ManagedWidget::draw_text(ctx, {
let mut txt = Text::new();
txt.add(Line(i.to_string()).roboto());
let road_names = ui
.primary
.map
.get_i(i)
.roads
.iter()
.map(|r| ui.primary.map.get_r(*r).get_name())
.collect::<BTreeSet<_>>();
let len = road_names.len();
// TODO Some kind of reusable TextStyle thing
// TODO Need to wrap this
txt.add(Line("").roboto().size(21).fg(Color::WHITE.alpha(0.54)));
for (idx, n) in road_names.into_iter().enumerate() {
txt.append(Line(n).roboto().fg(Color::WHITE.alpha(0.54)));
if idx != len - 1 {
txt.append(Line(", ").roboto().fg(Color::WHITE.alpha(0.54)));
}
}
txt.add(Line(format!("{} phases", signal.phases.len())));
txt.add(Line(""));
txt.add(Line(format!("Signal offset: {}", signal.offset)));
txt.add(Line(format!("One cycle lasts {}", signal.cycle_length())));
txt
})];
for (idx, phase) in signal.phases.iter().enumerate() {
col.push(
ManagedWidget::row(vec![
ManagedWidget::draw_text(ctx, Text::from(Line(format!("#{}", idx + 1)))),
ManagedWidget::draw_text(ctx, Text::from(Line(phase.duration.to_string()))),
])
.margin(5)
.evenly_spaced(),
);
let mut orig_batch = GeomBatch::new();
draw_signal_phase(phase, i, None, &mut orig_batch, &ui.draw_ctx());
let mut normal = GeomBatch::new();
// TODO Ideally no background here, but we have to force the dimensions of normal and
// hovered to be the same. For some reason the bbox is slightly different.
if idx == selected {
normal.push(Color::RED.alpha(0.15), bbox.clone());
} else {
normal.push(Color::CYAN.alpha(0.05), bbox.clone());
}
// Move to the origin and apply zoom
for (color, poly) in orig_batch.consume() {
normal.push(
color,
poly.translate(-bounds.min_x, -bounds.min_y).scale(zoom),
);
}
let mut hovered = GeomBatch::new();
hovered.push(Color::RED.alpha(0.95), bbox.clone());
hovered.append(normal.clone());
col.push(
ManagedWidget::btn(Button::new(
DrawBoth::new(ctx, normal, Vec::new()),
DrawBoth::new(ctx, hovered, Vec::new()),
None,
&format!("phase {}", idx + 1),
bbox.clone(),
))
.margin(5),
);
}
Composite::scrollable(ctx, ManagedWidget::col(col).bg(Color::hex("#545454")))
}
| event | identifier_name |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
// The approximate size of the memtable is inaccurate for small data,
// so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms |
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state
// check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
// Empty keys means the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
} | conditional_block |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
// The approximate size of the memtable is inaccurate for small data,
// so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state |
election_timeout
}
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
// Empty keys means the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
} | // check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5); | random_line_split |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
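// 74 approximates the bytes appended per kv below: 9-byte key + 1 encoding prefix + 64-byte value.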
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
// The approximate size of the memtable is inaccurate for small data,
// so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn configure_for_encryption(config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state
// check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
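// Worked example (illustrative inputs, not from the original source): with
// base_tick_ms = Some(50) and election_ticks = Some(10), election_timeout is
// 500ms, so the max leader lease becomes 450ms, the stale-state check interval
// 1.5s, abnormal leader missing 2s, max leader missing 2.5s, and 500ms is returned.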
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
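// Note on the retry loop above: the sleeps of 1, 2, 4, ..., 512 ms add up to
// roughly one second of total waiting before the final assertion fires.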
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) | let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
// Empty keys means the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 { | identifier_body |
util.rs | // Copyright 2022 TiKV Project Authors. Licensed under Apache-2.0.
use std::{fmt::Write, path::Path, sync::Arc, thread, time::Duration};
use encryption_export::{data_key_manager_from_config, DataKeyManager};
use engine_rocks::{RocksEngine, RocksStatistics};
use engine_test::raft::RaftTestEngine;
use engine_traits::{CfName, KvEngine, TabletRegistry, CF_DEFAULT};
use file_system::IoRateLimiter;
use futures::future::BoxFuture;
use kvproto::{
encryptionpb::EncryptionMethod,
kvrpcpb::Context,
metapb,
raft_cmdpb::{CmdType, RaftCmdRequest, RaftCmdResponse},
};
use raftstore::{store::ReadResponse, Result};
use rand::{prelude::SliceRandom, RngCore};
use server::common::ConfiguredRaftEngine;
use tempfile::TempDir;
use test_raftstore::{new_get_cmd, new_put_cf_cmd, new_request, new_snap_cmd, Config};
use tikv::{
server::KvEngineFactoryBuilder,
storage::{
kv::{SnapContext, SnapshotExt},
point_key_range, Engine, Snapshot,
},
};
use tikv_util::{
config::ReadableDuration, escape, future::block_on_timeout, worker::LazyWorker, HandyRwLock,
};
use txn_types::Key;
use crate::{bootstrap_store, cluster::Cluster, ServerCluster, Simulator};
pub fn create_test_engine(
// TODO: pass it in for all cases.
id: Option<(u64, u64)>,
limiter: Option<Arc<IoRateLimiter>>,
cfg: &Config,
) -> (
TabletRegistry<RocksEngine>,
RaftTestEngine,
Option<Arc<DataKeyManager>>,
TempDir,
LazyWorker<String>,
Arc<RocksStatistics>,
Option<Arc<RocksStatistics>>,
) {
let dir = test_util::temp_dir("test_cluster", cfg.prefer_mem);
let mut cfg = cfg.clone();
cfg.storage.data_dir = dir.path().to_str().unwrap().to_string();
cfg.raft_store.raftdb_path = cfg.infer_raft_db_path(None).unwrap();
cfg.raft_engine.mut_config().dir = cfg.infer_raft_engine_path(None).unwrap();
let key_manager =
data_key_manager_from_config(&cfg.security.encryption, dir.path().to_str().unwrap())
.unwrap()
.map(Arc::new);
let cache = cfg.storage.block_cache.build_shared_cache();
let env = cfg
.build_shared_rocks_env(key_manager.clone(), limiter)
.unwrap();
let sst_worker = LazyWorker::new("sst-recovery");
let scheduler = sst_worker.scheduler();
let (raft_engine, raft_statistics) = RaftTestEngine::build(&cfg, &env, &key_manager, &cache);
if let Some((cluster_id, store_id)) = id {
assert_ne!(store_id, 0);
bootstrap_store(&raft_engine, cluster_id, store_id).unwrap();
}
let builder = KvEngineFactoryBuilder::new(env, &cfg.tikv, cache, key_manager.clone())
.sst_recovery_sender(Some(scheduler));
let factory = Box::new(builder.build());
let rocks_statistics = factory.rocks_statistics();
let reg = TabletRegistry::new(factory, dir.path().join("tablet")).unwrap();
(
reg,
raft_engine,
key_manager,
dir,
sst_worker,
rocks_statistics,
raft_statistics,
)
}
/// Keep putting random kvs until specified size limit is reached.
pub fn put_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
put_cf_till_size(cluster, CF_DEFAULT, limit, range)
}
pub fn put_cf_till_size<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
cf: &'static str,
limit: u64,
range: &mut dyn Iterator<Item = u64>,
) -> Vec<u8> {
assert!(limit > 0);
let mut len = 0;
let mut rng = rand::thread_rng();
let mut key = String::new();
let mut value = vec![0; 64];
while len < limit {
let batch_size = std::cmp::min(1024, limit - len);
let mut reqs = vec![];
for _ in 0..batch_size / 74 + 1 {
key.clear();
let key_id = range.next().unwrap();
write!(key, "{:09}", key_id).unwrap();
rng.fill_bytes(&mut value);
// plus 1 for the extra encoding prefix
len += key.len() as u64 + 1;
len += value.len() as u64;
reqs.push(new_put_cf_cmd(cf, key.as_bytes(), &value));
}
cluster.batch_put(key.as_bytes(), reqs).unwrap();
// The approximate size of the memtable is inaccurate for small data,
// so we flush it to SST and use the size properties instead.
cluster.must_flush_cf(cf, true);
}
key.into_bytes()
}
pub fn | (config: &mut Config) {
let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR"));
let cfg = &mut config.security.encryption;
cfg.data_encryption_method = EncryptionMethod::Aes128Ctr;
cfg.data_key_rotation_period = ReadableDuration(Duration::from_millis(100));
cfg.master_key = test_util::new_test_file_master_key(manifest_dir);
}
pub fn configure_for_snapshot(config: &mut Config) {
// Truncate the log quickly so that we can force sending snapshot.
config.raft_store.raft_log_gc_tick_interval = ReadableDuration::millis(20);
config.raft_store.raft_log_gc_count_limit = Some(2);
config.raft_store.merge_max_log_gap = 1;
config.raft_store.snap_mgr_gc_tick_interval = ReadableDuration::millis(50);
configure_for_encryption(config);
}
pub fn configure_for_lease_read_v2<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
base_tick_ms: Option<u64>,
election_ticks: Option<usize>,
) -> Duration {
if let Some(base_tick_ms) = base_tick_ms {
cluster.cfg.raft_store.raft_base_tick_interval = ReadableDuration::millis(base_tick_ms);
}
let base_tick_interval = cluster.cfg.raft_store.raft_base_tick_interval.0;
if let Some(election_ticks) = election_ticks {
cluster.cfg.raft_store.raft_election_timeout_ticks = election_ticks;
}
let election_ticks = cluster.cfg.raft_store.raft_election_timeout_ticks as u32;
let election_timeout = base_tick_interval * election_ticks;
// Adjust max leader lease.
cluster.cfg.raft_store.raft_store_max_leader_lease =
ReadableDuration(election_timeout - base_tick_interval);
// Use large peer check interval, abnormal and max leader missing duration to
// make a valid config, that is election timeout x 2 < peer stale state
// check < abnormal < max leader missing duration.
cluster.cfg.raft_store.peer_stale_state_check_interval = ReadableDuration(election_timeout * 3);
cluster.cfg.raft_store.abnormal_leader_missing_duration =
ReadableDuration(election_timeout * 4);
cluster.cfg.raft_store.max_leader_missing_duration = ReadableDuration(election_timeout * 5);
election_timeout
}
pub fn wait_for_synced(
cluster: &mut Cluster<ServerCluster<RocksEngine>, RocksEngine>,
node_id: u64,
region_id: u64,
) {
let mut storage = cluster
.sim
.read()
.unwrap()
.storages
.get(&node_id)
.unwrap()
.clone();
let leader = cluster.leader_of_region(region_id).unwrap();
let epoch = cluster.get_region_epoch(region_id);
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader);
ctx.set_region_epoch(epoch);
let snap_ctx = SnapContext {
pb_ctx: &ctx,
..Default::default()
};
let snapshot = storage.snapshot(snap_ctx).unwrap();
let txn_ext = snapshot.txn_ext.clone().unwrap();
for retry in 0..10 {
if txn_ext.is_max_ts_synced() {
break;
}
thread::sleep(Duration::from_millis(1 << retry));
}
assert!(snapshot.ext().is_max_ts_synced());
}
// Issue a read request on the specified peer.
pub fn read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
cluster.read(None, request, timeout)
}
pub fn async_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
replica_read: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_get_cmd(key)],
read_quorum,
);
request.mut_header().set_peer(peer);
request.mut_header().set_replica_read(replica_read);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn batch_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
requests: &[(metapb::Peer, metapb::Region)],
) -> Vec<ReadResponse<<EK as KvEngine>::Snapshot>> {
let mut results = vec![];
for (peer, region) in requests {
let node_id = peer.get_store_id();
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![new_snap_cmd()],
false,
);
request.mut_header().set_peer(peer.clone());
let snap = cluster.sim.wl().async_snapshot(node_id, request);
let resp = block_on_timeout(
Box::pin(async move {
match snap.await {
Ok(snap) => ReadResponse {
response: Default::default(),
snapshot: Some(snap),
txn_extra_op: Default::default(),
},
Err(resp) => ReadResponse {
response: resp,
snapshot: None,
txn_extra_op: Default::default(),
},
}
}),
Duration::from_secs(1),
)
.unwrap();
results.push(resp);
}
results
}
pub fn async_read_index_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
read_quorum: bool,
) -> BoxFuture<'static, RaftCmdResponse> {
let mut cmd = new_get_cmd(key);
cmd.mut_read_index().set_start_ts(u64::MAX);
cmd.mut_read_index()
.mut_key_ranges()
.push(point_key_range(Key::from_raw(key)));
let mut request = new_request(
region.get_id(),
region.get_region_epoch().clone(),
vec![cmd],
read_quorum,
);
// Use replica read to issue a read index.
request.mut_header().set_replica_read(true);
request.mut_header().set_peer(peer);
let node_id = request.get_header().get_peer().get_store_id();
let f = cluster.sim.wl().async_read(node_id, request);
Box::pin(async move { f.await.unwrap() })
}
pub fn async_command_on_node<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
request: RaftCmdRequest,
) -> BoxFuture<'static, RaftCmdResponse> {
cluster.sim.wl().async_command_on_node(node_id, request)
}
pub fn test_delete_range<T: Simulator<EK>, EK: KvEngine>(cluster: &mut Cluster<T, EK>, cf: CfName) {
let data_set: Vec<_> = (1..500)
.map(|i| {
(
format!("key{:08}", i).into_bytes(),
format!("value{}", i).into_bytes(),
)
})
.collect();
for kvs in data_set.chunks(50) {
let requests = kvs.iter().map(|(k, v)| new_put_cf_cmd(cf, k, v)).collect();
// key9 is always the last region.
cluster.batch_put(b"key9", requests).unwrap();
}
// delete_range request with notify_only set should not actually delete data.
cluster.must_notify_delete_range_cf(cf, b"", b"");
let mut rng = rand::thread_rng();
for _ in 0..50 {
let (k, v) = data_set.choose(&mut rng).unwrap();
assert_eq!(cluster.get_cf(cf, k).unwrap(), *v);
}
// Empty keys means the whole range.
cluster.must_delete_range_cf(cf, b"", b"");
for _ in 0..50 {
let k = &data_set.choose(&mut rng).unwrap().0;
assert!(cluster.get_cf(cf, k).is_none());
}
}
pub fn must_get_value(resp: &RaftCmdResponse) -> Vec<u8> {
if resp.get_header().has_error() {
panic!("failed to read {:?}", resp);
}
assert_eq!(resp.get_responses().len(), 1);
assert_eq!(resp.get_responses()[0].get_cmd_type(), CmdType::Get);
assert!(resp.get_responses()[0].has_get());
resp.get_responses()[0].get_get().get_value().to_vec()
}
pub fn must_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
value: &[u8],
) {
let timeout = Duration::from_secs(5);
match read_on_peer(cluster, peer, region, key, false, timeout) {
Ok(ref resp) if value == must_get_value(resp).as_slice() => (),
other => panic!(
"read key {}, expect value {:?}, got {:?}",
log_wrappers::hex_encode_upper(key),
value,
other
),
}
}
pub fn must_error_read_on_peer<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
peer: metapb::Peer,
region: metapb::Region,
key: &[u8],
timeout: Duration,
) {
if let Ok(mut resp) = read_on_peer(cluster, peer, region, key, false, timeout) {
if !resp.get_header().has_error() {
let value = resp.mut_responses()[0].mut_get().take_value();
panic!(
"key {}, expect error but got {}",
log_wrappers::hex_encode_upper(key),
escape(&value)
);
}
}
}
pub fn put_with_timeout<T: Simulator<EK>, EK: KvEngine>(
cluster: &mut Cluster<T, EK>,
node_id: u64,
key: &[u8],
value: &[u8],
timeout: Duration,
) -> Result<RaftCmdResponse> {
let mut region = cluster.get_region(key);
let region_id = region.get_id();
let mut req = new_request(
region_id,
region.take_region_epoch(),
vec![new_put_cf_cmd(CF_DEFAULT, key, value)],
false,
);
req.mut_header().set_peer(
region
.get_peers()
.iter()
.find(|p| p.store_id == node_id)
.unwrap()
.clone(),
);
cluster.call_command_on_node(node_id, req, timeout)
}
| configure_for_encryption | identifier_name |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
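// A minimal sketch (not part of the original source) of how the parsing and
// clamping above behave: "0.6" yields 0.6, out-of-range values are clamped to
// [0, 1], and unparsable values fall back to 0.
#[cfg(test)]
mod cpu_utilization_sketch {
    use super::get_cpu_utilization;
    #[test]
    fn clamps_to_unit_interval() {
        std::env::set_var("BELLMAN_CPU_UTILIZATION", "1.5");
        assert_eq!(get_cpu_utilization(), 1f64);
        std::env::set_var("BELLMAN_CPU_UTILIZATION", "-3");
        assert_eq!(get_cpu_utilization(), 0f64);
    }
}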
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
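// Worked numbers for the solve above (illustrative assumption: exp_bits = 256,
// core_count = 2560, n = 2^20). Then
// lower_bound = ln(256 * 2^20 / (2 * 2560)) = ln(52428.8) ≈ 10.87,
// and the loop returns the smallest w with w + ln(w) > 10.87:
// w = 8 gives 8 + ln 8 ≈ 10.08 (too small), w = 9 gives 9 + ln 9 ≈ 11.20,
// so the chosen window size is 9.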
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
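// Illustrative budget for the formula above (sizes are assumptions, not the
// crate's real point layouts): with mem = 8 GiB, core_count = 2560,
// proj_size = 432 B, aff_size = 288 B and exp_size = 32 B, the bucket term is
// 2 * 2560 * (2^10 + 1) * 432 ≈ 2.27 GB, so the chunk size comes out to
// (0.8 * 8 GiB - 2.27 GB) / (288 + 32) B ≈ 14.4 million terms.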
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, giving `num_groups` * `num_windows` * `bucket_len` buckets in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
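// Minimal sketch of the host-side recombination at the end of `multiexp`
// above, replayed with u64 arithmetic instead of group elements (illustrative
// only). It assumes, matching the doubling order of that loop, that window 0
// holds the most significant slice of the exponent; `acc *= 2` stands in for
// `acc.double()` and the single addition stands in for the per-group sum.
#[cfg(test)]
mod window_recombination_example {
    #[test]
    fn windows_recombine_to_the_scalar() {
        let (exp_bits, window_size) = (8usize, 3usize);
        let scalar: u64 = 0b1011_0110; // 182
        // Slice the scalar most-significant-first: widths 3, 3, 2.
        let mut windows = Vec::new();
        let mut bits = 0;
        while bits < exp_bits {
            let w = std::cmp::min(window_size, exp_bits - bits);
            let shift = exp_bits - bits - w;
            windows.push((scalar >> shift) & ((1u64 << w) - 1));
            bits += w;
        }
        // Replay the accumulation loop: double `w` times, then add window i.
        let (mut acc, mut bits) = (0u64, 0usize);
        for win in windows.iter().copied() {
            let w = std::cmp::min(window_size, exp_bits - bits);
            for _ in 0..w {
                acc *= 2; // stands in for acc.double()
            }
            acc += win; // stands in for summing results[g * num_windows + i]
            bits += w;
        }
        assert_eq!(acc, scalar);
    }
}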
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn | <G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
| multiexp | identifier_name |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound |
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, giving `num_groups` * `num_windows` * `bucket_len` buckets in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
| {
return w;
} | conditional_block |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
{
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
}
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, giving `num_groups` * `num_windows` * `bucket_len` buckets in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
); | })
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
} | }
res.ok() | random_line_split |
multiexp.rs | use super::error::{GPUError, GPUResult};
use super::locks;
use super::sources;
use super::utils;
use crate::bls::Engine;
use crate::multicore::Worker;
use crate::multiexp::{multiexp as cpu_multiexp, FullDensity};
use ff::{PrimeField, ScalarEngine};
use groupy::{CurveAffine, CurveProjective};
use log::{error, info};
use rust_gpu_tools::*;
use std::any::TypeId;
use std::sync::Arc;
const MAX_WINDOW_SIZE: usize = 10;
const LOCAL_WORK_SIZE: usize = 256;
const MEMORY_PADDING: f64 = 0.2f64; // Let 20% of GPU memory be free
pub fn get_cpu_utilization() -> f64 {
use std::env;
env::var("BELLMAN_CPU_UTILIZATION")
.and_then(|v| match v.parse() {
Ok(val) => Ok(val),
Err(_) => {
error!("Invalid BELLMAN_CPU_UTILIZATION! Defaulting to 0...");
Ok(0f64)
}
})
.unwrap_or(0f64)
.max(0f64)
.min(1f64)
}
// Multiexp kernel for a single GPU
pub struct SingleMultiexpKernel<E>
where
E: Engine,
{
program: opencl::Program,
core_count: usize,
n: usize,
priority: bool,
_phantom: std::marker::PhantomData<E::Fr>,
}
fn calc_num_groups(core_count: usize, num_windows: usize) -> usize {
// Observations show that we get the best performance when num_groups * num_windows ~= 2 * CUDA_CORES
2 * core_count / num_windows
}
fn calc_window_size(n: usize, exp_bits: usize, core_count: usize) -> usize {
// window_size = ln(n / num_groups)
// num_windows = exp_bits / window_size
// num_groups = 2 * core_count / num_windows = 2 * core_count * window_size / exp_bits
// window_size = ln(n / num_groups) = ln(n * exp_bits / (2 * core_count * window_size))
// window_size = ln(exp_bits * n / (2 * core_count)) - ln(window_size)
//
// Thus we need to solve the following equation:
// window_size + ln(window_size) = ln(exp_bits * n / (2 * core_count))
let lower_bound = (((exp_bits * n) as f64) / ((2 * core_count) as f64)).ln();
for w in 0..MAX_WINDOW_SIZE {
if (w as f64) + (w as f64).ln() > lower_bound {
return w;
}
}
MAX_WINDOW_SIZE
}
fn calc_best_chunk_size(max_window_size: usize, core_count: usize, exp_bits: usize) -> usize {
// Best chunk-size (N) can also be calculated using the same logic as calc_window_size:
// n = e^window_size * window_size * 2 * core_count / exp_bits
(((max_window_size as f64).exp() as f64)
* (max_window_size as f64)
* 2f64
* (core_count as f64)
/ (exp_bits as f64))
.ceil() as usize
}
fn calc_chunk_size<E>(mem: u64, core_count: usize) -> usize
where
E: Engine,
|
fn exp_size<E: Engine>() -> usize {
std::mem::size_of::<<E::Fr as ff::PrimeField>::Repr>()
}
impl<E> SingleMultiexpKernel<E>
where
E: Engine,
{
pub fn create(d: opencl::Device, priority: bool) -> GPUResult<SingleMultiexpKernel<E>> {
let src = sources::kernel::<E>(d.brand() == opencl::Brand::Nvidia);
let exp_bits = exp_size::<E>() * 8;
let core_count = utils::get_core_count(&d);
let mem = d.memory();
let max_n = calc_chunk_size::<E>(mem, core_count);
let best_n = calc_best_chunk_size(MAX_WINDOW_SIZE, core_count, exp_bits);
let n = std::cmp::min(max_n, best_n);
Ok(SingleMultiexpKernel {
program: opencl::Program::from_opencl(d, &src)?,
core_count,
n,
priority,
_phantom: std::marker::PhantomData,
})
}
pub fn multiexp<G>(
&mut self,
bases: &[G],
exps: &[<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr],
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
{
if locks::PriorityLock::should_break(self.priority) {
return Err(GPUError::GPUTaken);
}
let exp_bits = exp_size::<E>() * 8;
let window_size = calc_window_size(n as usize, exp_bits, self.core_count);
let num_windows = ((exp_bits as f64) / (window_size as f64)).ceil() as usize;
let num_groups = calc_num_groups(self.core_count, num_windows);
let bucket_len = 1 << window_size;
// Each group will have `num_windows` threads and as there are `num_groups` groups, there will
// be `num_groups` * `num_windows` threads in total.
// Each thread will use `bucket_len` buckets, giving `num_groups` * `num_windows` * `bucket_len` buckets in total.
let mut base_buffer = self.program.create_buffer::<G>(n)?;
base_buffer.write_from(0, bases)?;
let mut exp_buffer = self
.program
.create_buffer::<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>(n)?;
exp_buffer.write_from(0, exps)?;
let bucket_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count * bucket_len)?;
let result_buffer = self
.program
.create_buffer::<<G as CurveAffine>::Projective>(2 * self.core_count)?;
// Make global work size divisible by `LOCAL_WORK_SIZE`
let mut global_work_size = num_windows * num_groups;
global_work_size +=
(LOCAL_WORK_SIZE - (global_work_size % LOCAL_WORK_SIZE)) % LOCAL_WORK_SIZE;
let kernel = self.program.create_kernel(
if TypeId::of::<G>() == TypeId::of::<E::G1Affine>() {
"G1_bellman_multiexp"
} else if TypeId::of::<G>() == TypeId::of::<E::G2Affine>() {
"G2_bellman_multiexp"
} else {
return Err(GPUError::Simple("Only E::G1 and E::G2 are supported!"));
},
global_work_size,
None,
);
call_kernel!(
kernel,
&base_buffer,
&bucket_buffer,
&result_buffer,
&exp_buffer,
n as u32,
num_groups as u32,
num_windows as u32,
window_size as u32
)?;
let mut results = vec![<G as CurveAffine>::Projective::zero(); num_groups * num_windows];
result_buffer.read_into(0, &mut results)?;
// Using the algorithm below, we can calculate the final result by accumulating the results
// of those `NUM_GROUPS` * `NUM_WINDOWS` threads.
let mut acc = <G as CurveAffine>::Projective::zero();
let mut bits = 0;
for i in 0..num_windows {
let w = std::cmp::min(window_size, exp_bits - bits);
for _ in 0..w {
acc.double();
}
for g in 0..num_groups {
acc.add_assign(&results[g * num_windows + i]);
}
bits += w; // Process the next window
}
Ok(acc)
}
}
// A struct that contains several multiexp kernels for different devices
pub struct MultiexpKernel<E>
where
E: Engine,
{
kernels: Vec<SingleMultiexpKernel<E>>,
_lock: locks::GPULock, // RFC 1857: struct fields are dropped in the same order as they are declared.
}
impl<E> MultiexpKernel<E>
where
E: Engine,
{
pub fn create(priority: bool) -> GPUResult<MultiexpKernel<E>> {
let lock = locks::GPULock::lock();
let devices = opencl::Device::all()?;
let kernels: Vec<_> = devices
.into_iter()
.map(|d| (d.clone(), SingleMultiexpKernel::<E>::create(d, priority)))
.filter_map(|(device, res)| {
if let Err(ref e) = res {
error!(
"Cannot initialize kernel for device '{}'! Error: {}",
device.name(),
e
);
}
res.ok()
})
.collect();
if kernels.is_empty() {
return Err(GPUError::Simple("No working GPUs found!"));
}
info!(
"Multiexp: {} working device(s) selected. (CPU utilization: {})",
kernels.len(),
get_cpu_utilization()
);
for (i, k) in kernels.iter().enumerate() {
info!(
"Multiexp: Device {}: {} (Chunk-size: {})",
i,
k.program.device().name(),
k.n
);
}
Ok(MultiexpKernel::<E> {
kernels,
_lock: lock,
})
}
pub fn multiexp<G>(
&mut self,
pool: &Worker,
bases: Arc<Vec<G>>,
exps: Arc<Vec<<<G::Engine as ScalarEngine>::Fr as PrimeField>::Repr>>,
skip: usize,
n: usize,
) -> GPUResult<<G as CurveAffine>::Projective>
where
G: CurveAffine,
<G as groupy::CurveAffine>::Engine: crate::bls::Engine,
{
let num_devices = self.kernels.len();
// Bases are skipped by `self.1` elements, when converted from (Arc<Vec<G>>, usize) to Source
// https://github.com/zkcrypto/bellman/blob/10c5010fd9c2ca69442dc9775ea271e286e776d8/src/multiexp.rs#L38
let bases = &bases[skip..(skip + n)];
let exps = &exps[..n];
let cpu_n = ((n as f64) * get_cpu_utilization()) as usize;
let n = n - cpu_n;
let (cpu_bases, bases) = bases.split_at(cpu_n);
let (cpu_exps, exps) = exps.split_at(cpu_n);
let chunk_size = ((n as f64) / (num_devices as f64)).ceil() as usize;
crate::multicore::THREAD_POOL.install(|| {
use rayon::prelude::*;
let mut acc = <G as CurveAffine>::Projective::zero();
let results = if n > 0 {
bases
.par_chunks(chunk_size)
.zip(exps.par_chunks(chunk_size))
.zip(self.kernels.par_iter_mut())
.map(|((bases, exps), kern)| -> Result<<G as CurveAffine>::Projective, GPUError> {
let mut acc = <G as CurveAffine>::Projective::zero();
for (bases, exps) in bases.chunks(kern.n).zip(exps.chunks(kern.n)) {
let result = kern.multiexp(bases, exps, bases.len())?;
acc.add_assign(&result);
}
Ok(acc)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let cpu_acc = cpu_multiexp(
&pool,
(Arc::new(cpu_bases.to_vec()), 0),
FullDensity,
Arc::new(cpu_exps.to_vec()),
&mut None,
);
for r in results {
acc.add_assign(&r?);
}
acc.add_assign(&cpu_acc.wait().unwrap());
Ok(acc)
})
}
}
| {
let aff_size = std::mem::size_of::<E::G1Affine>() + std::mem::size_of::<E::G2Affine>();
let exp_size = exp_size::<E>();
let proj_size = std::mem::size_of::<E::G1>() + std::mem::size_of::<E::G2>();
((((mem as f64) * (1f64 - MEMORY_PADDING)) as usize)
- (2 * core_count * ((1 << MAX_WINDOW_SIZE) + 1) * proj_size))
/ (aff_size + exp_size)
} | identifier_body |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result |
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
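// Hedged usage sketch (the rasterizer constructor and format variants are
// assumed from the surrounding gfx crates of this era; values are
// illustrative):
//
// let mut desc = Descriptor::new(Primitive::TriangleList,
//                                s::Rasterizer::new_fill());
// // one vertex buffer of interleaved [f32; 3] position + [f32; 3] color
// desc.vertex_buffers[0] = Some(VertexBufferDesc { stride: 24, rate: 0 });
// desc.attributes[0] = Some((0, Element {
//     format: format::Format(format::SurfaceType::R32_G32_B32,
//                            format::ChannelType::Float),
//     offset: 0,
// }));
// desc.attributes[1] = Some((0, Element {
//     format: format::Format(format::SurfaceType::R32_G32_B32,
//                            format::ChannelType::Float),
//     offset: 12,
// }));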
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
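// Hedged usage sketch (handles are illustrative; `rtv` and `dsv` stand for
// views obtained from a real backend, and both targets must share dimensions
// or the debug assertion in `set_dimensions` fires):
//
// let mut targets = PixelTargetSet::<R>::new();
// let dim = (800, 600, 1, texture::AaMode::Single);
// targets.add_color(0, &rtv, dim);
// targets.add_depth_stencil(&dsv, true, false, dim);
// assert_eq!(targets.get_view(), (800, 600, 1));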
| {
write!(f, "{}", self.description())
} | identifier_body |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive: primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil |
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering view (returns values > 0)
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
| {
self.stencil = Some(view.clone());
} | conditional_block |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS};
use {ConstantBufferSlot, ColorSlot, ResourceViewSlot,
UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask: mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
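// A hedged usage sketch: start from an empty descriptor and fill in one
// vertex buffer and one attribute slot. `rast` and `vertex_format` stand in
// for values the caller already has:
//
//     let mut desc = Descriptor::new(Primitive::TriangleList, rast);
//     desc.vertex_buffers[0] = Some(VertexBufferDesc { stride: 24, rate: 0 });
//     desc.attributes[0] = Some((0, Element { format: vertex_format, offset: 0 }));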
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct | <R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering dimensions (width, height, depth), each clamped to a minimum of 1
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
}
| PixelTargetSet | identifier_name |
pso.rs | // Copyright 2015 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Raw Pipeline State Objects
//!
//! This module contains items used to create and manage a raw pipeline state object. Most users
//! will want to use the typed and safe `PipelineState`. See the `pso` module inside the `gfx`
//! crate.
use {MAX_COLOR_TARGETS, MAX_VERTEX_ATTRIBUTES, MAX_CONSTANT_BUFFERS,
MAX_RESOURCE_VIEWS, MAX_UNORDERED_VIEWS, MAX_SAMPLERS}; | UnorderedViewSlot, SamplerSlot,
Primitive, Resources};
use {format, state as s, texture};
use shade::Usage;
use std::error::Error;
use std::fmt;
/// Maximum number of vertex buffers used in a PSO definition.
pub const MAX_VERTEX_BUFFERS: usize = 4;
/// An offset inside a vertex buffer, in bytes.
pub type BufferOffset = usize;
/// Error types happening upon PSO creation on the device side.
#[derive(Clone, Debug, PartialEq)]
pub struct CreationError;
impl fmt::Display for CreationError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.description())
}
}
impl Error for CreationError {
fn description(&self) -> &str {
"Could not create PSO on device."
}
}
/// Color output configuration of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct ColorInfo {
/// Color channel mask
pub mask: s::ColorMask,
/// Optional color blending
pub color: Option<s::BlendChannel>,
/// Optional alpha blending
pub alpha: Option<s::BlendChannel>,
}
impl From<s::ColorMask> for ColorInfo {
fn from(mask: s::ColorMask) -> ColorInfo {
ColorInfo {
mask,
color: None,
alpha: None,
}
}
}
impl From<s::Blend> for ColorInfo {
fn from(blend: s::Blend) -> ColorInfo {
ColorInfo {
mask: s::MASK_ALL,
color: Some(blend.color),
alpha: Some(blend.alpha),
}
}
}
/// Depth and stencil state of the PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct DepthStencilInfo {
/// Optional depth test configuration
pub depth: Option<s::Depth>,
/// Optional stencil test on the front faces
pub front: Option<s::StencilSide>,
/// Optional stencil test on the back faces
pub back: Option<s::StencilSide>,
}
impl From<s::Depth> for DepthStencilInfo {
fn from(depth: s::Depth) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(depth),
front: None,
back: None,
}
}
}
impl From<s::Stencil> for DepthStencilInfo {
fn from(stencil: s::Stencil) -> DepthStencilInfo {
DepthStencilInfo {
depth: None,
front: Some(stencil.front),
back: Some(stencil.back),
}
}
}
impl From<(s::Depth, s::Stencil)> for DepthStencilInfo {
fn from(ds: (s::Depth, s::Stencil)) -> DepthStencilInfo {
DepthStencilInfo {
depth: Some(ds.0),
front: Some(ds.1.front),
back: Some(ds.1.back),
}
}
}
/// Index of a vertex buffer.
pub type BufferIndex = u8;
/// Offset of an attribute from the start of the buffer, in bytes
pub type ElemOffset = u32;
/// Offset between attribute values, in bytes
pub type ElemStride = u8;
/// The number of instances between each subsequent attribute value
pub type InstanceRate = u8;
/// A struct element descriptor.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Element<F> {
/// Element format
pub format: F,
/// Offset from the beginning of the container, in bytes
pub offset: ElemOffset,
}
/// Vertex buffer descriptor
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct VertexBufferDesc {
/// Total container size, in bytes
pub stride: ElemStride,
/// Rate of the input for the given buffer
pub rate: InstanceRate,
}
/// PSO vertex attribute descriptor
pub type AttributeDesc = (BufferIndex, Element<format::Format>);
/// PSO constant buffer descriptor
pub type ConstantBufferDesc = Usage;
/// PSO shader resource view descriptor
pub type ResourceViewDesc = Usage;
/// PSO unordered access view descriptor
pub type UnorderedViewDesc = Usage;
/// PSO sampler descriptor
pub type SamplerDesc = Usage;
/// PSO color target descriptor
pub type ColorTargetDesc = (format::Format, ColorInfo);
/// PSO depth-stencil target descriptor
pub type DepthStencilDesc = (format::Format, DepthStencilInfo);
/// All the information surrounding a shader program that is required
/// for PSO creation, including the formats of vertex buffers and pixel targets.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub struct Descriptor {
/// Type of the primitive
pub primitive: Primitive,
/// Rasterizer setup
pub rasterizer: s::Rasterizer,
/// Enable scissor test
pub scissor: bool,
/// Vertex buffers
pub vertex_buffers: [Option<VertexBufferDesc>; MAX_VERTEX_BUFFERS],
/// Vertex attributes
pub attributes: [Option<AttributeDesc>; MAX_VERTEX_ATTRIBUTES],
/// Constant buffers
pub constant_buffers: [Option<ConstantBufferDesc>; MAX_CONSTANT_BUFFERS],
/// Shader resource views
pub resource_views: [Option<ResourceViewDesc>; MAX_RESOURCE_VIEWS],
/// Unordered access views
pub unordered_views: [Option<UnorderedViewDesc>; MAX_UNORDERED_VIEWS],
/// Samplers
pub samplers: [Option<SamplerDesc>; MAX_SAMPLERS],
/// Render target views (RTV)
pub color_targets: [Option<ColorTargetDesc>; MAX_COLOR_TARGETS],
/// Depth stencil view (DSV)
pub depth_stencil: Option<DepthStencilDesc>,
}
impl Descriptor {
/// Create a new empty PSO descriptor.
pub fn new(primitive: Primitive, rast: s::Rasterizer) -> Descriptor {
Descriptor {
primitive,
rasterizer: rast,
scissor: false,
vertex_buffers: [None; MAX_VERTEX_BUFFERS],
attributes: [None; MAX_VERTEX_ATTRIBUTES],
constant_buffers: [None; MAX_CONSTANT_BUFFERS],
resource_views: [None; MAX_RESOURCE_VIEWS],
unordered_views: [None; MAX_UNORDERED_VIEWS],
samplers: [None; MAX_SAMPLERS],
color_targets: [None; MAX_COLOR_TARGETS],
depth_stencil: None,
}
}
}
/// A complete set of vertex buffers to be used for vertex import in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct VertexBufferSet<R: Resources>(
/// Array of buffer handles with offsets in them
pub [Option<(R::Buffer, BufferOffset)>; MAX_VERTEX_ATTRIBUTES]
);
impl<R: Resources> VertexBufferSet<R> {
/// Create an empty set
pub fn new() -> VertexBufferSet<R> {
VertexBufferSet([None; MAX_VERTEX_ATTRIBUTES])
}
}
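// A minimal sketch: start with an empty set and bind a buffer handle at
// slot 0 with a zero byte offset (`vbuf` stands in for an `R::Buffer`):
//
//     let mut vbs = VertexBufferSet::new();
//     vbs.0[0] = Some((vbuf, 0));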
/// A constant buffer run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ConstantBufferParam<R: Resources>(pub R::Buffer, pub Usage, pub ConstantBufferSlot);
/// A shader resource view (SRV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct ResourceViewParam<R: Resources>(pub R::ShaderResourceView, pub Usage, pub ResourceViewSlot);
/// An unordered access view (UAV) run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct UnorderedViewParam<R: Resources>(pub R::UnorderedAccessView, pub Usage, pub UnorderedViewSlot);
/// A sampler run-time parameter for PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct SamplerParam<R: Resources>(pub R::Sampler, pub Usage, pub SamplerSlot);
/// A complete set of render targets to be used for pixel export in PSO.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct PixelTargetSet<R: Resources> {
/// Array of color target views
pub colors: [Option<R::RenderTargetView>; MAX_COLOR_TARGETS],
/// Depth target view
pub depth: Option<R::DepthStencilView>,
/// Stencil target view
pub stencil: Option<R::DepthStencilView>,
/// Rendering dimensions
pub dimensions: Option<texture::Dimensions>,
}
impl<R: Resources> PixelTargetSet<R> {
/// Create an empty set
pub fn new() -> PixelTargetSet<R> {
PixelTargetSet {
colors: [None; MAX_COLOR_TARGETS],
depth: None,
stencil: None,
dimensions: None,
}
}
/// Add a color view to the specified slot
pub fn add_color(&mut self,
slot: ColorSlot,
view: &R::RenderTargetView,
dim: texture::Dimensions) {
self.colors[slot as usize] = Some(view.clone());
self.set_dimensions(dim);
}
/// Add a depth or stencil view to the specified slot
pub fn add_depth_stencil(&mut self,
view: &R::DepthStencilView,
has_depth: bool,
has_stencil: bool,
dim: texture::Dimensions) {
if has_depth {
self.depth = Some(view.clone());
}
if has_stencil {
self.stencil = Some(view.clone());
}
self.set_dimensions(dim);
}
fn set_dimensions(&mut self, dim: texture::Dimensions) {
debug_assert!(self.dimensions.map(|d| d == dim).unwrap_or(true));
self.dimensions = Some(dim);
}
/// Get the rendering dimensions (width, height, depth), each clamped to a minimum of 1
pub fn get_view(&self) -> (u16, u16, u16) {
use std::cmp::max;
self.dimensions
.map(|(w, h, d, _)| (max(w, 1), max(h, 1), max(d, 1)))
.unwrap_or((1, 1, 1))
}
} | use {ConstantBufferSlot, ColorSlot, ResourceViewSlot, | random_line_split |
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
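//!
//! For example, converting an integer produces its [`Display`] output:
//!
//! ```
//! assert_eq!(42.to_string(), "42");
//! ```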
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
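//!
//! For example, one positional argument can be reused:
//!
//! ```
//! assert_eq!(format!("{0}, {0}!", "hello"), "hello, hello!");
//! ```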
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The default for numerics is also a space, but with
//! right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
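//!
//! For example, a string longer than the precision is truncated:
//!
//! ```
//! assert_eq!(format!("{:.3}", "hello"), "hel");
//! ```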
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
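//!
//! As a worked example of this grammar, `{0:>8.2}` selects argument `0`,
//! right-aligns it (`>`) in a width of `8` columns, and applies a precision
//! of `2`:
//!
//! ```
//! assert_eq!(format!("{0:>8.2}", 3.14159), "    3.14");
//! ```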
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
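//!
//! For example, the same integer can be rendered through several of these
//! traits:
//!
//! ```
//! assert_eq!(format!("{:b}", 10), "1010");
//! assert_eq!(format!("{:o}", 10), "12");
//! assert_eq!(format!("{:X}", 255), "FF");
//! ```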
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to further prevent intermediate allocations
//! when dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`usize`]:../../std/primitive.usize.html
//! [`isize`]:../../std/primitive.isize.html
//! [`i8`]:../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]:../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]:../../std/macro.write.html
//! [`Debug`]: trait.Debug.html | //! [`format!`]:../../std/macro.format.html
//! [`to_string`]:../../std/string/trait.ToString.html
//! [`writeln!`]:../../std/macro.writeln.html
//! [`write_fmt`]:../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]:../../std/io/trait.Write.html
//! [`print!`]:../../std/macro.print.html
//! [`println!`]:../../std/macro.println.html
//! [`eprint!`]:../../std/macro.eprint.html
//! [`eprintln!`]:../../std/macro.eprintln.html
//! [`write!`]:../../std/macro.write.html
//! [`format_args!`]:../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]:../../std/macro.format_args.html
/// [`format!`]:../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
} | random_line_split |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The default for numerics is also a space, but with
//! right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
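//!
//! For example, combining `#` and `0`: the zero-padding is inserted between
//! the `0x` prefix and the digits, and the prefix counts toward the width:
//!
//! ```
//! assert_eq!(format!("{:#06x}", 27), "0x001b");
//! ```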
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
let capacity = a | rgs.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| identifier_body |
|
fmt.rs | //! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the arguments. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the arguments has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
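//!
//! For example, the same argument can be used twice:
//!
//! ```
//! format!("{0}, {0}!", "hello"); // => "hello, hello!"
//! ```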
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
//! ```
//!
//! The optional fill character and alignment are normally provided in conjunction with the
//! [`width`](#width) parameter. They must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment may not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (`+` or `-`)
//! should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeroes are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeroes are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale, and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ maybe-format <text> ] *
//! maybe-format := '{' '{' | '}' '}' | <format>
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | '?' | ''
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`](trait.Octal.html)
//! * `x` ⇒ [`LowerHex`](trait.LowerHex.html)
//! * `X` ⇒ [`UpperHex`](trait.UpperHex.html)
//! * `p` ⇒ [`Pointer`](trait.Pointer.html)
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`](trait.LowerExp.html)
//! * `E` ⇒ [`UpperExp`](trait.UpperExp.html)
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
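//!
//! For example, an integer can be rendered through several of these traits:
//!
//! ```
//! assert_eq!(format!("{:b}", 5), "101");
//! assert_eq!(format!("{:o}", 9), "11");
//! assert_eq!(format!("{:x}", 255), "ff");
//! assert_eq!(format!("{:X}", 255), "FF");
//! ```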
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of [`Result`]`<(), `[`std::fmt::Error`]`>`. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//!
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
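//!
//! Example usage is:
//!
//! ```
//! eprint!("Error: ");
//! eprintln!("Could not complete task");
//! ```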
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`usize`]: ../../std/primitive.usize.html
//! [`isize`]: ../../std/primitive.isize.html
//! [`i8`]: ../../std/primitive.i8.html
//! [`Display`]: trait.Display.html
//! [`Binary`]: trait.Binary.html
//! [`fmt::Result`]: type.Result.html
//! [`Result`]: ../../std/result/enum.Result.html
//! [`std::fmt::Error`]: struct.Error.html
//! [`Formatter`]: struct.Formatter.html
//! [`write!`]: ../../std/macro.write.html
//! [`Debug`]: trait.Debug.html
//! [`format!`]: ../../std/macro.format.html
//! [`to_string`]: ../../std/string/trait.ToString.html
//! [`writeln!`]: ../../std/macro.writeln.html
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html
//! [`println!`]: ../../std/macro.println.html
//! [`eprint!`]: ../../std/macro.eprint.html
//! [`eprintln!`]: ../../std/macro.eprintln.html
//! [`format_args!`]: ../../std/macro.format_args.html
//! [`fmt::Arguments`]: struct.Arguments.html
//! [`write`]: fn.write.html
//! [`format`]: fn.format.html
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
use crate::string;
/// The `format` function takes an [`Arguments`] struct and returns the resulting
/// formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`Arguments`]: struct.Arguments.html
/// [`format_args!`]: ../../std/macro.format_args.html
/// [`format!`]: ../../std/macro.format.html
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments | -> string::String {
let capacity = args.estimated_capacity();
let mut output = string::String::with_capacity(capacity);
output.write_fmt(args).expect("a formatting trait implementation returned an error");
output
}
| <'_>) | identifier_name |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}
//
fn | (x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
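// (ch, maj and the sigma functions above are the logical functions defined in
// FIPS 180-4, section 4.1.2.)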
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
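// (These are the first 32 bits of the fractional parts of the cube roots of the
// first 64 prime numbers.)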
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
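// (The first 32 bits of the fractional parts of the square roots of the first
// 8 prime numbers.)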
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; 256/8];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
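// Example usage (a minimal sketch; the `hash::sha_256` module path below
// matches the one used by the tests):
//
//     let digest = sha_256(&[0x61, 0x62, 0x63]);
//     assert_eq!(digest.len(), DIGEST_LEN);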
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
    // The following tests are highly resource intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
}
| sigma_1 | identifier_name |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 |
//
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e))
.wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g;
g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; 256/8];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
    // The following tests are highly resource intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
}
| {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
} | identifier_body |
sha_256.rs | //! This module is an implementation of the SHA-256 hashing algorithm
use padding::PaddingScheme;
use padding::merkle_damgard::MDPadding512u32;
// Logical functions used by SHA-256 (function names taken from NIST standard)
fn ch(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (!x & z)
}
//
fn maj(x: u32, y: u32, z: u32) -> u32 {
(x & y) ^ (x & z) ^ (y & z)
}
//
fn capital_sigma_0(x: u32) -> u32 {
x.rotate_right(2) ^ x.rotate_right(13) ^ x.rotate_right(22)
}
//
fn capital_sigma_1(x: u32) -> u32 {
x.rotate_right(6) ^ x.rotate_right(11) ^ x.rotate_right(25)
}
//
fn sigma_0(x: u32) -> u32 {
x.rotate_right(7) ^ x.rotate_right(18) ^ (x >> 3)
}
//
fn sigma_1(x: u32) -> u32 {
x.rotate_right(17) ^ x.rotate_right(19) ^ (x >> 10)
}
// Constants used by SHA-256
const K: [u32; 64] = [0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2];
// Initial hash value of SHA-256
const H_0: [u32; 8] = [0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19];
// SHA-256 digests will be emitted in the following format
pub const DIGEST_LEN: usize = 256/8;
pub type Digest = [u8; DIGEST_LEN];
// Compute the SHA-256 hash of any message
pub fn sha_256(message: &[u8]) -> Digest {
// Set the initial hash value
let mut hash = H_0;
// Parse and pad the message into 512-bit blocks of 32-bit words, then
// iterate over the resulting message blocks
for message_block in MDPadding512u32::new(message) {
// Prepare the message schedule
let mut w = [0; 64];
w[0..16].copy_from_slice(&message_block[..]);
for t in 16..64 {
w[t] = sigma_1(w[t-2]).wrapping_add(w[t-7])
.wrapping_add(sigma_0(w[t-15]))
.wrapping_add(w[t-16]);
}
// Initialize the eight working variables from the previous hash value
let (mut a, mut b, mut c, mut d) = (hash[0], hash[1], hash[2], hash[3]);
let (mut e, mut f, mut g, mut h) = (hash[4], hash[5], hash[6], hash[7]);
// Compute the hash increment
for t in 0..64 {
let t_1 = h.wrapping_add(capital_sigma_1(e)) | g = f;
f = e;
e = d.wrapping_add(t_1);
d = c;
c = b;
b = a;
a = t_1.wrapping_add(t_2);
}
// Update the hash value
hash[0] = hash[0].wrapping_add(a);
hash[1] = hash[1].wrapping_add(b);
hash[2] = hash[2].wrapping_add(c);
hash[3] = hash[3].wrapping_add(d);
hash[4] = hash[4].wrapping_add(e);
hash[5] = hash[5].wrapping_add(f);
hash[6] = hash[6].wrapping_add(g);
hash[7] = hash[7].wrapping_add(h);
}
// Output the final hash value
let mut result = [0u8; 256/8];
for (input, outputs) in hash.iter().zip(result.chunks_mut(4)) {
outputs.copy_from_slice(&[(*input >> 24) as u8,
((*input >> 16) & 0xff) as u8,
((*input >> 8) & 0xff) as u8,
(*input & 0xff) as u8]);
};
result
}
#[cfg(test)]
mod tests {
use hash::sha_256::sha_256;
#[test]
fn one_block_message_sample() {
let input = [0x61, 0x62, 0x63];
let hash = sha_256(&input);
assert_eq!(hash, [0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea,
0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23,
0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c,
0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad]);
}
#[test]
fn two_block_message_sample() {
let input = [0x61, 0x62, 0x63, 0x64, 0x62, 0x63, 0x64, 0x65,
0x63, 0x64, 0x65, 0x66, 0x64, 0x65, 0x66, 0x67,
0x65, 0x66, 0x67, 0x68, 0x66, 0x67, 0x68, 0x69,
0x67, 0x68, 0x69, 0x6a, 0x68, 0x69, 0x6a, 0x6b,
0x69, 0x6a, 0x6b, 0x6c, 0x6a, 0x6b, 0x6c, 0x6d,
0x6b, 0x6c, 0x6d, 0x6e, 0x6c, 0x6d, 0x6e, 0x6f,
0x6d, 0x6e, 0x6f, 0x70, 0x6e, 0x6f, 0x70, 0x71];
let hash = sha_256(&input);
assert_eq!(hash, [0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8,
0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39,
0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67,
0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1]);
}
#[test]
fn one_byte() {
let input = [0xbd];
let hash = sha_256(&input);
assert_eq!(hash, [0x68, 0x32, 0x57, 0x20, 0xaa, 0xbd, 0x7c, 0x82,
0xf3, 0x0f, 0x55, 0x4b, 0x31, 0x3d, 0x05, 0x70,
0xc9, 0x5a, 0xcc, 0xbb, 0x7d, 0xc4, 0xb5, 0xaa,
0xe1, 0x12, 0x04, 0xc0, 0x8f, 0xfe, 0x73, 0x2b]);
}
#[test]
fn four_bytes() {
let input = [0xc9, 0x8c, 0x8e, 0x55];
let hash = sha_256(&input);
assert_eq!(hash, [0x7a, 0xbc, 0x22, 0xc0, 0xae, 0x5a, 0xf2, 0x6c,
0xe9, 0x3d, 0xbb, 0x94, 0x43, 0x3a, 0x0e, 0x0b,
0x2e, 0x11, 0x9d, 0x01, 0x4f, 0x8e, 0x7f, 0x65,
0xbd, 0x56, 0xc6, 0x1c, 0xcc, 0xcd, 0x95, 0x04]);
}
#[test]
fn fifty_five_zeros() {
let input = [0; 55];
let hash = sha_256(&input);
assert_eq!(hash, [0x02, 0x77, 0x94, 0x66, 0xcd, 0xec, 0x16, 0x38,
0x11, 0xd0, 0x78, 0x81, 0x5c, 0x63, 0x3f, 0x21,
0x90, 0x14, 0x13, 0x08, 0x14, 0x49, 0x00, 0x2f,
0x24, 0xaa, 0x3e, 0x80, 0xf0, 0xb8, 0x8e, 0xf7]);
}
#[test]
fn fifty_six_zeros() {
let input = [0; 56];
let hash = sha_256(&input);
assert_eq!(hash, [0xd4, 0x81, 0x7a, 0xa5, 0x49, 0x76, 0x28, 0xe7,
0xc7, 0x7e, 0x6b, 0x60, 0x61, 0x07, 0x04, 0x2b,
0xbb, 0xa3, 0x13, 0x08, 0x88, 0xc5, 0xf4, 0x7a,
0x37, 0x5e, 0x61, 0x79, 0xbe, 0x78, 0x9f, 0xbb]);
}
#[test]
fn fifty_seven_zeros() {
let input = [0; 57];
let hash = sha_256(&input);
assert_eq!(hash, [0x65, 0xa1, 0x6c, 0xb7, 0x86, 0x13, 0x35, 0xd5,
0xac, 0xe3, 0xc6, 0x07, 0x18, 0xb5, 0x05, 0x2e,
0x44, 0x66, 0x07, 0x26, 0xda, 0x4c, 0xd1, 0x3b,
0xb7, 0x45, 0x38, 0x1b, 0x23, 0x5a, 0x17, 0x85]);
}
#[test]
fn sixty_four_zeros() {
let input = [0; 64];
let hash = sha_256(&input);
assert_eq!(hash, [0xf5, 0xa5, 0xfd, 0x42, 0xd1, 0x6a, 0x20, 0x30,
0x27, 0x98, 0xef, 0x6e, 0xd3, 0x09, 0x97, 0x9b,
0x43, 0x00, 0x3d, 0x23, 0x20, 0xd9, 0xf0, 0xe8,
0xea, 0x98, 0x31, 0xa9, 0x27, 0x59, 0xfb, 0x4b]);
}
#[test]
fn a_thousand_zeros() {
let input = [0; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0x54, 0x1b, 0x3e, 0x9d, 0xaa, 0x09, 0xb2, 0x0b,
0xf8, 0x5f, 0xa2, 0x73, 0xe5, 0xcb, 0xd3, 0xe8,
0x01, 0x85, 0xaa, 0x4e, 0xc2, 0x98, 0xe7, 0x65,
0xdb, 0x87, 0x74, 0x2b, 0x70, 0x13, 0x8a, 0x53]);
}
#[test]
fn a_thousand_41() {
let input = [0x41; 1000];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0xe6, 0x86, 0x82, 0x34, 0x89, 0xce, 0xd2,
0x01, 0x7f, 0x60, 0x59, 0xb8, 0xb2, 0x39, 0x31,
0x8b, 0x63, 0x64, 0xf6, 0xdc, 0xd8, 0x35, 0xd0,
0xa5, 0x19, 0x10, 0x5a, 0x1e, 0xad, 0xd6, 0xe4]);
}
#[test]
fn a_thousand_and_five_55() {
let input = [0x55; 1005];
let hash = sha_256(&input);
assert_eq!(hash, [0xf4, 0xd6, 0x2d, 0xde, 0xc0, 0xf3, 0xdd, 0x90,
0xea, 0x13, 0x80, 0xfa, 0x16, 0xa5, 0xff, 0x8d,
0xc4, 0xc5, 0x4b, 0x21, 0x74, 0x06, 0x50, 0xf2,
0x4a, 0xfc, 0x41, 0x20, 0x90, 0x35, 0x52, 0xb0]);
}
#[test]
fn a_million_zeros() {
let input = vec![0; 1_000_000];
let hash = sha_256(&input);
assert_eq!(hash, [0xd2, 0x97, 0x51, 0xf2, 0x64, 0x9b, 0x32, 0xff,
0x57, 0x2b, 0x5e, 0x0a, 0x9f, 0x54, 0x1e, 0xa6,
0x60, 0xa5, 0x0f, 0x94, 0xff, 0x0b, 0xee, 0xdf,
0xb0, 0xb6, 0x92, 0xb9, 0x24, 0xcc, 0x80, 0x25]);
}
    // The following tests are highly resource intensive and should only be
// run in release mode, which is why they are ignored by default.
#[test]
#[ignore]
fn half_a_billion_5a() {
let input = vec![0x5a; 0x2000_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x15, 0xa1, 0x86, 0x8c, 0x12, 0xcc, 0x53, 0x95,
0x1e, 0x18, 0x23, 0x44, 0x27, 0x74, 0x47, 0xcd,
0x09, 0x79, 0x53, 0x6b, 0xad, 0xcc, 0x51, 0x2a,
0xd2, 0x4c, 0x67, 0xe9, 0xb2, 0xd4, 0xf3, 0xdd]);
}
//
#[test]
#[ignore]
fn a_billion_zeros() {
let input = vec![0; 0x4100_0000];
let hash = sha_256(&input);
assert_eq!(hash, [0x46, 0x1c, 0x19, 0xa9, 0x3b, 0xd4, 0x34, 0x4f,
0x92, 0x15, 0xf5, 0xec, 0x64, 0x35, 0x70, 0x90,
0x34, 0x2b, 0xc6, 0x6b, 0x15, 0xa1, 0x48, 0x31,
0x7d, 0x27, 0x6e, 0x31, 0xcb, 0xc2, 0x0b, 0x53]);
}
//
#[test]
#[ignore]
fn two_billions_42() {
let input = vec![0x42; 0x6000_003e];
let hash = sha_256(&input);
assert_eq!(hash, [0xc2, 0x3c, 0xe8, 0xa7, 0x89, 0x5f, 0x4b, 0x21,
0xec, 0x0d, 0xaf, 0x37, 0x92, 0x0a, 0xc0, 0xa2,
0x62, 0xa2, 0x20, 0x04, 0x5a, 0x03, 0xeb, 0x2d,
0xfe, 0xd4, 0x8e, 0xf9, 0xb0, 0x5a, 0xab, 0xea]);
}
} | .wrapping_add(ch(e, f, g))
.wrapping_add(K[t])
.wrapping_add(w[t]);
let t_2 = capital_sigma_0(a).wrapping_add(maj(a, b, c));
h = g; | random_line_split |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
An array of f64 values represents the genome.
Each f64 (8 bytes, one gene) represents a single instruction.

The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to an instruction in the Brainf-ck programming language.
Start with a population of random genomes.
Convert each f64 into its corresponding instruction, assemble the resulting
program, and execute it.
Score each program's fitness based on its console output and rank the programs.
Use roulette-wheel selection, crossover, and mutation to pair the best genomes
together and produce a new generation.
Repeat the process with each new generation until the target fitness is reached.

The Brainf-ck instruction set:
1 > Increment the pointer.
2 < Decrement the pointer.
3 + Increment the byte at the pointer.
4 - Decrement the byte at the pointer.
5 . Output the byte at the pointer.
6 , Read a byte of input and store it in the byte at the pointer.
7 [ If the byte at the pointer is zero, jump forward past the matching ].
8 ] Jump back to the matching [ unless the byte at the pointer is zero.
*/
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;//number of elites selected
const NUM_COPIES_ELITE: usize = 1; //copies of each elite
const NUM_THREAD: usize = 2;//number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;//population size
// A genome
pub struct Genome {
    fitness: f64,
    genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
    fn random() -> Genome{
        Genome{
            fitness: 1.0,
            //sample every gene independently (vec![random(); N] would repeat a single sample)
            genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect()
        }
    }
    /*
    Mutate by insertion, replacement, deletion, or shifting.
    - Pick an index in the genome.
    - Insertion: a mutated gene is inserted at that position. The remaining
      genes shift up by one index and the last gene is dropped.
    - Replacement: the gene at that position is overwritten with a mutated gene.
    - Deletion: all genes shift down over that position. A mutated gene is
      appended at the end of the array.
    - Shift: all genes rotate up or down starting from position 0. If up, the
      last gene wraps to the front; if down, the first gene wraps to the end.
    */
    fn mutate(&mut self){
        let len = self.genes.len();
        for pos in 0..len{
            if random::<f64>() < MUTATION_RATE{
                //pick a mutation type
                let r = random::<f64>();
                if r <= 0.25 {
                    //insertion mutation
                    let mutation_index = pos;
                    //back up the current gene before mutating
                    let mut shift_bit = self.genes[mutation_index];
                    //write a random value at the mutation index
                    self.genes[mutation_index] = random();
                    //shift the remaining genes up or down by one.
                    let up = random::<f64>() >= 0.5;
                    if up{//insert, dropping the last gene
                        for i in mutation_index+1..len{
                            let next_shift_bit = self.genes[i];
                            self.genes[i] = shift_bit;
                            shift_bit = next_shift_bit;
                        }
                    }else{//insert, dropping the first gene
                        for i in (0..=mutation_index).rev(){
                            let next_shift_bit = self.genes[i];
                            self.genes[i] = shift_bit;
                            shift_bit = next_shift_bit;
                        }
                    }
                }else if r <= 0.5{
                    //deletion mutation
                    let mutation_index = pos;
                    let up = random::<f64>() >= 0.5;
                    if up{//delete, inserting a new gene at the front
                        for i in (1..=mutation_index).rev(){
                            self.genes[i] = self.genes[i-1];
                        }
                        self.genes[0] = random();
                    }else{//delete, inserting a new gene at the end
                        for i in mutation_index..len-1{
                            self.genes[i] = self.genes[i+1]
                        }
                        self.genes[len-1] = random();
                    }
                }else if r <= 0.75{
                    //shift/rotate mutation
                    let up = random::<f64>() >= 0.5;
                    if up{
                        // 1,2,3 => 3,1,2
                        let mut shift_bit = self.genes[0];
                        for i in 0..len{
                            if i>0{
                                let temp = self.genes[i];
                                self.genes[i] = shift_bit;
                                shift_bit = temp;
                            }else{
                                self.genes[i] = self.genes[len-1];
                            }
                        }
                    }else{
                        // 1,2,3 => 2,3,1
                        let mut shift_bit = self.genes[len-1];
                        for i in (0..=len-1).rev(){
                            if i<len-1{
                                let temp = self.genes[i];
                                self.genes[i] = shift_bit;
                                shift_bit = temp;
                            }else{
                                self.genes[i] = self.genes[0];
                            }
                        }
                    }
                }else{
                    //replacement mutation
                    self.genes[pos] = random();
                }
            }
        }
}
    fn crossover(&self, genome:&Genome) -> (Genome, Genome){
        if random::<f64>()>CROSSOVER_RATE{
            return (self.clone(), genome.clone());
        }
        //pick a crossover point, then splice the two parents together
        let pos = (random::<f64>()*self.genes.len() as f64) as usize;
        let mut child1 = Genome::new();
        let mut child2 = Genome::new();
        for i in 0..self.genes.len(){
            if i<pos{
                child1.genes.push(self.genes[i]);
                child2.genes.push(genome.genes[i]);
            }else{
                child1.genes.push(genome.genes[i]);
                child2.genes.push(self.genes[i]);
            }
        }
        (child1, child2)
    }
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//',' (input) is disabled; emit '.' instead.
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
//Draw a random number between 0 and the total fitness.
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
//Return the genome at which the running fitness total exceeds the random number.
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
//Advance to the next generation.
fn epoch(&mut self){
//Compute the total fitness.
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
//Sort by fitness score, best first.
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
//The new population.
let mut new_pop = vec![];
//Elitism: copy the best genomes straight into the new population.
for i in 0..NUM_ELITE{
for _ in 0. |
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
//Each crossover yields two children.
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
//Each loop iteration produces two children.
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
//Crossover.
let (mut baby1, mut baby2) = mum.crossover(&dad);
//Mutation.
baby1.mutate();
baby2.mutate();
//Compute the fitness scores.
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
//Swap in the new population.
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| .NUM_COPIES_ELITE{ | identifier_name |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
An array of f64 values represents a genome.
Each f64 (8 bytes) is a gene, and each gene encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to an instruction in the Brainf-ck programming language.
Start with a population of random genomes.
Convert each f64 into its corresponding instruction, assemble the resulting program, and execute it.
Score each program's fitness from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover, and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set:
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
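// A small worked example of the decoding step described above (see `to_bf`
// below): each gene in [0.0, 1.0] is bucketed into one of eight instructions.
//
//     let g = Genome { fitness: 1.0, genes: vec![0.1, 0.3, 0.6] };
//     assert_eq!(g.to_bf(), ">+.");
//
// 0.1 falls in the '>' bucket (<= 0.125), 0.3 in '+' (<= 0.375), and 0.6 in
// '.' (<= 0.625).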
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;//number of elite genomes selected each generation
const NUM_COPIES_ELITE: usize = 1; //number of copies made of each elite
const NUM_THREAD: usize = 2;//number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;//population size
//A genome.
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
//Sample each gene independently; `vec![random(); N]` would clone one value.
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect()
}
}
/*
Mutation works by insertion, replacement, deletion, or shift:
- Pick an index in the genome.
- Insertion: insert a mutated gene at that index; the remaining genes shift up by one index and the last gene is dropped.
- Replacement: overwrite the gene at that index with a mutated gene.
- Deletion: shift all genes down over that index and append a mutated gene at the end of the array.
- Shift: move every gene up or down starting from index 0. Shifting up wraps the last gene to the front; shifting down wraps the first gene to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//Pick the mutation type.
let r = random::<f64>();
if r <= 0.25 {
//Insertion mutation.
let mutation_index = pos;
//Back up the current gene before mutating.
let mut shift_bit = self.genes[mutation_index];
//Write a random value at the mutation index.
self.genes[mutation_index] = random();
//Shift the genes up or down by one.
let up = random::<f64>() >= 0.5;
if up{//Insert here and drop the last gene.
for i in mutation_index+1..self.length(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//Insert here and drop the first gene.
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//Deletion mutation.
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//Delete here and insert at the front.
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//Delete here and insert at the end.
for i in mutation_index..self.length()-1{
self.genes[i] = self.genes[i+1];
}
let last = self.length()-1;
self.genes[last] = random();
}
}else if r <= 0.75{
//Shift/rotation mutation.
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length(){
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length()-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length()-1];
for i in (0..=self.length()-1).rev(){
if i<self.length()-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//Replacement mutation.
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
//Pick a crossover point and exchange the tails of the two parents.
//Assumes both parents have the same genome length.
let pos = (random::<f64>()*self.length() as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length(){
if i<pos{
child1.genes.push(self.genes[i]);
child2.genes.push(genome.genes[i]);
}else{
child1.genes.push(genome.genes[i]);
child2.genes.push(self.genes[i]);
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//',' (input) is disabled; emit '.' instead.
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
//Draw a random number between 0 and the total fitness.
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
//Return the genome at which the running fitness total exceeds the random number.
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
//Advance to the next generation.
fn epoch(&mut self){
//Compute the total fitness.
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
//Sort by fitness score, best first.
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
//The new population.
let mut new_pop = vec![];
//Elitism: copy the best genomes straight into the new population.
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
| oulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
//Each loop iteration produces two children.
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
//Crossover.
let (mut baby1, mut baby2) = mum.crossover(&dad);
//Mutation.
baby1.mutate();
baby2.mutate();
//Compute the fitness scores.
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
//Swap in the new population.
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
//每次生成两个孩子
parents.push(self.populations[self.r | identifier_body |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
An array of f64 values represents a genome.
Each f64 (8 bytes) is a gene, and each gene encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to an instruction in the Brainf-ck programming language.
Start with a population of random genomes.
Convert each f64 into its corresponding instruction, assemble the resulting program, and execute it.
Score each program's fitness from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover, and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set:
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
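// A small worked example of the decoding step described above (see `to_bf`
// below): each gene in [0.0, 1.0] is bucketed into one of eight instructions.
//
//     let g = Genome { fitness: 1.0, genes: vec![0.1, 0.3, 0.6] };
//     assert_eq!(g.to_bf(), ">+.");
//
// 0.1 falls in the '>' bucket (<= 0.125), 0.3 in '+' (<= 0.375), and 0.6 in
// '.' (<= 0.625).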
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;//number of elite genomes selected each generation
const NUM_COPIES_ELITE: usize = 1; //number of copies made of each elite
const NUM_THREAD: usize = 2;//number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;//population size
//A genome.
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
//Sample each gene independently; `vec![random(); N]` would clone one value.
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect()
}
}
/*
Mutation works by insertion, replacement, deletion, or shift:
- Pick an index in the genome.
- Insertion: insert a mutated gene at that index; the remaining genes shift up by one index and the last gene is dropped.
- Replacement: overwrite the gene at that index with a mutated gene.
- Deletion: shift all genes down over that index and append a mutated gene at the end of the array.
- Shift: move every gene up or down starting from index 0. Shifting up wraps the last gene to the front; shifting down wraps the first gene to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//Pick the mutation type.
let r = random::<f64>();
if r <= 0.25 {
//Insertion mutation.
let mutation_index = pos;
//Back up the current gene before mutating.
let mut shift_bit = self.genes[mutation_index];
//Write a random value at the mutation index.
self.genes[mutation_index] = random();
//Shift the genes up or down by one.
let up = random::<f64>() >= 0.5;
if up{//Insert here and drop the last gene.
for i in mutation_index+1..self.length(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//Insert here and drop the first gene.
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//Deletion mutation.
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//Delete here and insert at the front.
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//Delete here and insert at the end.
for i in mutation_index..self.length()-1{
self.genes[i] = self.genes[i+1];
}
let last = self.length()-1;
self.genes[last] = random();
}
}else if r <= 0.75{
//Shift/rotation mutation.
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length{ | self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length()-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length()-1];
for i in (0..=self.length()-1).rev(){
if i<self.length()-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//Replacement mutation.
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
//Pick a crossover point and exchange the tails of the two parents.
//Assumes both parents have the same genome length.
let pos = (random::<f64>()*self.length() as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length(){
if i<pos{
child1.genes.push(self.genes[i]);
child2.genes.push(genome.genes[i]);
}else{
child1.genes.push(genome.genes[i]);
child2.genes.push(self.genes[i]);
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//',' (input) is disabled; emit '.' instead.
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
//Draw a random number between 0 and the total fitness.
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
//Return the genome at which the running fitness total exceeds the random number.
if fitness_total > slice{
selected_pos = i;
break;
}
}
selected_pos
}
//Advance to the next generation.
fn epoch(&mut self){
//Compute the total fitness.
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
//Sort by fitness score, best first.
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
//The new population.
let mut new_pop = vec![];
//Elitism: copy the best genomes straight into the new population.
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
//Each crossover yields two children.
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
//Each loop iteration produces two children.
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
//Crossover.
let (mut baby1, mut baby2) = mum.crossover(&dad);
//Mutation.
baby1.mutate();
baby2.mutate();
//Compute the fitness scores.
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
//Swap in the new population.
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
} | if i>0{
let temp = self.genes[i]; | random_line_split |
main.1.rs | extern crate brainfuck;
extern crate rand;
#[macro_use]
extern crate log;
extern crate env_logger;
mod context;
use brainfuck::parser;
use context::Context;
use std::time::{Duration, Instant};
use rand::random;
use log::LevelFilter;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, Mutex};
/*
An array of f64 values represents a genome.
Each f64 (8 bytes) is a gene, and each gene encodes one instruction.
The AI program works as follows:
A genome consists of an array of f64 values.
Each gene corresponds to an instruction in the Brainf-ck programming language.
Start with a population of random genomes.
Convert each f64 into its corresponding instruction, assemble the resulting program, and execute it.
Score each program's fitness from its console output and rank the programs.
Pair the best genomes using roulette-wheel selection, crossover, and mutation to produce a new generation.
Repeat the process with the new generation until the target fitness score is reached.
Instruction set:
Brainf-ck consists of the following instructions:
1 > increment the pointer.
2 < decrement the pointer.
3 + increment the byte at the pointer.
4 - decrement the byte at the pointer.
5 . output the byte at the pointer.
6 , read one byte of input and store it at the pointer.
7 [ if the byte at the pointer is zero, jump forward past the matching ].
8 ] jump back to the matching [ unless the byte at the pointer is zero.
*/
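// A small worked example of the decoding step described above (see `to_bf`
// below): each gene in [0.0, 1.0] is bucketed into one of eight instructions.
//
//     let g = Genome { fitness: 1.0, genes: vec![0.1, 0.3, 0.6] };
//     assert_eq!(g.to_bf(), ">+.");
//
// 0.1 falls in the '>' bucket (<= 0.125), 0.3 in '+' (<= 0.375), and 0.6 in
// '.' (<= 0.625).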
const MUTATION_RATE: f64 = 0.05;
const CROSSOVER_RATE: f64 = 0.80;
const INITIAL_GENOME_SIZE: usize = 100;
const NUM_ELITE: usize = 4;//number of elite genomes selected each generation
const NUM_COPIES_ELITE: usize = 1; //number of copies made of each elite
const NUM_THREAD: usize = 2;//number of worker threads
const POPULATION_SIZE: usize = 50*NUM_THREAD+NUM_ELITE*NUM_COPIES_ELITE;//population size
//A genome.
pub struct Genome {
fitness: f64,
genes: Vec<f64>,
}
impl Genome {
fn new() -> Genome{
Genome{
fitness: 1.0,
genes: vec![]
}
}
fn length(&self) -> usize{
self.genes.len()
}
fn random() -> Genome{
Genome{
fitness: 1.0,
//Sample each gene independently; `vec![random(); N]` would clone one value.
genes: (0..INITIAL_GENOME_SIZE).map(|_| random()).collect()
}
}
/*
Mutation works by insertion, replacement, deletion, or shift:
- Pick an index in the genome.
- Insertion: insert a mutated gene at that index; the remaining genes shift up by one index and the last gene is dropped.
- Replacement: overwrite the gene at that index with a mutated gene.
- Deletion: shift all genes down over that index and append a mutated gene at the end of the array.
- Shift: move every gene up or down starting from index 0. Shifting up wraps the last gene to the front; shifting down wraps the first gene to the end.
*/
fn mutate(&mut self){
for pos in 0..self.genes.len(){
if random::<f64>() < MUTATION_RATE{
//Pick the mutation type.
let r = random::<f64>();
if r <= 0.25 {
//Insertion mutation.
let mutation_index = pos;
//Back up the current gene before mutating.
let mut shift_bit = self.genes[mutation_index];
//Write a random value at the mutation index.
self.genes[mutation_index] = random();
//Shift the genes up or down by one.
let up = random::<f64>() >= 0.5;
if up{//Insert here and drop the last gene.
for i in mutation_index+1..self.length(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}else{//Insert here and drop the first gene.
for i in (0..=mutation_index).rev(){
let next_shift_bit = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = next_shift_bit;
}
}
}else if r <= 0.5{
//Deletion mutation.
let mutation_index = pos;
let up = random::<f64>() >= 0.5;
if up{//Delete here and insert at the front.
for i in (1..=mutation_index).rev(){
self.genes[i] = self.genes[i-1];
}
self.genes[0] = random();
}else{//Delete here and insert at the end.
for i in mutation_index..self.length()-1{
self.genes[i] = self.genes[i+1];
}
let last = self.length()-1;
self.genes[last] = random();
}
}else if r <= 0.75{
//Shift/rotation mutation.
let up = random::<f64>() >= 0.5;
if up{
// 1,2,3 => 3,1,2
let mut shift_bit = self.genes[0];
for i in 0..self.length(){
if i>0{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[self.length()-1];
}
}
}else{
// 1,2,3 => 2,3,1
let mut shift_bit = self.genes[self.length()-1];
for i in (0..=self.length()-1).rev(){
if i<self.length()-1{
let temp = self.genes[i];
self.genes[i] = shift_bit;
shift_bit = temp;
}else{
self.genes[i] = self.genes[0];
}
}
}
}else{
//Replacement mutation.
self.genes[pos] = random();
}
}
}
}
fn crossover(&self, genome:&Genome) -> (Genome, Genome){
if random::<f64>()>CROSSOVER_RATE{
return (self.clone(), genome.clone());
}
//Pick a crossover point and exchange the tails of the two parents.
//Assumes both parents have the same genome length.
let pos = (random::<f64>()*self.length() as f64) as usize;
let mut child1 = Genome::new();
let mut child2 = Genome::new();
for i in 0..self.length(){
if i<pos{
child1.genes.push(self.genes[i]);
child2.genes.push(genome.genes[i]);
}else{
child1.genes.push(genome.genes[i]);
child2.genes.push(self.genes[i]);
}
}
(child1, child2)
}
fn to_bf(&self) -> String {
let mut bf = String::new();
for gene in self.genes.iter() {
let d = *gene;
if d <= 0.125 {
bf.push('>');
} else if d <= 0.25 {
bf.push('<');
} else if d <= 0.375 {
bf.push('+');
} else if d <= 0.5 {
bf.push('-');
} else if d <= 0.625 {
bf.push('.');
} else if d <= 0.75 {
//',' (input) is disabled; emit '.' instead.
bf.push('.');
} else if d <= 0.875 {
bf.push('[');
} else {
bf.push(']');
}
}
bf
}
fn run(&self) -> String{
let mut context = Context::new();
let program = self.to_bf().replace("[]", "");
if let Ok(block) = parser::parse(program.as_bytes()) {
context.run(&block);
}
context.out
}
fn calc_fitness(&mut self, target: &str){
let target = target.as_bytes();
self.fitness = 0.0;
let out = self.run();
let out_bytes = out.as_bytes();
for i in 0..target.len() {
if out_bytes.len()>i{
self.fitness += 255.0 - (out_bytes[i] as f64 - target[i] as f64).abs();
}
}
}
}
impl Clone for Genome {
fn clone(&self) -> Genome {
Genome{
fitness: self.fitness,
genes: self.genes.clone(),
}
}
}
pub struct GA {
target: String,
populations: Vec<Genome>,
total_fitness: f64,
generations: usize,
}
impl GA {
fn new(target: &str) -> GA {
let mut populations = vec![];
for _ in 0..POPULATION_SIZE{
populations.push(Genome::random());
}
GA {
target: String::from(target),
generations: 0,
total_fitness: 0.0,
populations
}
}
fn roulette_selection(&self) -> usize{
//Draw a random number between 0 and the total fitness.
let slice = random::<f64>() * self.total_fitness;
let mut fitness_total = 0.0;
let mut selected_pos = 0;
for i in 0..self.populations.len(){
fitness_total += self.populations[i].fitness;
//Return the genome at which the running fitness total exceeds the random number.
if fit | selected_pos
}
//Advance to the next generation.
fn epoch(&mut self){
//Compute the total fitness.
self.total_fitness = 0.0;
for p in &mut self.populations{
self.total_fitness += p.fitness;
}
//Sort by fitness score, best first.
self.populations.sort_by(|a, b| b.fitness.partial_cmp(&a.fitness).unwrap());
let out = self.populations[0].run();
println!("program={} out={:?}", self.populations[0].to_bf(), out.get(0..5));
info!("population: {} generation: {} best fitness: {}", self.populations.len(), self.generations, self.populations[0].fitness);
//The new population.
let mut new_pop = vec![];
//Elitism: copy the best genomes straight into the new population.
for i in 0..NUM_ELITE{
for _ in 0..NUM_COPIES_ELITE{
new_pop.push(self.populations[i].clone());
}
}
let (tx, rx) = channel();
let elite_count = new_pop.len();
let new_pop = Arc::new(Mutex::new(new_pop));
for tid in 0..NUM_THREAD{
let target = self.target.clone();
let child_count = (POPULATION_SIZE-elite_count)/NUM_THREAD;
let mut parents = vec![];
for _ in 0..child_count/2{
//Each crossover yields two children.
parents.push(self.populations[self.roulette_selection()].clone());
parents.push(self.populations[self.roulette_selection()].clone());
}
let tx = tx.clone();
let new_pop_clone = new_pop.clone();
thread::spawn(move || {
let mut childs = vec![];
//println!("{}.start", tid);
//Each loop iteration produces two children.
while childs.len()<child_count as usize{
let mum = parents.pop().unwrap();
let dad = parents.pop().unwrap();
//Crossover.
let (mut baby1, mut baby2) = mum.crossover(&dad);
//Mutation.
baby1.mutate();
baby2.mutate();
//Compute the fitness scores.
baby1.calc_fitness(&target);
baby2.calc_fitness(&target);
childs.push(baby1);
childs.push(baby2);
}
//println!("{}.end", tid);
let mut new_pop = new_pop_clone.lock().unwrap();
new_pop.append(&mut childs);
//println!("{}.new_pop.len()={}", tid, new_pop.len());
if new_pop.len() == POPULATION_SIZE{
let mut pops = vec![];
pops.append(&mut new_pop);
tx.send(pops).unwrap();
}
});
}
//Swap in the new population.
self.populations.clear();
self.populations.append(&mut rx.recv().unwrap());
self.generations += 1;
}
}
fn main() {
env_logger::Builder::from_default_env()
//.default_format_timestamp(false)
.filter_level(LevelFilter::Info)
.init();
//let hello_world = include_bytes!("../hello_world.bf");
//let program = b"++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.";
let mut ga = GA::new("Hello");
for _ in 0..50000{
ga.epoch();
}
// for i in 0..ga.populations.len(){
// ga.populations[i].calc_fitness("Hello World!");
// }
// let dad = ga.populations[ga.roulette_selection()];
// let mum = ga.populations[ga.roulette_selection()];
// let (mut child1, mut child2) = dad.crossover(&mum);
// child1.calc_fitness("Hello World!");
// child2.calc_fitness("Hello World!");
println!("end.");
//let now = Instant::now();
// let fitness = ga.calc_fitness(program, "Hello World!");
// println!("耗时:{}ms", duration_to_milis(&now.elapsed()));
// println!("fitness:{}", fitness);
}
pub fn duration_to_milis(duration: &Duration) -> f64 {
duration.as_secs() as f64 * 1000.0 + duration.subsec_nanos() as f64 / 1_000_000.0
}
| ness_total > slice{
selected_pos = i;
break;
}
}
| conditional_block |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn | () -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
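// Worked example for the demo above: the subject renders as
// [[11 12] [2 [3 [4 5]]]] and the formula [0 7] fetches tree slot 7 (the
// tail of the tail), so the program should print:
//
//     *[[[11 12] [2 [3 [4 5]]]] [0 7]] = [3 [4 5]]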
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns. Fields are declared head-first so
/// the derived `Constructor` yields `Cell::new(head, tail)`.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevant trailing zero bytes to normalize the Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d,...])` = `cell(a, cell(b, cell(c, cell(d,...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it to a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
// Autocons: a cell of two formulas reduces to the cell of their products.
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
nock(fp, gp.try_cell()?)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +.
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head();
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
/// Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
pub fn tar(&self) -> NockResult {
trace!("*{}", self);
let self_cell = self.try_cell()?;
let subject = self_cell.head();
let formula = self_cell.tail();
nock(subject, formula.try_cell()?)
}
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
pub fn lus(&self) -> NockResult {
trace!("+{}", self);
let self_atom = self.try_atom()?;
let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
incremented_bytes.push(0x00);
for byte in incremented_bytes.iter_mut() {
if *byte == 0xFF {
*byte = 0x00;
continue;
} else {
*byte += 1;
break;
}
}
Ok(atom(Atom::new(&incremented_bytes)))
}
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
let self_cell = self.try_cell()?;
let index = self_cell.head().try_atom()?;
let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
let mut result = root;
for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
let skip_bits = if byte_index == 0 {
byte.leading_zeros() + 1
} else {
0
};
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
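// Tree addressing walks the bits of the index below its leading 1 bit: a 0
// bit selects the head, a 1 bit the tail. For example, slot 6 (binary 110)
// means "tail, then head": /[6 [[a b] [c d]]] = c.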
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
}
| main | identifier_name |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
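// Worked example for the demo above: the subject renders as
// [[11 12] [2 [3 [4 5]]]] and the formula [0 7] fetches tree slot 7 (the
// tail of the tail), so the program should print:
//
//     *[[[11 12] [2 [3 [4 5]]]] [0 7]] = [3 [4 5]]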
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns. Fields are declared head-first so
/// the derived `Constructor` yields `Cell::new(head, tail)`.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevant trailing zero bytes to normalize the Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d,...])` = `cell(a, cell(b, cell(c, cell(d,...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it to a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
// Autocons: a cell of two formulas reduces to the cell of their products.
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
let gp = gp.try_cell()?;
nock(fp, gp) | // *[a 4 b] -> +*[a b]
4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
11 => {
let parameter = parameter.try_cell()?;
let _hint = parameter.head();
let formula = parameter.tail.try_cell()?;
nock(subject, formula)
}
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
/// Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
pub fn tar(&self) -> NockResult {
trace!("*{}", self);
let self_cell = self.try_cell()?;
let subject = self_cell.head();
let formula = self_cell.tail();
nock(subject, formula.try_cell()?)
}
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
pub fn lus(&self) -> NockResult {
trace!("+{}", self);
let self_atom = self.try_atom()?;
let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
incremented_bytes.push(0x00);
for byte in incremented_bytes.iter_mut() {
if *byte == 0xFF {
*byte = 0x00;
continue;
} else {
*byte += 1;
break;
}
}
Ok(atom(Atom::new(&incremented_bytes)))
}
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
let self_cell = self.try_cell()?;
let index = self_cell.head().try_atom()?;
let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
let mut result = root;
for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
let skip_bits = if byte_index == 0 {
byte.leading_zeros() + 1
} else {
0
};
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
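// Tree addressing walks the bits of the index below its leading 1 bit: a 0
// bit selects the head, a 1 bit the tail. For example, slot 6 (binary 110)
// means "tail, then head": /[6 [[a b] [c d]]] = c.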
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
} | }
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()), | random_line_split |
main.rs | //! Urbit Nock 4K data structures, with basic parsing, and evaluation.
//! <https://urbit.org/docs/learn/arvo/nock/>
#![feature(never_type, exact_size_is_empty)]
use byteorder::{ByteOrder, LittleEndian};
use derive_more::Constructor;
use env_logger;
use log::{debug, error, info, log, trace, warn};
use std::{clone::Clone, error::Error, fmt::Display, rc::Rc};
pub fn main() -> Result<(), Box<dyn std::error::Error>> {
env_logger::try_init()?;
let subject = list(&[cell(atom(11), atom(12)), atom(2), atom(3), atom(4), atom(5)]);
let formula = cell(atom(0), atom(7));
info!("subject: {}", subject);
info!("formula: {}", formula);
let product = nock(subject.clone(), formula.try_cell()?)?;
info!("product: {}.", product);
println!("*[{} {}] = {}", subject, formula, product);
Ok(())
}
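// Worked example for the demo above: the subject renders as
// [[11 12] [2 [3 [4 5]]]] and the formula [0 7] fetches tree slot 7 (the
// tail of the tail), so the program should print:
//
//     *[[[11 12] [2 [3 [4 5]]]] [0 7]] = [3 [4 5]]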
/* Data structures * * * * * * * * * * * * * * * * * */
/// A Nock Noun can be any Nock value, either an Atom or a Cell.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub enum Noun {
Atom(Atom),
Cell(Cell),
}
/// A Nock Cell is an ordered pair of Nouns. Fields are declared head-first so
/// the derived `Constructor` yields `Cell::new(head, tail)`.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Cell {
head: Rc<Noun>,
tail: Rc<Noun>,
}
/// A Nock Atom is an arbitrarily-large unsigned integer.
#[derive(Debug, Hash, Eq, PartialEq, Clone)]
pub struct Atom {
bytes_le: Vec<u8>,
}
/// Evaluating a Nock expression that contains an invalid, undefined, infinite,
/// nonterminating, or irreducible subexpression produces a Crash.
#[derive(Debug, Hash, Eq, PartialEq, Clone, Constructor)]
pub struct Crash {
message: String,
}
/// The result of evaluating/nocking/tarring a Noun: a product Noun or a Crash.
pub type NockResult = Result<Rc<Noun>, Crash>;
/* Atom encoding and decoding * * * * * * * * * * * * * * * * * */
impl Atom {
/// Construct a new Atom from a little-endian byte slice.
pub fn new(bytes_le: &[u8]) -> Self {
// Strip irrelevant trailing zero bytes to normalize the Atom for Hash and Eq.
let mut len = bytes_le.len();
while len > 0 && bytes_le[len - 1] == 0x00 {
len -= 1;
}
Self { bytes_le: bytes_le[..len].to_vec() }
}
/// Whether this Atom is zero, which is the truthy value in Nock.
pub fn is_zero(&self) -> bool {
self.bytes_le.len() == 0
}
/// Returns the value of this atom as a little-endian byte slice.
pub fn as_bytes_le(&self) -> &[u8] {
&self.bytes_le
}
/// Returns the value of the atom as Some(u128) if it fits, else None.
pub fn try_u128(&self) -> Option<u128> {
if self.as_bytes_le().is_empty() {
Some(0)
} else if self.bytes_le.len() <= 16 {
Some(LittleEndian::read_uint128(&self.bytes_le, self.bytes_le.len()))
} else {
None
}
}
}
impl From<bool> for Atom {
fn from(b: bool) -> Atom {
if b {
Atom::new(&[0])
} else {
Atom::new(&[1])
}
}
}
impl From<u128> for Atom {
fn from(n: u128) -> Atom {
let mut bytes = [0u8; 16];
LittleEndian::write_u128(&mut bytes, n);
Atom::new(&bytes)
}
}
/* Noun construction conveniences * * * * * * * * * * * * * * * * * */
/// Creates a new cons Cell Noun containing two existing Nouns.
pub fn cell(left: Rc<Noun>, right: Rc<Noun>) -> Rc<Noun> {
Rc::new(Noun::Cell(Cell::new(left, right)))
}
/// Creates a new unsigned integer Atom Noun.
pub fn atom<T: Into<Atom>>(atom: T) -> Rc<Noun> {
Rc::new(Noun::Atom(atom.into()))
}
/// Groups a nonempty slice of Nouns into Cells, from right-to-left, returning a Noun.
///
/// `list(&[a, b, c, d,...])` = `cell(a, cell(b, cell(c, cell(d,...))))`
pub fn list(nouns: &[Rc<Noun>]) -> Rc<Noun> {
let nouns = nouns.to_vec();
if nouns.is_empty() {
panic!("can't have an empty list")
}
let mut nouns_rev = nouns.into_iter().rev();
let mut result = nouns_rev.next().unwrap();
for outer in nouns_rev {
result = cell(outer, result);
}
result
}
/* Formatting nouns and errors * * * * * * * * * * * * * * * * * */
impl Display for Noun {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
Noun::Atom(atom) => match atom.try_u128() {
// If it fits in u128 we'll display it as an ordinary decimal integer
// literal.
Some(n) => write!(f, "{}", n),
// For larger values, we'll use a hex integer literal.
None => {
write!(f, "0x")?;
for byte in atom.as_bytes_le().iter().rev() {
write!(f, "{:02x}", byte)?;
}
Ok(())
}
},
Noun::Cell(cell) => write!(f, "[{} {}]", cell.head, cell.tail),
}
}
}
impl Error for Crash {}
impl Display for Crash {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "Nock Crash: {}", self.message)
}
}
impl From<&str> for Crash {
fn from(message: &str) -> Crash {
Crash::new(message.to_string())
}
}
/* Nock evaluation * * * * * * * * * * * * * * * * * */
/// The Nock function interprets a formula Cell and applies it to a subject Noun.
pub fn nock(subject: Rc<Noun>, formula: &Cell) -> NockResult {
let operation = formula.head();
let parameter = formula.tail();
match &*operation {
Noun::Cell(operation) => {
// Autocons: a cell of two formulas reduces to the cell of their products.
let f = operation.head();
let g = operation.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
Ok(cell(fp, gp))
}
Noun::Atom(operation) => {
match operation.try_u128().ok_or(Crash::from("opcode > u128"))? {
// A formula [0 b] reduces to the noun at tree address b in the subject.
// *[a 0 b] -> /[b a]
0 => cell(parameter, subject.clone()).net(),
// A formula [1 b] reduces to the constant noun b.
// *[a 1 b] -> b
1 => Ok(parameter),
// A formula [2 b c] treats b and c as formulas, resolves each against the
// subject, then computes Nock again with the product of b as the subject, c
// as the formula. *[a 2 b c] -> *[*[a b] *[a c]]
2 => {
let parameter = parameter.try_cell()?;
let f = parameter.head();
let g = parameter.tail();
let fp = nock(subject.clone(), f.try_cell()?)?;
let gp = nock(subject, g.try_cell()?)?;
nock(fp, gp.try_cell()?)
}
// In formulas [3 b] and [4 b], b is another formula, whose product against
// the subject becomes the input to an axiomatic operator. 3 is ? and 4 is +.
// *[a 3 b] -> ?*[a b]
3 => Ok(cell(subject, parameter).tar()?.wut()),
// *[a 4 b] -> +*[a b]
4 => Ok(cell(subject, parameter).tar()?.lus()?),
// A formula [5 b c] treats b and c as formulas that become the input to
// another axiomatic operator, =. *[a 5 b c] -> =[*[a b]
// *[a c]]
5 => unimplemented!(),
// Instructions 6 through 11 are not strictly necessary for Turing
// completeness; deleting them from Nock would decrease compactness, but not
// expressiveness. [6 b c d] is if b, then c, else d. Each
// of b, c, d is a formula against the subject. Remember that 0 is true and
// 1 is false. *[a 6 b c d] -> *[a *[[c d] 0 *[[2 3] 0 *[a
// 4 4 b]]]]
6 => unimplemented!(),
// [7 b c] composes the formulas b and c.
// *[a 7 b c] -> *[*[a b] c]
7 => unimplemented!(),
// [8 b c] produces the product of formula c, against a subject whose head
// is the product of formula b with the original subject, and whose tail is
// the original subject. (Think of 8 as a “variable declaration” or “stack
// push.”) *[a 8 b c] -> *[[*[a b] a] c]
8 => unimplemented!(),
// [9 b c] computes the product of formula c with the current subject; from
// that product d it extracts a formula e at tree address b, then computes
// *[d e]. (9 is designed to fit Hoon; d is a core (object), e points to an
// arm (method).) *[a 9 b c] -> *[*[a c] 2 [0 1] 0 b]
9 => unimplemented!(),
// In a formula [10 [b c] d], c and d are computed with the current subject,
// and then b of the product of d is replaced with the product of c.
// *[a 10 [b c] d] -> #[b *[a c] *[a d]]
10 => unimplemented!(),
// [11 b c] is a hint semantically equivalent to the formula c. If b is an
// atom, it's a static hint, which is just discarded. If b is a cell, it's a
// dynamic hint; the head of b is discarded, and the tail of b is executed
// as a formula against the current subject; the product of this is
// discarded. *[a 11 b c] -> *[a c]
// [11 hint formula]
                11 => {
                    let parameter = parameter.try_cell()?;
                    let _hint = parameter.head();
                    let formula = parameter.tail();
                    nock(subject, formula.try_cell()?)
                }
_ => Err(Crash::from("opcode > 11")),
}
}
}
}
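// A minimal usage sketch (added for illustration, not part of the original
// source): the constant formula [1 0] ignores its subject, so *[42 [1 0]]
// reduces to 0. This assumes the `atom`/`cell` helpers and
// `Atom::new`/`is_zero` behave as they are used elsewhere in this file.
#[cfg(test)]
mod nock_sketch {
    use super::*;
    #[test]
    fn constant_formula_returns_its_constant() {
        let subject = atom(Atom::new(&[42]));
        let formula = cell(atom(Atom::new(&[1])), atom(Atom::new(&[0])));
        let formula = formula.try_cell().expect("formula must be a cell");
        let result = nock(subject, formula).expect("evaluation should not crash");
        assert!(result.try_atom().expect("result is an atom").is_zero());
    }
}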
impl Noun {
/// Returns a reference to the Atom in this Noun, or a Crash if it's a cell.
pub fn try_atom(&self) -> Result<&Atom, Crash> {
| / Returns a reference to the Cell in this Noun, or a Crash if it's an atom.
pub fn try_cell(&self) -> Result<&Cell, Crash> {
match self {
Noun::Cell(cell) => Ok(cell),
Noun::Atom(_) => Err(Crash::from("required cell, had atom")),
}
}
/// `*[subject formula]` nock formula application.
    pub fn tar(&self) -> NockResult {
        trace!("*{}", self);
        let self_cell = self.try_cell()?;
        let subject = self_cell.head();
        let formula = self_cell.tail();
        nock(subject, formula.try_cell()?)
    }
/// `?noun` noun type operator.
pub fn wut(&self) -> Rc<Noun> {
trace!("?{}", self);
Rc::new(Noun::Atom(Atom::from(match self {
Noun::Cell(_) => true,
Noun::Atom(_) => false,
})))
}
/// `=[head tail]` noun equality operator.
pub fn tis(&self) -> NockResult {
trace!("={}", self);
let self_cell = self.try_cell()?;
Ok(atom(Atom::from(self_cell.head == self_cell.tail)))
}
/// `+number` atom increment operator.
    pub fn lus(&self) -> NockResult {
        trace!("+{}", self);
        let self_atom = self.try_atom()?;
        // Increment the little-endian byte string; the extra 0x00 pushed on
        // top gives a carry out of the highest byte somewhere to land.
        let mut incremented_bytes = self_atom.as_bytes_le().to_vec();
        incremented_bytes.push(0x00);
        for byte in incremented_bytes.iter_mut() {
            if *byte == 0xFF {
                // 0xFF wraps to 0x00 and the carry ripples into the next byte
                *byte = 0x00;
            } else {
                *byte += 1;
                break;
            }
        }
        Ok(atom(Atom::new(&incremented_bytes)))
}
/// `/[index root]`, `*[root 0 index]` cell tree slot indexing operator.
pub fn net(&self) -> NockResult {
trace!("/{}", self);
        let self_cell = self.try_cell()?;
        let index = self_cell.head();
        let index = index.try_atom()?;
        let root = self_cell.tail();
if index.is_zero() {
return Err(Crash::from("index in /[index root] must be > 0"));
}
        let mut result = root;
        // Walk the index bits most-significant first, skipping everything up
        // to and including the leading 1 bit (address 1 is the root itself):
        // a 0 bit descends into the head, a 1 bit into the tail.
        for (byte_index, byte) in index.as_bytes_le().iter().rev().enumerate() {
            let skip_bits = if byte_index == 0 {
                byte.leading_zeros() + 1
            } else {
                0
            };
for bit_index in skip_bits..8 {
result = if ((byte >> (7 - bit_index)) & 1) == 0 {
result.try_cell()?.head()
} else {
result.try_cell()?.tail()
};
}
}
Ok(result)
}
/// `#[index root replacement]` edit cell tree index modification operator.
pub fn hax(&self) -> NockResult {
trace!("#{}", self);
unimplemented!()
}
}
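// Sketches of the axiomatic operators above (added for illustration, not part
// of the original source). They assume the `atom`/`cell` helpers and
// `Atom::new` behave as used elsewhere in this file, and that Noun implements
// PartialEq (which tis() above already relies on).
#[cfg(test)]
mod operator_sketches {
    use super::*;
    #[test]
    fn lus_carries_across_bytes() {
        // 0x01FF is [0xFF, 0x01] little-endian; +1 wraps the low byte and
        // carries, giving 0x0200. Both sides pass through Atom::new, so any
        // normalization it applies cancels out.
        let n = atom(Atom::new(&[0xFF, 0x01]));
        let incremented = n.lus().expect("increment succeeds");
        assert!(incremented == atom(Atom::new(&[0x00, 0x02, 0x00])));
    }
    #[test]
    fn net_slot_three_selects_the_tail() {
        // Tree address 1 is the whole noun, 2 its head, 3 its tail,
        // so /[3 [10 20]] is 20.
        let tail = atom(Atom::new(&[20]));
        let tree = cell(atom(Atom::new(&[10])), tail.clone());
        let query = cell(atom(Atom::new(&[3])), tree);
        assert!(query.net().expect("indexing succeeds") == tail);
    }
}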
impl Cell {
pub fn head(&self) -> Rc<Noun> {
self.head.clone()
}
pub fn tail(&self) -> Rc<Noun> {
self.tail.clone()
}
}
| match self {
Noun::Atom(atom) => Ok(atom),
Noun::Cell(_) => Err(Crash::from("required atom, had cell")),
}
}
// | identifier_body |
symdumper.rs | use std::ffi::OsStr;
use std::os::windows::ffi::OsStrExt;
use std::iter::once;
use std::process::Command;
use std::io::{Error, ErrorKind};
use crate::win32::ModuleInfo;
type HANDLE = usize;
extern {
fn GetCurrentProcess() -> HANDLE;
}
#[allow(non_snake_case)]
#[repr(C)]
struct SrcCodeInfoW {
SizeOfStruct: u32,
Key: usize,
ModBase: u64,
Obj: [u16; 261],
FileName: [u16; 261],
LineNumber: u32,
Address: u64,
}
impl Default for SrcCodeInfoW {
fn default() -> Self { | SizeOfStruct: std::mem::size_of::<SrcCodeInfoW>() as u32,
Key: 0,
ModBase: 0,
Obj: [0; 261],
FileName: [0; 261],
LineNumber: 0,
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct SymbolInfoW {
SizeOfStruct: u32,
TypeIndex: u32,
Reserved: [u64; 2],
Index: u32,
Size: u32,
ModBase: u64,
Flags: u32,
Value: u64,
Address: u64,
Register: u32,
Scope: u32,
Tag: u32,
NameLen: u32,
MaxNameLen: u32,
// technically this field is dynamically sized as specified by MaxNameLen
Name: [u16; 8192],
}
impl Default for SymbolInfoW {
fn default() -> Self {
SymbolInfoW {
// Subtract off the size of the dynamic component, one byte
// already included in the structure
SizeOfStruct: std::mem::size_of::<SymbolInfoW>() as u32 - 8192*2,
TypeIndex: 0,
Reserved: [0; 2],
Index: 0,
Size: 0,
ModBase: 0,
Flags: 0,
Value: 0,
Address: 0,
Register: 0,
Scope: 0,
Tag: 0,
NameLen: 0,
MaxNameLen: 8192,
Name: [0; 8192],
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpLineW64 {
SizeOfStruct: u32,
Key: usize,
LineNumber: u32,
FileName: *const u16,
Address: u64,
}
impl Default for ImagehlpLineW64 {
fn default() -> Self {
ImagehlpLineW64 {
SizeOfStruct: std::mem::size_of::<ImagehlpLineW64>() as u32,
Key: 0,
LineNumber: 0,
FileName: std::ptr::null(),
Address: 0,
}
}
}
#[allow(non_snake_case)]
#[repr(C)]
struct ImagehlpModule64W {
SizeOfStruct: u32,
BaseOfImage: u64,
ImageSize: u32,
TimeDateStamp: u32,
CheckSum: u32,
NumSyms: u32,
SymType: u32,
ModuleName: [u16; 32],
ImageName: [u16; 256],
LoadedImageName: [u16; 256],
LoadedPdbName: [u16; 256],
CVSig: u32,
CVData: [u16; 780],
PdbSig: u32,
PdbSig70: [u8; 16],
PdbAge: u32,
PdbUnmatched: bool,
DbgUnmatched: bool,
LineNumbers: bool,
GlobalSymbols: bool,
TypeInfo: bool,
SourceIndexed: bool,
Publics: bool,
}
impl Default for ImagehlpModule64W {
fn default() -> Self {
ImagehlpModule64W {
SizeOfStruct: std::mem::size_of::<ImagehlpModule64W>() as u32,
BaseOfImage: 0,
ImageSize: 0,
TimeDateStamp: 0,
CheckSum: 0,
NumSyms: 0,
SymType: 0,
ModuleName: [0; 32],
ImageName: [0; 256],
LoadedImageName: [0; 256],
LoadedPdbName: [0; 256],
CVSig: 0,
CVData: [0; 780],
PdbSig: 0,
PdbSig70: [0; 16],
PdbAge: 0,
PdbUnmatched: false,
DbgUnmatched: false,
LineNumbers: false,
GlobalSymbols: false,
TypeInfo: false,
SourceIndexed: false,
Publics: false,
}
}
}
/// Raw pointer used to pass a `SymbolContext` through the dbghelp
/// enumeration callbacks
type Context = *mut SymbolContext;
extern fn srcline_callback(srcline_info: *const SrcCodeInfoW, context: usize) -> bool {
let srcline = unsafe { &*srcline_info };
let context = unsafe { &mut *(context as Context) };
let mut filename = Vec::with_capacity(srcline.FileName.len());
for &val in srcline.FileName.iter() {
if val == 0 { break; }
filename.push(val);
}
let source_filename = String::from_utf16(&filename)
.expect("Failed to decode UTF-16 file name");
context.sourceline.push((srcline.Address - srcline.ModBase, source_filename, srcline.LineNumber as u64));
    // Returning true tells dbghelp to continue the enumeration
    true
}
extern fn sym_callback(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool {
let symbol = unsafe { &*sym_info };
let context = unsafe { &mut *(context as Context) };
// Technically NameLen isn't supposed to contain the null terminator... but it does.
// Yay!
if symbol.NameLen < 1 {
return true;
}
let symbol_name = String::from_utf16(&symbol.Name[..symbol.NameLen as usize - 1])
.expect("Failed to decode UTF-16 symbol name");
context.symbols.push((symbol.Address - symbol.ModBase, symbol_name, size as u64));
true
}
#[link(name = "dbghelp")]
extern {
fn SymInitializeW(hProcess: HANDLE, UserSearchPath: *const u16,
fInvadeProcess: bool) -> bool;
fn SymLoadModuleExW(hProcess: HANDLE, hFile: HANDLE, ImageName: *const u16,
ModuleName: *const u16, BaseOfDll: u64, DllSize: u32,
Data: usize, Flags: u32) -> u64;
fn SymGetModuleInfoW64(hProcess: HANDLE, dwAddr: u64,
ModuleInfo: *mut ImagehlpModule64W) -> bool;
fn SymEnumSymbolsW(hProcess: HANDLE, BaseOfDll: u64, Mask: usize,
callback: extern fn(sym_info: *const SymbolInfoW, size: u32, context: usize) -> bool,
context: usize) -> bool;
fn SymEnumSourceLinesW(hProcess: HANDLE, Base: u64, Obj: usize, File: usize, Line: u32,
Flags: u32, callback: extern fn(LineInfo: *const SrcCodeInfoW, UserContext: usize) -> bool,
UserContext: usize) -> bool;
fn SymUnloadModule64(hProcess: HANDLE, BaseOfDll: u64) -> bool;
fn SymCleanup(hProcess: HANDLE) -> bool;
}
pub fn win16_for_str(s: &str) -> Vec<u16> {
OsStr::new(s).encode_wide().chain(once(0)).collect()
}
#[repr(C)]
#[derive(Clone, Default)]
pub struct SymbolContext {
    /// Vector of (module-relative address, symbol name, symbol size)
    pub symbols: Vec<(u64, String, u64)>,
    /// Vector of (module-relative address, source file name, line number)
    pub sourceline: Vec<(u64, String, u64)>,
}
/// Get all of the symbols from a PE file `pe_file`
pub fn get_symbols_from_file(pe_file: &str) -> SymbolContext {
let mut symdb = SymbolContext {
symbols: Vec::new(),
sourceline: Vec::new(),
};
let module_base;
unsafe {
let cur_process = GetCurrentProcess();
// Initialize the symbol library for this process
assert!(SymInitializeW(cur_process, 0 as *const _, false),
"Failed to SymInitializeW()");
// Load up a module into the current process as the base address
// the file specified
let filename = win16_for_str(pe_file);
module_base = SymLoadModuleExW(cur_process, 0, filename.as_ptr(), std::ptr::null(), 0, 0, 0, 0);
        assert!(module_base != 0, "Failed to SymLoadModuleExW()");
// Get information about the module we just loaded
let mut module_info = ImagehlpModule64W::default();
assert!(SymGetModuleInfoW64(cur_process, module_base,
&mut module_info as *mut _),
"Failed to SymGetModuleInfoW64()");
// This is pedantic but we might as well check it
assert!(module_info.BaseOfImage == module_base);
assert!(SymEnumSymbolsW(cur_process, module_base, 0, sym_callback, &mut symdb as *mut _ as usize));
        if !SymEnumSourceLinesW(cur_process, module_base, 0, 0, 0, 0, srcline_callback, &mut symdb as *mut _ as usize) {
// Eh just silently fail here, most people won't have private
// symbols so this would just spam
//print!("Warning: Could not enumerate sourcelines\n");
}
assert!(SymUnloadModule64(cur_process, module_base),
"Failed to SymUnloadModule64()");
assert!(SymCleanup(cur_process), "Failed to SymCleanup()");
}
symdb.symbols.sort_by_key(|x| x.0);
symdb.sourceline.sort_by_key(|x| x.0);
symdb
}
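// Hypothetical usage sketch (added for illustration; the path is just an
// example): dump the lowest-addressed symbols of a PE file on disk. Only
// meaningful on Windows with dbghelp.dll available.
#[allow(dead_code)]
fn print_first_symbols() {
    let symdb = get_symbols_from_file("C:\\Windows\\System32\\calc.exe");
    for (offset, name, size) in symdb.symbols.iter().take(5) {
        // Offsets are module-relative because ModBase was subtracted
        // in the enumeration callbacks above
        print!("{:#010x} {:6} {}\n", offset, size, name);
    }
}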
/// Get all of the symbols from a module `module_name` with a TimeDateStamp
/// and SizeOfImage from the PE header. This will automatically download the
/// module and PDB from the symbol store using symchk
pub fn get_symbols_from_module(module: &ModuleInfo)
-> std::io::Result<SymbolContext>
{
// Use symchk to download the module and symbols
let module = download_symbol(module.name(), module.time(), module.size())?;
Ok(get_symbols_from_file(&module))
}
/// Download a module and the corresponding PDB based on module_name,
/// its TimeDateStamp and SizeOfImage from its PE header
///
/// Returns a string containing a filename of the downloaded module
fn download_symbol(module_name: &str, timedatestamp: u32, sizeofimage: u32)
-> std::io::Result<String> {
let mut dir = std::env::temp_dir();
dir.push("applepie_manifest");
// Create manifest file for symchk
std::fs::write(&dir, format!("{},{:x}{:x},1\r\n",
module_name, timedatestamp, sizeofimage))?;
// Run symchk to download this module
let res = Command::new("symchk")
.arg("/v")
.arg("/im")
.arg(dir)
.output()?;
    if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Symchk apparently ran, check output
let stderr = std::str::from_utf8(&res.stderr)
.expect("Failed to convert symchk output to utf-8");
let mut filename = None;
for line in stderr.lines() {
const PREFIX: &'static str = "DBGHELP: ";
const POSTFIX: &'static str = " - OK";
// The line that contains the filename looks like:
// DBGHELP: C:\symbols\calc.exe\8f598a9eb000\calc.exe - OK
        if !line.starts_with(PREFIX) { continue; }
        if !line.ends_with(POSTFIX) { continue; }
// We only expect one line of output to match the above criteria
// If there are multiple we'll need to improve this "parser"
assert!(filename.is_none(), "Multiple filenames in symchk output");
// Save the filename we found
filename = Some(&line[PREFIX.len()..line.len() - POSTFIX.len()]);
}
// Fail hard if we didn't get the output filename from symchk
let filename = filename.expect("Did not get expected symchk output");
// Run symchk to download the pdb for the file
let res = Command::new("symchk")
.arg(filename)
.output()?;
    if !res.status.success() {
return Err(Error::new(ErrorKind::Other, "symchk returned with error"));
}
// Now we have downloaded the PDB for this file :)
Ok(filename.into())
}
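// Sketch (added, not part of the original source): the manifest line written
// above for the calc.exe test below comes out as "calc.exe,8f598a9eb000,1",
// i.e. TimeDateStamp and SizeOfImage concatenated as bare hex, no separator.
#[test]
fn manifest_line_format() {
    let line = format!("{},{:x}{:x},1\r\n", "calc.exe", 0x8F598A9Eu32, 0xB000u32);
    assert_eq!(line, "calc.exe,8f598a9eb000,1\r\n");
}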
#[test]
fn test_symchk() {
download_symbol("calc.exe", 0x8F598A9E, 0xB000)
.expect("Failed to download symbol");
} | SrcCodeInfoW { | random_line_split |