file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 values)
---|---|---|---|---|
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the softmax of the input node along the given axis.
///
/// `axis` selects the dimension along which each lane of values is normalised to sum to 1.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> |
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
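// Numerically stable softmax: subtracting the per-lane max before exponentiating
// keeps exp() from overflowing, and the result is unchanged because
// exp(v - max) / sum_k exp(v_k - max) == exp(v) / sum_k exp(v_k).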
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
}
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
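// For s = softmax(logits), the Jacobian is ds_i/dl_j = s_i * (delta_ij - s_j).
// For each non-zero upstream gradient g_i, the loops below subtract g_i * s_i * s_j
// from logits_grad[j] for every j != i, while other_sum accumulates
// sum_{j != i} s_j = 1 - s_i, so the final line adds the diagonal term g_i * s_i * (1 - s_i).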
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind!= i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
} | identifier_body |
softmax.rs | use alumina_core::{
base_ops::{OpInstance, OpSpecification},
errors::{ExecutionError, GradientError, OpBuildError, ShapePropError},
exec::ExecutionContext,
grad::GradientContext,
graph::{Graph, Node, NodeID},
shape_prop::ShapePropContext,
util::wrap_dim,
};
use indexmap::{indexset, IndexMap, IndexSet};
use ndarray::{Axis, Dimension, Zip};
use std::any::Any;
/// Calculates the softmax of the input node along the given axis.
///
/// `axis` selects the dimension along which each lane of values is normalised to sum to 1.
pub fn softmax<I>(logits: I, axis: isize) -> Result<Node, OpBuildError>
where
I: Into<Node>,
{
let logits = logits.into();
let axis = wrap_dim(axis, logits.shape().len());
let output = logits.graph().new_node(logits.shape());
Softmax::new(logits, output.clone(), axis).build()?;
Ok(output)
}
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct Softmax {
logits: Node,
output: Node,
axis: usize,
}
impl Softmax {
pub fn new<I, O>(logits: I, output: O, axis: usize) -> Self
where
I: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let output = output.into();
assert!(
logits.shape().len() == output.shape().len(),
"output and logits must have the same shape"
);
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
Softmax { logits, output, axis }
}
}
impl OpSpecification for Softmax {
type InstanceType = SoftmaxInstance;
fn type_name(&self) -> &'static str {
"Softmax"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.output.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output: mapping.get(&self.output).unwrap_or(&self.output).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxInstance {
logits: self.logits.id(),
output: self.output.id(),
axis: self.axis,
})
}
}
/// Softmax OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxInstance {
logits: NodeID,
output: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxInstance {
fn type_name(&self) -> &'static str {
"Softmax"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(Softmax {
logits: graph.node_from_id(self.logits),
output: graph.node_from_id(self.output),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.output]
}
fn gradient(&self, ctx: &mut GradientContext) -> Result<(), GradientError> {
SoftmaxBack::new(
ctx.node(&self.logits),
ctx.grad_of(&self.logits),
ctx.grad_of(&self.output),
self.axis,
)
.build()?;
Ok(())
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
ctx.merge_output_shape(&self.output, &ctx.input_shape(&self.logits).slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_output(&self.output).lanes_mut(Axis(self.axis)))
.par_for_each(|logits, outputs| {
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0.0, |sum, &v| sum + (v - max).exp());
Zip::from(logits).and(outputs).for_each(|logit, output| {
*output += (logit - max).exp() / exp_sum;
});
});
Ok(())
}
}
/// Optimised Backward pass for Softmax Op.
///
/// Input/Output naming convention matches Softmax Input/Outputs, i.e. output_grad is an input to this Op.
///
/// All inputs and grads must be unique.
#[must_use = "Op builder not used, call.build()"]
#[derive(Clone, Debug)]
pub struct SoftmaxBack {
logits: Node,
logits_grad: Node,
output_grad: Node,
axis: usize,
}
impl SoftmaxBack {
pub fn new<I1, I2, O>(logits: I1, logits_grad: O, output_grad: I2, axis: usize) -> Self
where
I1: Into<Node>,
I2: Into<Node>,
O: Into<Node>,
{
let logits = logits.into();
let logits_grad = logits_grad.into();
let output_grad = output_grad.into();
assert!(logits.shape().len() == logits_grad.shape().len());
assert!(logits.shape().len() == output_grad.shape().len());
assert!(
axis < logits.shape().len(),
"axis {} must be less than logits.shape().len() {}",
axis,
logits.shape().len()
);
SoftmaxBack {
logits,
logits_grad,
output_grad,
axis,
}
}
}
impl OpSpecification for SoftmaxBack {
type InstanceType = SoftmaxBackInstance;
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn inputs(&self) -> IndexSet<Node> {
indexset![self.logits.clone(), self.output_grad.clone()]
}
fn outputs(&self) -> IndexSet<Node> {
indexset![self.logits_grad.clone()]
}
fn clone_with_nodes_changed(&self, mapping: &IndexMap<Node, Node>) -> Self {
Self {
logits: mapping.get(&self.logits).unwrap_or(&self.logits).clone(),
output_grad: mapping.get(&self.output_grad).unwrap_or(&self.output_grad).clone(),
logits_grad: mapping.get(&self.logits_grad).unwrap_or(&self.logits_grad).clone(),
axis: self.axis,
}
}
fn build_instance(self) -> Result<Self::InstanceType, OpBuildError> {
Ok(SoftmaxBackInstance {
logits: self.logits.id(),
logits_grad: self.logits_grad.id(),
output_grad: self.output_grad.id(),
axis: self.axis,
})
}
}
/// SoftmaxBack OpInstance
#[derive(Clone, Debug)]
pub struct SoftmaxBackInstance {
logits: NodeID,
logits_grad: NodeID,
output_grad: NodeID,
axis: usize,
}
impl OpInstance for SoftmaxBackInstance {
fn type_name(&self) -> &'static str {
"SoftmaxBack"
}
fn as_specification(&self, graph: &Graph) -> Box<dyn Any> {
Box::new(SoftmaxBack {
logits: graph.node_from_id(self.logits),
logits_grad: graph.node_from_id(self.logits_grad),
output_grad: graph.node_from_id(self.output_grad),
axis: self.axis,
})
}
fn inputs(&self) -> IndexSet<NodeID> {
indexset![self.logits, self.output_grad]
}
fn outputs(&self) -> IndexSet<NodeID> {
indexset![self.logits_grad]
}
fn gradient(&self, _ctx: &mut GradientContext) -> Result<(), GradientError> {
Err(GradientError::Unimplemented)
}
fn propagate_shapes(&self, ctx: &mut ShapePropContext) -> Result<(), ShapePropError> {
let logits_shape = ctx.input_shape(&self.logits).clone();
let output_grad_shape = ctx.input_shape(&self.output_grad).clone();
if logits_shape.ndim() == 0 {
return Err(format!(
"Softmax requires logit and label shapes to have at least one axis: {:?}",
logits_shape.slice(),
)
.into());
}
if output_grad_shape != logits_shape |
ctx.merge_output_shape(&self.logits_grad, &logits_shape.slice().into())
}
fn execute(&self, ctx: &ExecutionContext) -> Result<(), ExecutionError> {
Zip::from(ctx.get_output(&self.logits_grad).lanes_mut(Axis(self.axis)))
.and(ctx.get_input(&self.logits).lanes(Axis(self.axis)))
.and(ctx.get_input(&self.output_grad).lanes(Axis(self.axis)))
.par_for_each(|mut logits_grad, logits, output_grad| {
let len = logits.len();
let max = logits.iter().fold(::std::f32::NEG_INFINITY, |max, &v| v.max(max));
let exp_sum = logits.iter().fold(0., |sum, &v| sum + (v - max).exp());
// let exp_sum_ln = exp_sum.ln();
for (i, grad) in output_grad.iter().enumerate() {
if grad.abs() > 0.0 {
// hopefully output gradients are sparse, eg from cross entropy loss
let a = logits[i] - max;
// let x = (a - exp_sum_ln).exp();
let x = a.exp() / exp_sum;
let g_x = grad * x;
let mut other_sum = 0.0;
for j in 0..i {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b - exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
// logits_grad[i] += g_x - g_x * x;
// inpd_n[i] += - inp_n.iter().enumerate().fold(0., |sum, (ind, v)| sum + if ind!= i
// {(v-max).exp()} else {0.0})*(mult/exp_sum);
for j in i + 1..len {
let b = logits[j] - max;
// logits_grad[j] -= g_x * (b- exp_sum_ln).exp();
logits_grad[j] -= g_x * b.exp() / exp_sum;
other_sum += b.exp() / exp_sum;
}
logits_grad[i] += g_x * other_sum;
}
}
});
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::softmax;
use crate::elementwise::mul::mul;
use alumina_core::graph::Node;
use alumina_test::{grad_numeric_test::GradNumericTest, relatively_close::RelClose};
use indexmap::indexset;
use ndarray::arr2;
#[test]
fn forward_test() {
let logits = Node::new(&[4, 4])
.set_value(arr2(&[
[0.2, 0.4, 0.6, 0.8],
[1.2, 1.4, 1.6, 1.8],
[2.2, 2.4, 2.6, 2.8],
[3.2, 3.4, 3.6, 3.8],
]))
.set_name("logits");
let hor_groups = softmax(&logits, -1).unwrap();
let vert_groups = softmax(&logits, 0).unwrap();
assert!(hor_groups
.calc()
.unwrap()
.all_relatively_close(&arr2(&[[0.180_657_18, 0.220_655_17, 0.269_508_84, 0.329_178_84]]), 1e-4));
assert!(vert_groups.calc().unwrap().all_relatively_close(
&arr2(&[[0.032_058_604], [0.087_144_32f32], [0.236_882_82], [0.643_914_3]]),
1e-4
));
}
#[test]
fn grad_numeric_rand_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let rand = Node::new(&[13, 33]).set_name("rand"); // multiply output by random amounts to prevent gradient cancellation
let output = mul(&softmax(&logits, -1).unwrap(), &rand).unwrap();
GradNumericTest::new(&output, &indexset![&logits, &rand])
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
#[test]
fn grad_numeric_test() {
let logits = Node::new(&[13, 33]).set_name("logits");
let output = softmax(&logits, -1).unwrap();
GradNumericTest::new(&output, &indexset![&logits])
.expect_zero(&logits, 20.0 * ::std::f32::EPSILON) // under a uniform output gradient the gradient of the logits should cancel out to zero
.step_size(1e-3)
.tolerance(4e-3)
.run();
}
}
| {
return Err(format!("SoftmaxBack requires the output grad to have the shape of the logits: logits:{:?} output_grad:{:?}, axis: {}", logits_shape.slice(), output_grad_shape.slice(), self.axis).into());
} | conditional_block |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This method blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn | (&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
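// One FIFO work deque per worker thread; the matching Stealer handles are
// collected just below and placed in the shared state so workers can steal
// tasks from one another.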
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
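// Wake one parked worker, if any, so the freshly injected task is picked up
// promptly instead of waiting for a busy worker to come back around.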
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
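// State shared between the pool handle and its worker threads: `injector` is the
// global queue that `spawn` pushes tasks onto, `sleep_queue` holds handles to
// parked workers that can be woken when work arrives, and `stealers` exposes each
// worker's local deque so peers can steal from it.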
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
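// A future that stays Pending until a background thread fires its waker about a
// second later; the test verifies that the pool polls the task again after the
// wake-up and that the result arrives on the channel.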
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
}
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() {
let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
}
#[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
}
| shutdown_priv | identifier_name |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This method blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() |
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() {
let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
}
#[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
}
| {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
} | conditional_block |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This method blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
}
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() |
#[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
}
| {
let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
} | identifier_body |
mod.rs | mod task;
mod worker;
use crate::executor::Executor;
use crossbeam::{
deque::{Injector, Stealer, Worker},
queue::ArrayQueue,
};
use futures::Future;
use std::{
io,
sync::{atomic::Ordering, Arc},
thread::JoinHandle,
};
use task::Task;
/// An executor which distributes tasks across multiple threads using a work-stealing
/// scheduler. Tasks can be spawned on it by calling the [`spawn`][`Executor::spawn`]
/// method on the `ThreadPool`. Note that since this executor moves futures between different
/// threads, the future in question *must* be [`Send`].
///
/// # Examples
/// ```
/// use std::io;
/// use threader::{
/// executor::Executor,
/// thread_pool::ThreadPool,
/// net::tcp::TcpStream,
/// };
///
/// fn main() -> io::Result<()> {
/// let mut pool = ThreadPool::new()?;
/// let addr = "10.0.0.1:80".parse().unwrap();
///
/// pool.spawn(async move {
/// let _stream = TcpStream::connect(&addr);
/// });
///
/// pool.shutdown_on_idle();
/// Ok(())
/// }
/// ```
pub struct ThreadPool {
workers: Vec<(JoinHandle<()>, Arc<worker::Handle>)>,
count: usize,
shutdown: bool,
shared: Arc<Shared>,
}
impl ThreadPool {
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to the number of logical CPU cores in a given machine.
/// Returns any errors that may have occurred in creating the
/// thread pool.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::new()?;
/// Ok(())
/// }
/// ```
pub fn new() -> io::Result<ThreadPool> {
ThreadPool::new_priv(None)
}
/// Creates a new `ThreadPool` instance with a number of threads
/// equal to `count`. `count` must not be zero, or this method
/// will panic. Like [`ThreadPool::new`], this method returns
/// any errors that occurred when creating the thread pool.
///
/// # Panics
/// Panics if `count` is equal to zero.
///
/// # Examples
/// ```
/// use std::io;
/// use threader::thread_pool::ThreadPool;
///
/// fn main() -> io::Result<()> {
/// let pool = ThreadPool::with_threads(1)?;
/// Ok(())
/// }
/// ```
pub fn with_threads(count: usize) -> io::Result<ThreadPool> {
ThreadPool::new_priv(Some(count))
}
/// Shuts down the `ThreadPool` when all worker threads are idle. This method
/// blocks the current thread until all of the worker threads have been joined.
pub fn shutdown_on_idle(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_IDLE);
}
/// Shuts down the `ThreadPool` immediately, sending a message to all worker
/// threads to shut down. This method blocks the current thread until all
/// worker threads have been joined, but this blocking shouldn't be noticeable.
pub fn shutdown_now(&mut self) {
self.shutdown_priv(worker::SHUTDOWN_NOW);
}
// Private method used to reduce code duplication.
fn shutdown_priv(&mut self, shutdown: usize) {
self.shutdown = true;
for (_, handle) in &self.workers {
handle.state.store(shutdown, Ordering::Release);
handle.unparker.unpark();
}
while let Some((thread, _)) = self.workers.pop() {
let _ = thread.join();
}
}
// Private method used to reduce code duplication.
fn new_priv(count: Option<usize>) -> io::Result<ThreadPool> {
if let Some(0) = count {
panic!("Can not create a thread pool with 0 threads.");
}
let count = count.unwrap_or(num_cpus::get());
let queues = {
let mut vec = Vec::with_capacity(count);
for _ in 0..count {
vec.push(Worker::new_fifo());
}
vec
};
let stealers: Vec<_> = queues.iter().map(|queue| queue.stealer()).collect();
let shared = Arc::new(Shared {
injector: Injector::new(),
sleep_queue: ArrayQueue::new(count),
stealers,
});
let workers = {
let mut vec = Vec::with_capacity(count);
for queue in queues {
let thread = worker::create_worker(Arc::clone(&shared), queue)?;
vec.push(thread);
}
vec
};
for (_, handle) in &workers {
let handle = Arc::clone(handle);
// Unwrap here since this is a programmer error
// if this fails.
shared.sleep_queue.push(handle).unwrap();
}
Ok(ThreadPool {
workers,
shutdown: false,
count,
shared,
})
}
}
impl<F> Executor<F> for ThreadPool
where
F: Future<Output = ()> + Send + 'static,
{
fn spawn(&self, future: F) {
let shared = Arc::downgrade(&self.shared);
let task = Task::new(future, shared);
self.shared.injector.push(task);
if !self.shared.sleep_queue.is_empty() {
if let Ok(handle) = self.shared.sleep_queue.pop() {
handle.state.store(worker::NEW_TASK, Ordering::Release);
handle.unparker.unpark();
}
}
}
}
impl Drop for ThreadPool {
fn drop(&mut self) {
self.shutdown_now();
}
}
pub(crate) struct Shared {
injector: Injector<Task>,
sleep_queue: ArrayQueue<Arc<worker::Handle>>,
stealers: Vec<Stealer<Task>>,
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam::channel;
use futures::future;
use futures::task::{Context, Waker};
use parking_lot::Mutex;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::task::Poll;
use std::thread;
use std::time::{Duration, Instant};
static TIMES: usize = 100;
#[test]
fn simple() {
let executor = ThreadPool::new().unwrap();
executor.spawn(async {
println!("Hello, world!");
});
thread::sleep(Duration::from_secs(1));
}
#[test]
fn reschedule() {
struct CustomFuture {
waker: Arc<Mutex<Option<Waker>>>,
shared: Arc<AtomicBool>,
}
impl CustomFuture {
fn new() -> CustomFuture {
let waker: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let waker_thread = Arc::clone(&waker);
let shared = Arc::new(AtomicBool::new(false));
let shared_thread = Arc::clone(&shared);
thread::spawn(move || {
thread::sleep(Duration::from_secs(1));
if let Some(waker) = waker_thread.lock().take() {
waker.wake();
shared_thread.store(true, Ordering::SeqCst);
}
});
CustomFuture { waker, shared }
}
}
impl Future for CustomFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.shared.load(Ordering::SeqCst) {
Poll::Ready(())
} else {
*(self.waker.lock()) = Some(cx.waker().clone());
Poll::Pending
}
}
}
let (tx, rx) = channel::unbounded();
let executor = ThreadPool::with_threads(12).unwrap();
executor.spawn(async move {
CustomFuture::new().await;
tx.send(0).unwrap();
});
thread::sleep(Duration::from_secs(4));
assert_eq!(rx.try_recv(), Ok(0));
}
#[test]
#[should_panic]
fn zero_threads() { | #[test]
fn custom_thread_count() {
let executor = ThreadPool::with_threads(32).unwrap();
executor.spawn(async {});
}
#[test]
#[ignore]
fn bad_future() {
// A future that spawns a thread, returns Poll::Ready(()), and
// keeps trying to reschedule itself on the thread_pool.
struct BadFuture {
shared: Arc<Mutex<Option<Waker>>>,
}
impl BadFuture {
fn new() -> BadFuture {
let shared: Arc<Mutex<Option<Waker>>> = Arc::new(Mutex::new(None));
let thread_shared = Arc::clone(&shared);
thread::spawn(move || loop {
let guard = thread_shared.lock();
if let Some(waker) = guard.as_ref() {
waker.clone().wake();
}
});
BadFuture { shared }
}
}
impl Future for BadFuture {
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut guard = self.shared.lock();
*guard = Some(cx.waker().clone());
Poll::Ready(())
}
}
let executor = ThreadPool::new().unwrap();
for _ in 0..50 {
executor.spawn(BadFuture::new());
}
}
#[test]
#[ignore]
fn time_threader() {
let mut executor = ThreadPool::with_threads(1).unwrap();
let mut results = Vec::with_capacity(TIMES);
eprintln!("\nthreader time test starting...");
let total_start = Instant::now();
for _ in 0..TIMES {
let start = Instant::now();
for _ in 0..50_000 {
executor.spawn(async {
future::ready(()).await;
});
}
let end = start.elapsed();
// eprintln!("threader: {:?}", end);
results.push(end.as_millis());
}
let shutdown_start = Instant::now();
executor.shutdown_on_idle();
eprintln!("threader shutdown: {:?}", shutdown_start.elapsed());
eprintln!("threader total: {:?}", total_start.elapsed());
let average = {
let sum: u128 = results.into_iter().sum();
(sum as f64) / (TIMES as f64)
};
eprintln!("threader average: {:?}ms", average);
}
} | let executor = ThreadPool::with_threads(0).unwrap();
executor.spawn(async {});
}
| random_line_split |
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
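// Forwards plugin data to the per-plugin agent rx queue when the referenced
// session is still known; otherwise the agent is told to terminate the stale
// session so it can negotiate a new one.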
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else |
Ok(())
}
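// Registers a new session for (fqdn, plugin). Any existing session for the same
// plugin is torn down first (the agent-side plugin is told to terminate it), then
// the new session id is announced to both the plugin queue and the agent.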
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
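// Route traffic from the shared agent_tx queue onto per-host outbound queues:
// each payload names its target Fqdn, and its raw bytes are appended to that
// host's queue for delivery by the long-polling GET handler below.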
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await;
queue.push_back(msg.data);
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
}
| {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
} | conditional_block |
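// A standalone sketch (assuming warp 0.2/0.3 and tokio's async Mutex) of the shared-state
// pattern used in the service above: `warp::any().map(move || Arc::clone(&state))` turns a
// cloned Arc into a filter, so each request handler receives its own handle to the shared
// host map. The names `Shared` and `with_state` are invented here for illustration only.
use std::{collections::HashMap, sync::Arc};
use tokio::sync::Mutex;
use warp::Filter;
type Shared = Arc<Mutex<HashMap<String, u64>>>;
fn with_state(
    state: Shared,
) -> impl Filter<Extract = (Shared,), Error = std::convert::Infallible> + Clone {
    // Cloning the Arc per request is cheap; the Mutex-guarded map itself stays shared.
    warp::any().map(move || Arc::clone(&state))
}
// Typical wiring (illustrative): warp::post().and(with_state(state)).and_then(|state: Shared| async move { Ok::<_, warp::Rejection>(warp::reply()) });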
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
}
Ok(())
}
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await; |
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
} | queue.push_back(msg.data); | random_line_split |
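// A standalone sketch of the shutdown wiring used above (assuming warp 0.2/0.3, futures 0.3
// and a tokio runtime): a background task trips a oneshot sender when it hits a fatal error,
// and the warp server consumes that signal as its graceful-shutdown future via
// `bind_with_graceful_shutdown`.
use futures::{channel::oneshot, FutureExt};
use warp::Filter;
#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel::<()>();
    tokio::spawn(async move {
        // ... long-running consumer loop; on an unrecoverable error, request shutdown.
        tx.send(()).unwrap_or_else(drop);
    });
    let routes = warp::any().map(|| "ok");
    let (_, server) =
        warp::serve(routes).bind_with_graceful_shutdown(([127, 0, 0, 1], 8080), rx.map(drop));
    server.await;
}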
main.rs | // Copyright (c) 2020 DDN. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
use futures::{channel::oneshot, FutureExt, TryFutureExt, TryStreamExt};
use iml_agent_comms::{
error::ImlAgentCommsError,
flush_queue,
host::{self, SharedHosts},
messaging::{consume_agent_tx_queue, AgentData, AGENT_TX_RUST},
session::{self, Session, Sessions},
};
use iml_rabbit::{self, create_connection_filter, send_message, Connection};
use iml_wire_types::{
Envelope, Fqdn, ManagerMessage, ManagerMessages, Message, PluginMessage, PluginName,
};
use std::{sync::Arc, time::Duration};
use warp::Filter;
async fn data_handler(
has_session: bool,
client: Connection,
data: AgentData,
) -> Result<(), ImlAgentCommsError> {
if has_session {
tracing::debug!("Forwarding valid message {}", data);
let s = format!("rust_agent_{}_rx", data.plugin);
send_message(client, "", s, PluginMessage::from(data)).await?;
} else {
tracing::warn!("Terminating session because unknown {}", data);
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionTerminate {
fqdn: data.fqdn,
plugin: data.plugin,
session_id: data.session_id,
},
)
.await?;
}
Ok(())
}
async fn session_create_req_handler(
sessions: &mut Sessions,
client: Connection,
fqdn: Fqdn,
plugin: PluginName,
) -> Result<(), ImlAgentCommsError> {
let session = Session::new(plugin.clone(), fqdn.clone());
tracing::info!("Creating session {}", session);
let last_opt = sessions.insert(plugin.clone(), session.clone());
if let Some(last) = last_opt {
tracing::warn!("Destroying session {} to create new one", last);
let s = format!("rust_agent_{}_rx", plugin);
send_message(
client.clone(),
"",
s,
PluginMessage::SessionTerminate {
fqdn: last.fqdn,
plugin: last.plugin,
session_id: last.id,
},
)
.await?;
}
let s = format!("rust_agent_{}_rx", plugin.clone());
send_message(
client.clone(),
"",
s,
PluginMessage::SessionCreate {
fqdn: fqdn.clone(),
plugin: plugin.clone(),
session_id: session.id.clone(),
},
)
.await?;
send_message(
client.clone(),
"",
AGENT_TX_RUST,
ManagerMessage::SessionCreateResponse {
fqdn,
plugin,
session_id: session.id,
},
)
.await?;
Ok(())
}
#[derive(serde::Deserialize, serde::Serialize, Debug)]
struct GetArgs {
server_boot_time: String,
client_start_time: String,
}
#[derive(serde::Deserialize, Debug)]
struct MessageFqdn {
pub fqdn: Fqdn,
}
#[tokio::main]
async fn | () -> Result<(), Box<dyn std::error::Error>> {
iml_tracing::init();
// Handle an error in locks by shutting down
let (tx, rx) = oneshot::channel();
let shared_hosts = host::shared_hosts();
let shared_hosts2 = Arc::clone(&shared_hosts);
let shared_hosts3 = Arc::clone(&shared_hosts);
tokio::spawn(
async move {
let conn = iml_rabbit::connect_to_rabbit().await?;
let ch = iml_rabbit::create_channel(&conn).await?;
let mut s = consume_agent_tx_queue(ch, AGENT_TX_RUST).await?;
while let Some(msg) = s.try_next().await? {
let MessageFqdn { fqdn } = serde_json::from_slice(&msg.data)?;
let mut hosts = shared_hosts.lock().await;
let host = hosts.get_mut(&fqdn);
if let Some(host) = host {
let mut queue = host.queue.lock().await;
queue.push_back(msg.data);
tracing::debug!(
"Put data on host queue {}: Queue size: {:?}",
fqdn,
queue.len()
);
} else {
tracing::warn!(
"Dropping message to {:?} because it did not have a host queue",
fqdn
);
}
}
Ok(())
}
.unwrap_or_else(|e: ImlAgentCommsError| {
tx.send(()).unwrap_or_else(drop);
tracing::error!("{:?}", e)
}),
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts2));
let (fut, client_filter) = create_connection_filter().await?;
tokio::spawn(fut);
let receiver = warp::post()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(hosts_filter)
.and(client_filter)
.and(warp::body::json())
.and_then(
|fqdn: Fqdn,
hosts: SharedHosts,
client: Connection,
Envelope {
messages,
client_start_time,
..
}: Envelope| {
async move {
tracing::debug!("<-- Delivery from agent {}: Messages: {:?}", fqdn, messages,);
let sessions = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(&mut hosts, fqdn, client_start_time);
Arc::clone(&host.sessions)
};
for msg in messages {
let s2 = Arc::clone(&sessions);
match msg {
Message::Data {
plugin,
session_id,
session_seq,
body,
fqdn,
..
} => {
let lock = s2.lock().await;
let has_session =
session::get_by_session_id(&plugin, &session_id, &lock)
.is_some();
data_handler(
has_session,
client.clone(),
AgentData {
fqdn,
plugin,
session_id,
session_seq,
body,
},
)
.await?;
}
Message::SessionCreateRequest { plugin, fqdn } => {
let mut lock = s2.lock().await;
session_create_req_handler(&mut lock, client.clone(), fqdn, plugin)
.await?;
}
}
}
Ok::<(), ImlAgentCommsError>(())
}
.map_err(warp::reject::custom)
.map_ok(|_| {
warp::reply::with_status(warp::reply(), warp::http::StatusCode::ACCEPTED)
})
},
);
let hosts_filter = warp::any().map(move || Arc::clone(&shared_hosts3));
let sender = warp::get()
.and(warp::header::<String>("x-ssl-client-name").map(Fqdn))
.and(warp::query::<GetArgs>())
.and(hosts_filter)
.and_then(|fqdn: Fqdn, args: GetArgs, hosts: SharedHosts| {
async move {
{
let mut hosts = hosts.lock().await;
let mut host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop();
// If we are not dealing with the same agent anymore, terminate all existing sessions.
if host.client_start_time != args.client_start_time {
tracing::info!(
"Terminating all sessions on {:?} because start time has changed",
fqdn
);
host.client_start_time = args.client_start_time;
return Ok::<_, ImlAgentCommsError>(ManagerMessages {
messages: vec![ManagerMessage::SessionTerminateAll { fqdn }],
});
}
}
let (tx, rx) = oneshot::channel();
let (sessions, queue) = {
let mut hosts = hosts.lock().await;
let host = host::get_or_insert(
&mut hosts,
fqdn.clone(),
args.client_start_time.clone(),
);
host.stop_reading = Some(tx);
(Arc::clone(&host.sessions), Arc::clone(&host.queue))
};
let xs = flush_queue::flush(queue, Duration::from_secs(30), rx).await?;
let mut xs: Vec<ManagerMessage> = xs
.into_iter()
.map(|x| serde_json::from_slice(&x).map_err(ImlAgentCommsError::from))
.collect::<Result<Vec<_>, ImlAgentCommsError>>()?;
let guard = sessions.lock().await;
xs.retain(|x| session::is_session_valid(x, &guard));
tracing::debug!(
"--> Delivery to agent {}({:?}): {:?}",
&fqdn,
&args.client_start_time,
xs,
);
Ok::<_, ImlAgentCommsError>(ManagerMessages { messages: xs })
}
.map_err(warp::reject::custom)
})
.map(|envelope| warp::reply::json(&envelope));
let log = warp::log("iml_agent_comms::api");
let routes = warp::path("message").and(receiver.or(sender).with(log));
let addr = iml_manager_env::get_http_agent2_addr();
tracing::info!("Starting iml-agent-comms on {:?}", addr);
let (_, fut) = warp::serve(routes).bind_with_graceful_shutdown(addr, rx.map(drop));
fut.await;
Ok(())
}
| main | identifier_name |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)]
mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn | (&self) -> bool {
self.was_cancelled_before_sent_to_server()
}
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
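// A standalone illustration (trait names invented here, not part of this SDK) of the
// blanket-impl technique used by `CheckStateMachineInFinal` above: a narrow helper trait is
// implemented once for every type satisfying a broader bound, so callers can query
// `is_final_state` without naming the concrete machine type.
trait Machine {
    fn on_final_state(&self) -> bool;
}
trait InFinal {
    fn is_final_state(&self) -> bool;
}
impl<M: Machine> InFinal for M {
    fn is_final_state(&self) -> bool {
        self.on_final_state()
    }
}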
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs to inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
}
| was_cancelled_before_sent_to_server | identifier_name |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)]
mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn was_cancelled_before_sent_to_server(&self) -> bool |
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs to inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
}
| {
self.was_cancelled_before_sent_to_server()
} | identifier_body |
mod.rs | mod workflow_machines;
// TODO: Move all these inside a submachines module
#[allow(unused)]
mod activity_state_machine;
#[allow(unused)] | mod cancel_external_state_machine;
#[allow(unused)]
mod cancel_workflow_state_machine;
#[allow(unused)]
mod child_workflow_state_machine;
mod complete_workflow_state_machine;
#[allow(unused)]
mod continue_as_new_workflow_state_machine;
#[allow(unused)]
mod fail_workflow_state_machine;
#[allow(unused)]
mod local_activity_state_machine;
#[allow(unused)]
mod mutable_side_effect_state_machine;
#[allow(unused)]
mod side_effect_state_machine;
#[allow(unused)]
mod signal_external_state_machine;
mod timer_state_machine;
#[allow(unused)]
mod upsert_search_attributes_state_machine;
#[allow(unused)]
mod version_state_machine;
mod workflow_task_state_machine;
#[cfg(test)]
pub(crate) mod test_help;
pub(crate) use workflow_machines::{WFMachinesError, WorkflowMachines};
use crate::{
machines::workflow_machines::MachineResponse,
protos::{
coresdk::{self, command::Variant, wf_activation_job},
temporal::api::{
command::v1::{
command::Attributes, CancelTimerCommandAttributes, Command,
CompleteWorkflowExecutionCommandAttributes, StartTimerCommandAttributes,
},
enums::v1::CommandType,
history::v1::{
HistoryEvent, WorkflowExecutionCanceledEventAttributes,
WorkflowExecutionSignaledEventAttributes, WorkflowExecutionStartedEventAttributes,
},
},
},
};
use prost::alloc::fmt::Formatter;
use rustfsm::{MachineError, StateMachine};
use std::{
convert::{TryFrom, TryInto},
fmt::{Debug, Display},
};
use tracing::Level;
pub(crate) type ProtoCommand = Command;
/// Implementors of this trait represent something that can (eventually) call into a workflow to
/// drive it, start it, signal it, cancel it, etc.
pub(crate) trait DrivenWorkflow: ActivationListener + Send {
/// Start the workflow
fn start(&mut self, attribs: WorkflowExecutionStartedEventAttributes);
/// Obtain any output from the workflow's recent execution(s). Because the lang sdk is
/// responsible for calling workflow code as a result of receiving tasks from
/// [crate::Core::poll_task], we cannot directly iterate it here. Thus implementations of this
/// trait are expected to either buffer output or otherwise produce it on demand when this
/// function is called.
///
/// In the case of the real [WorkflowBridge] implementation, commands are simply pulled from
/// a buffer that the language side sinks into when it calls [crate::Core::complete_task]
fn fetch_workflow_iteration_output(&mut self) -> Vec<WFCommand>;
/// Signal the workflow
fn signal(&mut self, attribs: WorkflowExecutionSignaledEventAttributes);
/// Cancel the workflow
fn cancel(&mut self, attribs: WorkflowExecutionCanceledEventAttributes);
}
/// Allows observers to listen to newly generated outgoing activation jobs. Used for testing, where
/// some activations must be handled before outgoing commands are issued to avoid deadlocking.
pub(crate) trait ActivationListener {
fn on_activation_job(&mut self, _activation: &wf_activation_job::Attributes) {}
}
/// [DrivenWorkflow]s respond with these when called, to indicate what they want to do next.
/// EX: Create a new timer, complete the workflow, etc.
#[derive(Debug, derive_more::From)]
pub enum WFCommand {
/// Returned when we need to wait for the lang sdk to send us something
NoCommandsFromLang,
AddTimer(StartTimerCommandAttributes),
CancelTimer(CancelTimerCommandAttributes),
CompleteWorkflow(CompleteWorkflowExecutionCommandAttributes),
}
#[derive(thiserror::Error, Debug, derive_more::From)]
#[error("Couldn't convert <lang> command")]
pub struct InconvertibleCommandError(pub coresdk::Command);
impl TryFrom<coresdk::Command> for WFCommand {
type Error = InconvertibleCommandError;
fn try_from(c: coresdk::Command) -> Result<Self, Self::Error> {
match c.variant {
Some(Variant::Api(Command {
attributes: Some(attrs),
..
})) => match attrs {
Attributes::StartTimerCommandAttributes(s) => Ok(WFCommand::AddTimer(s)),
Attributes::CancelTimerCommandAttributes(s) => Ok(WFCommand::CancelTimer(s)),
Attributes::CompleteWorkflowExecutionCommandAttributes(c) => {
Ok(WFCommand::CompleteWorkflow(c))
}
_ => unimplemented!(),
},
_ => Err(c.into()),
}
}
}
/// Extends [rustfsm::StateMachine] with some functionality specific to the temporal SDK.
///
/// Formerly known as `EntityStateMachine` in Java.
trait TemporalStateMachine: CheckStateMachineInFinal + Send {
fn name(&self) -> &str;
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError>;
/// Tell the state machine to handle some event. Returns a list of responses that can be used
/// to update the overall state of the workflow. EX: To issue outgoing WF activations.
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
/// Attempt to cancel the command associated with this state machine, if it is cancellable
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError>;
/// Should return true if the command was cancelled before we sent it to the server. Always
/// returns false for non-cancellable machines
fn was_cancelled_before_sent_to_server(&self) -> bool;
}
impl<SM> TemporalStateMachine for SM
where
SM: StateMachine + CheckStateMachineInFinal + WFMachinesAdapter + Cancellable + Clone + Send,
<SM as StateMachine>::Event: TryFrom<HistoryEvent>,
<SM as StateMachine>::Event: TryFrom<CommandType>,
WFMachinesError: From<<<SM as StateMachine>::Event as TryFrom<HistoryEvent>>::Error>,
<SM as StateMachine>::Command: Debug,
<SM as StateMachine>::State: Display,
<SM as StateMachine>::Error: Into<WFMachinesError> + 'static + Send + Sync,
{
fn name(&self) -> &str {
<Self as StateMachine>::name(self)
}
fn handle_command(&mut self, command_type: CommandType) -> Result<(), WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling command",
?command_type,
machine_name = %self.name(),
state = %self.state()
);
if let Ok(converted_command) = command_type.try_into() {
match self.on_event_mut(converted_command) {
Ok(_c) => Ok(()),
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
} else {
Err(WFMachinesError::UnexpectedCommand(command_type))
}
}
fn handle_event(
&mut self,
event: &HistoryEvent,
has_next_event: bool,
) -> Result<Vec<MachineResponse>, WFMachinesError> {
event!(
Level::DEBUG,
msg = "handling event",
%event,
machine_name = %self.name(),
state = %self.state()
);
let converted_event = event.clone().try_into()?;
match self.on_event_mut(converted_event) {
Ok(c) => {
if !c.is_empty() {
event!(Level::DEBUG, msg = "Machine produced commands", ?c, state = %self.state());
}
let mut machine_responses = vec![];
for cmd in c {
machine_responses.extend(self.adapt_response(event, has_next_event, cmd)?);
}
Ok(machine_responses)
}
Err(MachineError::InvalidTransition) => {
Err(WFMachinesError::InvalidTransitionDuringEvent(
event.clone(),
format!(
"{} in state {} says the transition is invalid",
self.name(),
self.state()
),
))
}
Err(MachineError::Underlying(e)) => Err(e.into()),
}
}
fn cancel(&mut self) -> Result<MachineResponse, WFMachinesError> {
let res = self.cancel();
res.map_err(|e| match e {
MachineError::InvalidTransition => {
WFMachinesError::InvalidTransition("while attempting to cancel")
}
MachineError::Underlying(e) => e.into(),
})
}
fn was_cancelled_before_sent_to_server(&self) -> bool {
self.was_cancelled_before_sent_to_server()
}
}
/// Exists purely to allow generic implementation of `is_final_state` for all [StateMachine]
/// implementors
trait CheckStateMachineInFinal {
/// Returns true if the state machine is in a final state
fn is_final_state(&self) -> bool;
}
impl<SM> CheckStateMachineInFinal for SM
where
SM: StateMachine,
{
fn is_final_state(&self) -> bool {
self.on_final_state()
}
}
/// This trait exists to bridge [StateMachine]s and the [WorkflowMachines] instance. It has access
/// to the machine's concrete types while hiding those details from [WorkflowMachines]
trait WFMachinesAdapter: StateMachine {
/// Given the event being processed, and a command that this [StateMachine] instance just
/// produced, perform any handling that needs to inform the [WorkflowMachines] instance of some
/// action to be taken in response to that command.
fn adapt_response(
&self,
event: &HistoryEvent,
has_next_event: bool,
my_command: Self::Command,
) -> Result<Vec<MachineResponse>, WFMachinesError>;
}
trait Cancellable: StateMachine {
/// Cancel the machine / the command represented by the machine.
///
/// # Panics
/// * If the machine is not cancellable. It's a logic error on our part to call it on such
/// machines.
fn cancel(&mut self) -> Result<MachineResponse, MachineError<Self::Error>> {
// It's a logic error on our part if this is ever called on a machine that can't actually
// be cancelled
panic!(format!("Machine {} cannot be cancelled", self.name()))
}
/// Should return true if the command was cancelled before we sent it to the server
fn was_cancelled_before_sent_to_server(&self) -> bool {
false
}
}
#[derive(Debug)]
struct NewMachineWithCommand<T: TemporalStateMachine> {
command: ProtoCommand,
machine: T,
}
impl Debug for dyn TemporalStateMachine {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.write_str(self.name())
}
} | random_line_split |
|
cimport.rs | use libc::{c_char, c_uint, c_float, c_int};
use scene::RawScene;
use types::{AiString, MemoryInfo};
use fileio::{AiFileIO};
/// Represents an opaque set of settings to be used during importing.
#[repr(C)]
pub struct | {
sentinel: c_char,
}
#[link(name = "assimp")]
extern {
/// Reads the given file and returns its content.
///
/// If the call succeeds, the imported data is returned in an aiScene
/// structure. The data is intended to be read-only, it stays property of
/// the ASSIMP library and will be stable until aiReleaseImport() is
/// called. After you're done with it, call aiReleaseImport() to free the
/// resources associated with this file. If the import fails, NULL is
/// returned instead. Call aiGetErrorString() to retrieve a human-readable
/// error text.
///
/// # Parameters
/// * `pFile` Path and filename of the file to be imported,
/// expected to be a null-terminated c-string. NULL is not a valid value.
///
/// * `pFlags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags.
///
/// Pointer to the imported data or NULL if the import failed.
pub fn aiImportFile(fname: *const c_char, flags: c_uint) -> *const RawScene;
/// Same as #aiImportFileEx, but adds an extra parameter containing importer settings.
/// * pProps #aiPropertyStore instance containing import settings.
// ASSIMP_API const C_STRUCT aiScene* aiImportFileExWithProperties(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS,
// const C_STRUCT aiPropertyStore* pProps);
pub fn aiImportFileExWithProperties(fname: *const c_char,
flags: c_uint,
fio: *mut AiFileIO,
props: *const PropertyStore)
-> *const RawScene;
/// Returns the error text of the last failed import process.
///
/// @return A textual description of the error that occurred at the last
/// import process. NULL if there was no error.
/// There can't be an error if you got a non-NULL aiScene from
/// aiImportFile/aiImportFileEx/aiApplyPostProcessing.
pub fn aiGetErrorString() -> *const c_char;
/// Reads the given file from a given memory buffer,
///
/// If the call succeeds, the contents of the file are returned as a
/// pointer to an aiScene object. The returned data is intended to be
/// read-only, the importer keeps ownership of the data and will destroy
/// it upon destruction. If the import fails, NULL is returned. A
/// human-readable error description can be retrieved by calling
/// aiGetErrorString().
/// # Arguments
///
/// * `buffer` Pointer to the file data
/// * `length` Length of pBuffer, in bytes
/// * `flags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags. If you wish to inspect the imported
/// scene first in order to fine-tune your post-processing setup,
/// consider to use aiApplyPostProcessing().
/// * `hint` An additional hint to the library. If this is a non-empty
/// string, the library looks for a loader to support the file extension
/// specified by pHint and passes the file to the first matching loader.
/// If this loader is unable to complete the request, the library
/// continues and tries to determine the file format on its own, a task
/// that may or may not be successful. Check the return value, and
/// you'll know...
///
/// A pointer to the imported data, NULL if the import failed.
///
/// Note: This is a straightforward way to decode models from memory
/// buffers, but it doesn't handle model formats spreading their data
/// across multiple files or even directories. Examples include OBJ or
/// MD3, which outsource parts of their material stuff into external
/// scripts. If you need the full functionality, provide a custom IOSystem
/// to make Assimp find these files.
pub fn aiImportFileFromMemory(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char)
-> *const RawScene;
/// Same as aiImportFileFromMemory, but adds an extra parameter
/// containing importer settings.
///
/// * props PropertyStore instance containing import settings.
pub fn aiImportFileFromMemoryWithProperties(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char,
props: *const PropertyStore)
-> *const RawScene;
/// Apply post-processing to an already-imported scene.
///
/// This is strictly equivalent to calling aiImportFile()/aiImportFileEx
/// with the same flags. However, you can use this separate function to
/// inspect the imported scene first to fine-tune your post-processing
/// setup.
///
/// # Parameters
///
/// * `scene` Scene to work on.
/// * `flags` Provide a bitwise combination of the aiPostProcessSteps flags.
///
/// Returns a pointer to the post-processed data.
///
/// Post processing is done in-place, meaning this is still the same
/// aiScene which you passed for pScene. However, _if_ post-processing
/// failed, the scene could now be NULL. That's quite a rare case, post
/// processing steps are not really designed to 'fail'. To be exact, the
/// aiProcess_ValidateDS flag is currently the only post processing step
/// which can actually cause the scene to be reset to NULL.
pub fn aiApplyPostProcessing(scene: *const RawScene,
flags: c_uint)
-> *const RawScene;
/// Releases all resources associated with the given import process.
///
/// Call this function after you're done with the imported data.
/// pScene The imported data to release. NULL is a valid value.
pub fn aiReleaseImport(scene: *const RawScene);
/// Get the approximated storage required by an imported asset
///
/// # Parameters
///
/// * pIn Input asset.
/// * in Data structure to be filled.
pub fn aiGetMemoryRequirements(scene: *const RawScene, info: *mut MemoryInfo);
/// Create an empty property store.
///
/// Property stores are used to collect import settings.
/// Returns a new property store. Property stores need to
/// be manually destroyed using the aiReleasePropertyStore API function.
pub fn aiCreatePropertyStore() -> *mut PropertyStore;
/// Delete a property store.
pub fn aiReleasePropertyStore(p: *mut PropertyStore);
/// Set an integer property.
///
/// This is the C-version of Assimp::Importer::SetPropertyInteger(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// * `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file (AI_CONFIG_XXX).
/// * `value` New value for the property
pub fn aiSetImportPropertyInteger(store: *mut PropertyStore,
name: *const c_char,
value: c_int);
/// Set a floating-point property.
///
/// This is the C-version of Assimp::Importer::SetPropertyFloat(). In the
/// C interface, properties are always shared by all imports. It is not
/// possible to specify them per import.
///
/// `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// `value` New value for the property
///
pub fn aiSetImportPropertyFloat(store: *mut PropertyStore,
name: *const c_char,
value: c_float);
/// Set a string property.
///
/// This is the C-version of Assimp::Importer::SetPropertyString(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// # Parameters
/// * property store to modify. Use aiCreatePropertyStore to obtain a store.
/// * szName Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// (AI_CONFIG_XXX).
/// * value New value for the property
///
pub fn aiSetImportPropertyString(store: *mut PropertyStore,
name: *const c_char,
st: *const AiString);
}
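// A hedged usage sketch of the declarations above: import a scene with no post-processing
// flags, surface assimp's error string when the import fails, and leave release to the caller
// via `aiReleaseImport`. The wrapper name `import_scene` is invented for illustration and is
// not part of this crate's public API.
unsafe fn import_scene(path: &str) -> Result<*const RawScene, String> {
    let c_path = std::ffi::CString::new(path).map_err(|e| e.to_string())?;
    let scene = aiImportFile(c_path.as_ptr(), 0);
    if scene.is_null() {
        // aiGetErrorString returns a NUL-terminated description of the last failure.
        let err = std::ffi::CStr::from_ptr(aiGetErrorString());
        return Err(err.to_string_lossy().into_owned());
    }
    Ok(scene) // the caller is responsible for eventually calling aiReleaseImport(scene)
}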
// /** Reads the given file using user-defined I/O functions and returns
// * its content.
// *
// * If the call succeeds, the imported data is returned in an aiScene structure.
// * The data is intended to be read-only, it stays property of the ASSIMP
// * library and will be stable until aiReleaseImport() is called. After you're
// * done with it, call aiReleaseImport() to free the resources associated with
// * this file. If the import fails, NULL is returned instead. Call
// * aiGetErrorString() to retrieve a human-readable error text.
// * @param pFile Path and filename of the file to be imported,
// * expected to be a null-terminated c-string. NULL is not a valid value.
// * @param pFlags Optional post processing steps to be executed after
// * a successful import. Provide a bitwise combination of the
// * #aiPostProcessSteps flags.
// * @param pFS aiFileIO structure. Will be used to open the model file itself
// * and any other files the loader needs to open. Pass NULL to use the default
// * implementation.
// * @return Pointer to the imported data or NULL if the import failed.
// * @note Include <aiFileIO.h> for the definition of #aiFileIO.
// */
// ASSIMP_API const C_STRUCT aiScene* aiImportFileEx(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS);
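// A hedged sketch (helper name and property name invented here) of the property-store flow
// documented above: create a store, set a property, pass it to aiImportFileExWithProperties
// with no custom file I/O, then release the store. Real AI_CONFIG_* property names come from
// assimp's config.h.
unsafe fn import_with_properties(c_path: *const c_char) -> *const RawScene {
    let store = aiCreatePropertyStore();
    let prop = std::ffi::CString::new("EXAMPLE_PROPERTY").unwrap();
    aiSetImportPropertyInteger(store, prop.as_ptr(), 1);
    let scene = aiImportFileExWithProperties(c_path, 0, std::ptr::null_mut(), store);
    aiReleasePropertyStore(store);
    scene // may be null on failure; check aiGetErrorString and call aiReleaseImport when done
}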
| PropertyStore | identifier_name |
cimport.rs | use libc::{c_char, c_uint, c_float, c_int};
use scene::RawScene;
use types::{AiString, MemoryInfo};
use fileio::{AiFileIO};
/// Represents an opaque set of settings to be used during importing.
#[repr(C)]
pub struct PropertyStore {
sentinel: c_char,
}
#[link(name = "assimp")]
extern {
/// Reads the given file and returns its content.
///
/// If the call succeeds, the imported data is returned in an aiScene
/// structure. The data is intended to be read-only, it stays property of
/// the ASSIMP library and will be stable until aiReleaseImport() is
/// called. After you're done with it, call aiReleaseImport() to free the
/// resources associated with this file. If the import fails, NULL is
/// returned instead. Call aiGetErrorString() to retrieve a human-readable
/// error text.
///
/// # Parameters
/// * `pFile` Path and filename of the file to be imported,
/// expected to be a null-terminated c-string. NULL is not a valid value.
///
/// * `pFlags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags.
///
/// Pointer to the imported data or NULL if the import failed.
pub fn aiImportFile(fname: *const c_char, flags: c_uint) -> *const RawScene;
/// Same as #aiImportFileEx, but adds an extra parameter containing importer settings.
/// * pProps #aiPropertyStore instance containing import settings.
// ASSIMP_API const C_STRUCT aiScene* aiImportFileExWithProperties(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS,
// const C_STRUCT aiPropertyStore* pProps);
pub fn aiImportFileExWithProperties(fname: *const c_char,
flags: c_uint,
fio: *mut AiFileIO,
props: *const PropertyStore)
-> *const RawScene;
/// Returns the error text of the last failed import process.
///
/// @return A textual description of the error that occurred at the last
/// import process. NULL if there was no error.
/// There can't be an error if you got a non-NULL aiScene from
/// aiImportFile/aiImportFileEx/aiApplyPostProcessing.
pub fn aiGetErrorString() -> *const c_char;
/// Reads the given file from a given memory buffer,
///
/// If the call succeeds, the contents of the file are returned as a
/// pointer to an aiScene object. The returned data is intended to be
/// read-only, the importer keeps ownership of the data and will destroy
/// it upon destruction. If the import fails, NULL is returned. A
/// human-readable error description can be retrieved by calling
/// aiGetErrorString().
/// # Arguments
///
/// * `buffer` Pointer to the file data
/// * `length` Length of pBuffer, in bytes
/// * `flags` Optional post processing steps to be executed after
/// a successful import. Provide a bitwise combination of the
/// aiPostProcessSteps flags. If you wish to inspect the imported
/// scene first in order to fine-tune your post-processing setup,
/// consider to use aiApplyPostProcessing().
/// * `hint` An additional hint to the library. If this is a non-empty
/// string, the library looks for a loader to support the file extension
/// specified by pHint and passes the file to the first matching loader.
/// If this loader is unable to complete the request, the library
/// continues and tries to determine the file format on its own, a task
/// that may or may not be successful. Check the return value, and
/// you'll know...
///
/// A pointer to the imported data, NULL if the import failed.
///
/// Note: This is a straightforward way to decode models from memory
/// buffers, but it doesn't handle model formats spreading their data
/// across multiple files or even directories. Examples include OBJ or
/// MD3, which outsource parts of their material stuff into external
/// scripts. If you need the full functionality, provide a custom IOSystem
/// to make Assimp find these files.
pub fn aiImportFileFromMemory(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char)
-> *const RawScene;
| ///
/// * props PropertyStore instance containing import settings.
pub fn aiImportFileFromMemoryWithProperties(buf: *const c_char,
len: c_uint,
flags: c_uint,
hint: *const c_char,
props: *const PropertyStore)
-> *const RawScene;
/// Apply post-processing to an already-imported scene.
///
/// This is strictly equivalent to calling aiImportFile()/aiImportFileEx
/// with the same flags. However, you can use this separate function to
/// inspect the imported scene first to fine-tune your post-processing
/// setup.
///
/// # Parameters
///
/// * `scene` Scene to work on.
/// * `flags` Provide a bitwise combination of the aiPostProcessSteps flags.
///
/// Returns a pointer to the post-processed data.
///
/// Post processing is done in-place, meaning this is still the same
/// aiScene which you passed for pScene. However, _if_ post-processing
/// failed, the scene could now be NULL. That's quite a rare case, post
/// processing steps are not really designed to 'fail'. To be exact, the
/// aiProcess_ValidateDS flag is currently the only post processing step
/// which can actually cause the scene to be reset to NULL.
pub fn aiApplyPostProcessing(scene: *const RawScene,
flags: c_uint)
-> *const RawScene;
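// Sketch of the inspect-then-post-process flow described above (illustrative only;
// `path` stands in for a null-terminated model path and `postprocess_flags` for the
// desired aiPostProcessSteps bits):
//
// unsafe {
//     let scene = aiImportFile(path.as_ptr(), 0); // import without post-processing
//     if !scene.is_null() {
//         // ... inspect the scene, decide which steps are worthwhile ...
//         let scene = aiApplyPostProcessing(scene, postprocess_flags);
//         // `scene` can only become NULL here if aiProcess_ValidateDS rejected the data.
//         aiReleaseImport(scene);
//     }
// }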
/// Releases all resources associated with the given import process.
///
/// Call this function after you're done with the imported data.
/// * `scene` The imported data to release. NULL is a valid value.
pub fn aiReleaseImport(scene: *const RawScene);
/// Get the approximated storage required by an imported asset
///
/// # Parameters
///
/// * `scene` Input asset.
/// * `info` Data structure to be filled.
pub fn aiGetMemoryRequirements(scene: *const RawScene, info: *mut MemoryInfo);
/// Create an empty property store.
///
/// Property stores are used to collect import settings.
/// Returns a new property store. Property stores need to
/// be manually destroyed using the aiReleasePropertyStore API function.
pub fn aiCreatePropertyStore() -> *mut PropertyStore;
/// Delete a property store.
pub fn aiReleasePropertyStore(p: *mut PropertyStore);
/// Set an integer property.
///
/// This is the C-version of Assimp::Importer::SetPropertyInteger(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// * `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file (AI_CONFIG_XXX).
/// * `value` New value for the property
pub fn aiSetImportPropertyInteger(store: *mut PropertyStore,
name: *const c_char,
value: c_int);
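// Sketch of the shared property-store workflow (illustrative; `property_name` is a
// placeholder for one of the AI_CONFIG_XXX keys from config.h, `path` for the model path):
//
// unsafe {
//     let props = aiCreatePropertyStore();
//     aiSetImportPropertyInteger(props, property_name.as_ptr(), 1);
//     let scene = aiImportFileExWithProperties(path.as_ptr(), 0, std::ptr::null_mut(), props);
//     // ... use the scene ...
//     aiReleaseImport(scene);
//     aiReleasePropertyStore(props);
// }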
/// Set a floating-point property.
///
/// This is the C-version of Assimp::Importer::SetPropertyFloat(). In the
/// C interface, properties are always shared by all imports. It is not
/// possible to specify them per import.
///
/// `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// `value` New value for the property
///
pub fn aiSetImportPropertyFloat(store: *mut PropertyStore,
name: *const c_char,
value: c_float);
/// Set a string property.
///
/// This is the C-version of Assimp::Importer::SetPropertyString(). In
/// the C interface, properties are always shared by all imports. It is
/// not possible to specify them per import.
///
/// # Parameters
/// * `store` Property store to modify. Use aiCreatePropertyStore to obtain a store.
/// * `name` Name of the configuration property to be set. All supported
/// public properties are defined in the config.h header file
/// (AI_CONFIG_XXX).
/// * `st` New value for the property
///
pub fn aiSetImportPropertyString(store: *mut PropertyStore,
name: *const c_char,
st: *const AiString);
}
// /** Reads the given file using user-defined I/O functions and returns
// * its content.
// *
// * If the call succeeds, the imported data is returned in an aiScene structure.
// * The data is intended to be read-only, it stays property of the ASSIMP
// * library and will be stable until aiReleaseImport() is called. After you're
// * done with it, call aiReleaseImport() to free the resources associated with
// * this file. If the import fails, NULL is returned instead. Call
// * aiGetErrorString() to retrieve a human-readable error text.
// * @param pFile Path and filename of the file to be imported,
// * expected to be a null-terminated c-string. NULL is not a valid value.
// * @param pFlags Optional post processing steps to be executed after
// * a successful import. Provide a bitwise combination of the
// * #aiPostProcessSteps flags.
// * @param pFS aiFileIO structure. Will be used to open the model file itself
// * and any other files the loader needs to open. Pass NULL to use the default
// * implementation.
// * @return Pointer to the imported data or NULL if the import failed.
// * @note Include <aiFileIO.h> for the definition of #aiFileIO.
// */
// ASSIMP_API const C_STRUCT aiScene* aiImportFileEx(
// const char* pFile,
// unsigned int pFlags,
// C_STRUCT aiFileIO* pFS);
stacks.rs
//! Generate a Wasm program that keeps track of its current stack frames.
//!
//! We can then compare the stack trace we observe in Wasmtime to what the Wasm
//! program believes its stack should be. Any discrepancy between the two
//! points to a bug in either this test case generator or Wasmtime's stack
//! walker.
use std::mem;
use arbitrary::{Arbitrary, Result, Unstructured};
use wasm_encoder::Instruction;
const MAX_FUNCS: usize = 20;
const MAX_OPS: usize = 1_000;
/// Generate a Wasm module that keeps track of its current call stack, to
/// compare to the host.
#[derive(Debug)]
pub struct Stacks {
funcs: Vec<Function>,
inputs: Vec<u8>,
}
#[derive(Debug, Default)]
struct Function {
ops: Vec<Op>,
}
#[derive(Arbitrary, Debug, Clone, Copy)]
enum Op {
CheckStackInHost,
Call(u32),
CallThroughHost(u32),
}
impl<'a> Arbitrary<'a> for Stacks {
fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
let funcs = Self::arbitrary_funcs(u)?;
let n = u.len().min(200);
let inputs = u.bytes(n)?.to_vec();
Ok(Stacks { funcs, inputs })
}
}
impl Stacks {
fn arbitrary_funcs(u: &mut Unstructured) -> Result<Vec<Function>> {
let mut funcs = vec![Function::default()];
// The indices of functions within `funcs` that we still need to
// generate.
let mut work_list = vec![0];
while let Some(f) = work_list.pop() {
let mut ops = Vec::with_capacity(u.arbitrary_len::<Op>()?.min(MAX_OPS));
for _ in 0..ops.capacity() {
ops.push(u.arbitrary()?);
}
for op in &mut ops {
match op {
Op::CallThroughHost(idx) | Op::Call(idx) => {
if u.is_empty() || funcs.len() >= MAX_FUNCS || u.ratio(4, 5)? {
// Call an existing function.
*idx = *idx % u32::try_from(funcs.len()).unwrap();
} else {
// Call a new function...
*idx = u32::try_from(funcs.len()).unwrap();
// ...which means we also need to eventually define it.
work_list.push(funcs.len());
funcs.push(Function::default());
}
}
Op::CheckStackInHost => {}
}
}
funcs[f].ops = ops;
}
Ok(funcs)
}
/// Get the input values to run the Wasm module with.
pub fn inputs(&self) -> &[u8] {
&self.inputs
}
/// Get this test case's Wasm module.
///
/// The Wasm module has the following imports:
///
/// * `host.check_stack: [] -> []`: The host can check the Wasm's
/// understanding of its own stack against the host's understanding of the
/// Wasm stack to find discrepancy bugs.
///
/// * `host.call_func: [funcref] -> []`: The host should call the given
/// `funcref`, creating a call stack with multiple sequences of contiguous
/// Wasm frames on the stack like `[..., wasm, host, wasm]`.
///
/// The Wasm module has the following exports:
///
/// * `run: [i32] -> []`: This function should be called with each of the
/// input values to run this generated test case.
///
/// * `get_stack: [] -> [i32 i32]`: Get the pointer and length of the `u32`
/// array of this Wasm's understanding of its stack. This is useful for
/// checking whether the host's view of the stack at a trap matches the
/// Wasm program's understanding.
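///
/// # Example
///
/// A host-side sketch (not compiled here), assuming a Wasmtime-style embedding where
/// `host.check_stack` and `host.call_func` have been provided and `run` has been
/// looked up as a typed function:
///
/// ```ignore
/// let wasm_bytes = stacks.wasm();
/// // ... compile and instantiate `wasm_bytes` ...
/// for byte in stacks.inputs().iter().copied() {
///     // Each input byte is used as the fuel budget; the module traps once it runs out.
///     let _ = run.call(&mut store, byte as i32);
/// }
/// ```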
pub fn wasm(&self) -> Vec<u8> {
let mut module = wasm_encoder::Module::new();
let mut types = wasm_encoder::TypeSection::new();
let run_type = types.len();
types.function(vec![wasm_encoder::ValType::I32], vec![]);
let get_stack_type = types.len();
types.function(
vec![],
vec![wasm_encoder::ValType::I32, wasm_encoder::ValType::I32],
);
let null_type = types.len();
types.function(vec![], vec![]);
let call_func_type = types.len();
types.function(vec![wasm_encoder::ValType::FUNCREF], vec![]);
section(&mut module, types);
let mut imports = wasm_encoder::ImportSection::new();
let check_stack_func = 0;
imports.import(
"host",
"check_stack",
wasm_encoder::EntityType::Function(null_type),
);
let call_func_func = 1;
imports.import(
"host",
"call_func",
wasm_encoder::EntityType::Function(call_func_type),
);
let num_imported_funcs = 2;
section(&mut module, imports);
let mut funcs = wasm_encoder::FunctionSection::new();
for _ in &self.funcs {
funcs.function(null_type);
}
let run_func = funcs.len() + num_imported_funcs;
funcs.function(run_type);
let get_stack_func = funcs.len() + num_imported_funcs;
funcs.function(get_stack_type);
section(&mut module, funcs);
let mut mems = wasm_encoder::MemorySection::new();
let memory = mems.len();
mems.memory(wasm_encoder::MemoryType {
minimum: 1,
maximum: Some(1),
memory64: false,
shared: false,
});
section(&mut module, mems);
let mut globals = wasm_encoder::GlobalSection::new();
let fuel_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
let stack_len_global = globals.len();
globals.global(
wasm_encoder::GlobalType {
val_type: wasm_encoder::ValType::I32,
mutable: true,
},
&wasm_encoder::ConstExpr::i32_const(0),
);
section(&mut module, globals);
let mut exports = wasm_encoder::ExportSection::new();
exports.export("run", wasm_encoder::ExportKind::Func, run_func);
exports.export("get_stack", wasm_encoder::ExportKind::Func, get_stack_func);
exports.export("memory", wasm_encoder::ExportKind::Memory, memory);
exports.export("fuel", wasm_encoder::ExportKind::Global, fuel_global);
section(&mut module, exports);
let mut elems = wasm_encoder::ElementSection::new();
elems.declared(wasm_encoder::Elements::Functions(
&(0..num_imported_funcs + u32::try_from(self.funcs.len()).unwrap()).collect::<Vec<_>>(),
));
section(&mut module, elems);
let check_fuel = |body: &mut wasm_encoder::Function| {
// Trap if we are out of fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Eqz)
.instruction(&Instruction::If(wasm_encoder::BlockType::Empty))
.instruction(&Instruction::Unreachable)
.instruction(&Instruction::End);
// Decrement fuel.
body.instruction(&Instruction::GlobalGet(fuel_global))
.instruction(&Instruction::I32Const(1))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(fuel_global));
};
let push_func_to_stack = |body: &mut wasm_encoder::Function, func: u32| {
// Add this function to our internal stack.
//
// Note that we know our `stack_len_global` can't go beyond memory
// bounds because we limit fuel to at most `u8::MAX` and each stack
// entry is an `i32` and `u8::MAX * size_of(i32)` still fits in one
// Wasm page.
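// (With the default 64 KiB page: 255 entries * 4 bytes = 1020 bytes.)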
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(func as i32))
.instruction(&Instruction::I32Store(wasm_encoder::MemArg {
offset: 0,
align: 0,
memory_index: memory,
}))
.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Add)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let pop_func_from_stack = |body: &mut wasm_encoder::Function| {
// Remove this function from our internal stack.
body.instruction(&Instruction::GlobalGet(stack_len_global))
.instruction(&Instruction::I32Const(mem::size_of::<i32>() as i32))
.instruction(&Instruction::I32Sub)
.instruction(&Instruction::GlobalSet(stack_len_global));
};
let mut code = wasm_encoder::CodeSection::new();
for (func_index, func) in self.funcs.iter().enumerate() {
let mut body = wasm_encoder::Function::new(vec![]);
push_func_to_stack(
&mut body,
num_imported_funcs + u32::try_from(func_index).unwrap(),
);
check_fuel(&mut body);
// Perform our specified operations.
for op in &func.ops {
match op {
Op::CheckStackInHost => {
body.instruction(&Instruction::Call(check_stack_func));
}
Op::Call(f) => {
body.instruction(&Instruction::Call(f + num_imported_funcs));
}
Op::CallThroughHost(f) => {
body.instruction(&Instruction::RefFunc(f + num_imported_funcs))
.instruction(&Instruction::Call(call_func_func));
}
}
}
// Potentially trap at the end of our function as well, so that we
// exercise the scenario where the Wasm-to-host trampoline
// initialized `last_wasm_exit_sp` et al when calling out to a host
// function, but then we returned back to Wasm and then trapped
// while `last_wasm_exit_sp` et al are still initialized from that
// previous host call.
check_fuel(&mut body);
pop_func_from_stack(&mut body);
function(&mut code, body);
}
let mut run_body = wasm_encoder::Function::new(vec![]);
// Reset the bump pointer for the internal stack (this allows us to
// reuse an instance in the oracle, rather than re-instantiate).
run_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalSet(stack_len_global));
// Initialize the fuel global.
run_body
.instruction(&Instruction::LocalGet(0))
.instruction(&Instruction::GlobalSet(fuel_global));
push_func_to_stack(&mut run_body, run_func);
// Make sure to check for out-of-fuel in the `run` function as well, so
// that we also capture stack traces with only one frame, not just `run`
// followed by the first locally-defined function and then zero or more
// extra frames.
check_fuel(&mut run_body);
// Call the first locally defined function.
run_body.instruction(&Instruction::Call(num_imported_funcs));
check_fuel(&mut run_body);
pop_func_from_stack(&mut run_body);
function(&mut code, run_body);
let mut get_stack_body = wasm_encoder::Function::new(vec![]);
get_stack_body
.instruction(&Instruction::I32Const(0))
.instruction(&Instruction::GlobalGet(stack_len_global));
function(&mut code, get_stack_body);
section(&mut module, code);
return module.finish();
// Helper that defines a section in the module and takes ownership of it
// so that it is dropped and its memory reclaimed after adding it to the
// module.
fn section(module: &mut wasm_encoder::Module, section: impl wasm_encoder::Section) {
module.section(§ion);
}
// Helper that defines a function body in the code section and takes
// ownership of it so that it is dropped and its memory reclaimed after
// adding it to the module.
fn function(code: &mut wasm_encoder::CodeSection, mut func: wasm_encoder::Function) {
func.instruction(&Instruction::End);
code.function(&func);
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use rand::prelude::*;
use wasmparser::Validator;
#[test]
fn stacks_generates_valid_wasm_modules() {
    let mut rng = SmallRng::seed_from_u64(0);
    let mut buf = vec![0; 2048];
    for _ in 0..1024 {
        rng.fill_bytes(&mut buf);
        let u = Unstructured::new(&buf);
        if let Ok(stacks) = Stacks::arbitrary_take_rest(u) {
            let wasm = stacks.wasm();
            validate(&wasm);
        }
    }
}
fn validate(wasm: &[u8]) {
let mut validator = Validator::new();
let err = match validator.validate_all(wasm) {
Ok(_) => return,
Err(e) => e,
};
drop(std::fs::write("test.wasm", wasm));
if let Ok(text) = wasmprinter::print_bytes(wasm) {
drop(std::fs::write("test.wat", &text));
}
panic!("wasm failed to validate: {}", err);
}
}
window.rs
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// An MSAA framebuffer texture, its extent, and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
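///
/// A usage sketch (illustrative only):
///
/// ```ignore
/// let builder = Window::build_window("My tool", (1280, 720));
/// // hand `builder` back from the closure given to `App::new_window()`
/// ```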
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
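///
/// A sketch of cross-thread use (assuming the handle is `Send`, as the event-based
/// design implies):
///
/// ```ignore
/// let handle = window.handle();
/// std::thread::spawn(move || {
///     // ... background work ...
///     let _ = handle.request_invalidate(); // redraw happens on the main (UI) thread
/// });
/// ```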
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates a new framebuffer for multisample anti-aliasing with the specified
/// `sample_count`.
/// Returns a tuple with the `wgpu::TextureView`, its extent, and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to disjoin the lifetimes of the `Window` with that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
let val = std::mem::replace(&mut *self.imgui_context, unsafe {
std::mem::MaybeUninit::uninit().assume_init()
});
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
))
}
window.rs
pub struct LayoutContext<'ui> {
ui: &'ui imgui::Ui<'ui>,
window_handle: WindowHandle,
invalidate_amount_changed: Cell<bool>,
invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
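/// Two `InvalidateAmount::Until(..)` requests within one frame, for example,
/// collapse to the later of the two instants.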
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// A MSAA framebuffer texture and its sample count.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
let data_model = &mut self.data_model as *mut Box<(dyn Layout +'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
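// The second argument appears to be wgpu's optional trace path; `file!()` looks like a stand-in rather than a real trace directory.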
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates a new framebuffer for multisample anti-aliasing (MSAA) with the specified
/// `sample_count`.
/// Returns a tuple of the `wgpu::TextureView`, its `wgpu::Extent3d`, and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to separate the lifetime of the `Window` from that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn | (&mut self) {
// Safety: take the context out of the `ManuallyDrop`; it is never used again afterwards.
// This avoids materialising an uninitialised `imgui::Context`, which is undefined behaviour.
let val = unsafe { std::mem::ManuallyDrop::take(&mut self.imgui_context) };
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
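// Cast through a raw pointer so `layout()` can borrow the boxed data model while also receiving `&mut Window` (`this`), which a plain mutable borrow would not allow.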
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
| drop | identifier_name |
window.rs | std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates a new framebuffer for multisample anti-aliasing (MSAA) with the specified
/// `sample_count`.
/// Returns a tuple of the `wgpu::TextureView`, its `wgpu::Extent3d`, and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> {
&mut self.renderer.textures
}
}
/// A window prepared to be updated.
///
/// This struct is used to separate the lifetime of the `Window` from that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
// Safety: take the context out of the `ManuallyDrop`; it is never used again afterwards.
// This avoids materialising an uninitialised `imgui::Context`, which is undefined behaviour.
let val = unsafe { std::mem::ManuallyDrop::take(&mut self.imgui_context) };
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
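// Forward the raw winit event to imgui first, then handle the events addressed to this window.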
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
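// Cast through a raw pointer so `layout()` can borrow the boxed data model while also receiving `&mut Window` (`this`), which a plain mutable borrow would not allow.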
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
{
(msaa_framebuffer, Some(&frame.output.view))
} else {
this.msaa_framebuffer = Some(Window::create_msaa_framebuffer(
&this.gpu_device,
&this.swap_chain_desc,
app.msaa_samples,
));
(
&this.msaa_framebuffer.as_ref().unwrap().0,
Some(&frame.output.view),
)
}
} else {
(&frame.output.view, None)
};
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment,
resolve_target,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
store: true,
},
}],
depth_stencil_attachment: None,
});
match this
.renderer
.render(&draw_data, &this.queue, &this.gpu_device, &mut render_pass)
{
Err(err) => {
return Err(UiError::with_source(
ErrorCode::RENDER_ERROR,
utils::MessageError::debug(&err),
))
}
Ok(_) => (),
};
drop(render_pass);
this.queue.submit(Some(encoder.finish()));
Ok(())
}
}
/// A thread-safe handle to a `Window`.
///
/// This handle can be used to communicate with the Window from a different thread
/// through events. All methods on this handle will return an error when the window does
/// not exist anymore (can be queried with `alive()`).
pub struct WindowHandle {
window_id: winit::window::WindowId,
app_handle: app::AppHandle,
alive: Weak<()>,
}
impl WindowHandle {
/// Queries whether the represented window still exists.
pub fn alive(&self) -> bool {
match self.alive.upgrade() {
Some(_) => true,
_ => false,
}
}
/// Runs the closure `callback` in the UI thread.
///
/// Returns an error if the `Window` this handle refers to doesn't exist anymore.
pub fn run(
&self,
callback: impl FnOnce(&mut app::App, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
}
self.app_handle
.send_event(app::AppEvent::ExecuteWithWindow {
window_id: self.window_id,
callback: app::ExecuteWithWindowCallback(Box::new(callback)),
})
.unwrap();
Ok(())
}
/// Runs the closure callback in the UI thread and passes
/// the data model of the `Window` downcast to T.
///
/// The main thread will panic if the data model of the Window
/// cannot be downcast to T.
///
/// ## Note
/// There is no guarantee that the passed closure will be run.
/// If the Window gets destroyed after this method has been called
/// and before the main thread has gotten the event for running the closure,
/// it will be skipped.
pub fn run_with_data_model<T: Layout + Any>(
&self,
callback: impl FnOnce(&mut app::App, &mut T, &mut Window) + 'static + Send,
) -> UiResult<()> {
if let None = self.alive.upgrade() | {
return Err(ErrorCode::WINDOW_DOES_NOT_EXIST.into());
} | conditional_block |
|
window.rs | invalidate_amount: Cell<InvalidateAmount>,
}
impl LayoutContext<'_> {
/// Requests invalidation of the specified `amount` after the current frame is
/// finished. The resulting requested invalidation amount is the maximum of
/// all `request_invalidate()` calls for one frame.
#[inline]
pub fn request_invalidate(&self, amount: InvalidateAmount) {
if self.invalidate_amount.get() < amount {
self.invalidate_amount.set(amount);
} else if self.invalidate_amount.get() == amount {
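// Same amount requested again: for `Until`, keep whichever deadline is later; other variants stay as they are.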
self.invalidate_amount
.set(match (self.invalidate_amount.get(), amount) {
(InvalidateAmount::Until(inst0), InvalidateAmount::Until(inst1)) => {
InvalidateAmount::Until(utils::max_instant(inst0, inst1))
}
(curr, _) => curr,
});
}
self.invalidate_amount_changed.set(true);
}
}
impl<'ui> Deref for LayoutContext<'ui> {
type Target = imgui::Ui<'ui>;
#[inline]
fn deref(&self) -> &Self::Target {
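// Checking widget state on every `ui` access lets the context escalate the invalidation request automatically.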
if self.ui.is_item_edited() {
self.request_invalidate(InvalidateAmount::Once);
} else if self.ui.is_item_activated() {
self.request_invalidate(InvalidateAmount::Indefinetely);
} else if self.ui.is_item_deactivated() {
self.request_invalidate(InvalidateAmount::Stop);
}
self.ui
}
}
impl Drop for LayoutContext<'_> {
fn drop(&mut self) {
if self.invalidate_amount_changed.get() {
let _ = self
.window_handle
.set_invalidate_amount(self.invalidate_amount.get());
} else {
if self.ui.is_any_item_active() {
let _ = self.window_handle.request_invalidate();
}
}
}
}
/// This struct represents an OS window, which contains an ImGUI graphical user interface.
pub struct Window {
window: winit::window::Window,
last_frame_time: std::time::Instant,
alive: Arc<()>,
app_handle: app::AppHandle,
invalidate_amount: InvalidateAmount,
// Everything for rendering
surface: wgpu::Surface,
gpu_device: wgpu::Device,
swap_chain_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
renderer: imgui_wgpu::Renderer,
queue: wgpu::Queue,
/// The MSAA framebuffer's texture view, its extent, and the sample count used, if MSAA is enabled.
msaa_framebuffer: Option<(wgpu::TextureView, wgpu::Extent3d, u32)>,
// All imgui related
winit_platform: imgui_winit_support::WinitPlatform,
imgui: ImguiContext,
default_font: Option<imgui::FontId>,
last_cursor: Option<imgui::MouseCursor>,
/// The data model associated with this native window, that holds its state.
pub data_model: Box<dyn Layout>,
}
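/// Tracks where the imgui context currently lives: parked in the `Window` (`Suspended`) or
/// temporarily moved into an `ActiveWindow` (`Used`), since only one context may be active at a time.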
enum ImguiContext {
Suspended(imgui::SuspendedContext),
Used(),
}
impl Window {
/// Closes the window and, if no further windows remain, shuts down the application.
pub fn close(&mut self) {
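// Raw pointer so `before_close()` can take `&mut self` while the data model field is also borrowed.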
let data_model = &mut self.data_model as *mut Box<(dyn Layout + 'static)>;
let should_close = unsafe { &mut *data_model }.before_close(self);
if should_close {
let window_id = self.id();
let _ = self.app_handle.execute_with_gui(move |app: &mut App| {
app.remove_window(window_id);
});
}
}
/// Get a mutable reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window_mut(&mut self) -> &mut winit::window::Window {
&mut self.window
}
/// Get a reference to the underlying `winit::window::Window`, which can be used to
/// change size, position, etc.
pub fn window(&self) -> &winit::window::Window {
&self.window
}
/// Get the id of the window.
pub fn id(&self) -> winit::window::WindowId {
self.window.id()
}
/// Get the time the window was last updated.
pub fn last_frame_time(&self) -> std::time::Instant {
self.last_frame_time
}
/// Updates the frame time to now and returns the duration since the last frame.
#[inline]
pub(crate) fn update_frame_time(&mut self) -> std::time::Duration {
let now = std::time::Instant::now();
let frame_delta = now - self.last_frame_time;
self.last_frame_time = now;
frame_delta
}
/// Creates a standard top level window.
///
/// Call this method inside the closure passed to `App::new_window()`.
pub fn build_window(title: &str, size: (u32, u32)) -> winit::window::WindowBuilder {
winit::window::WindowBuilder::new()
.with_title(title)
.with_inner_size(winit::dpi::PhysicalSize {
width: size.0,
height: size.1,
})
.with_resizable(true)
}
/// Creates a new `Window` instance.
///
/// *Only for internal use.* The user creates new windows using `App::new_window()`.
pub(crate) fn new(
app: &mut app::App,
data_model: Box<dyn Layout>,
wnd: winit::window::Window,
visible: bool,
) -> UiResult<Window> {
let size = wnd.inner_size();
let surface = unsafe { app.wgpu_instance.create_surface(&wnd) };
// select adapter and gpu device
let (device, queue) = Window::select_gpu_device(&app, &surface)?;
// create the swapchain
let format = wgpu::TextureFormat::Bgra8UnormSrgb;
let swap_chain_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &swap_chain_desc);
let msaa_framebuffer = if app.msaa_samples > 1 {
Some(Window::create_msaa_framebuffer(
&device,
&swap_chain_desc,
app.msaa_samples,
))
} else {
None
};
// create imgui ui
// Note: This is going to panic if any other `imgui::Context` is currently active
let mut imgui = imgui::Context::create_with_shared_font_atlas(app.font_atlas.clone());
let mut platform = imgui_winit_support::WinitPlatform::init(&mut imgui);
platform.attach_window(
imgui.io_mut(),
&wnd,
imgui_winit_support::HiDpiMode::Default,
);
app.apply_imgui_settings(&mut imgui);
// create renderer
let renderer = imgui_wgpu::Renderer::new(
&mut imgui,
&device,
&queue,
swap_chain_desc.format,
None,
app.msaa_samples,
);
let mut wnd = Window {
window: wnd,
last_frame_time: std::time::Instant::now(),
alive: Arc::default(),
app_handle: app.handle(),
invalidate_amount: InvalidateAmount::Stop,
surface,
gpu_device: device,
swap_chain_desc,
swap_chain,
renderer,
queue,
msaa_framebuffer,
winit_platform: platform,
imgui: ImguiContext::Suspended(imgui.suspend()),
default_font: None,
last_cursor: None,
data_model,
};
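// Render one frame before making the window visible, presumably so the user never sees an empty surface.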
if visible {
// draw immediately
let mut active_window = wnd.activate()?;
active_window.render(app, std::time::Duration::from_millis(std::u64::MAX))?;
drop(active_window);
wnd.window().set_visible(true);
}
Ok(wnd)
}
pub fn invalidate_amount(&self) -> InvalidateAmount {
self.invalidate_amount
}
pub fn set_invalidate_amount(&self, amount: InvalidateAmount) -> UiResult<()> {
self.app_handle
.send_event(super::AppEvent::SetWindowInvalidateAmount {
window_id: self.id(),
state: amount,
})
}
fn update_render_size<T: std::convert::Into<u32>>(
&mut self,
_app: &App,
size: winit::dpi::PhysicalSize<T>,
) {
self.swap_chain_desc.width = size.width.into();
self.swap_chain_desc.height = size.height.into();
self.swap_chain = self
.gpu_device
.create_swap_chain(&self.surface, &self.swap_chain_desc);
// Note: Normally we would also update the optional MSAA framebuffer here, but
// this causes visual resize lag, presumably because the recreation of the MSAA
// texture is quite expensive. Instead this is done in the
// `ActiveWindow::render()` method.
}
pub(super) fn activate<'a>(&'a mut self) -> UiResult<ActiveWindow<'a>> {
let imgui = std::mem::replace(&mut self.imgui, ImguiContext::Used());
if let ImguiContext::Suspended(ctx) = imgui {
let ctx = ctx.activate();
match ctx {
Ok(ctx) => {
return Ok(ActiveWindow {
imgui_context: std::mem::ManuallyDrop::new(ctx),
wrapped_window: self,
});
}
Err(ctx) => {
self.imgui = ImguiContext::Suspended(ctx);
return Err(UiError::new(ErrorCode::IMGUI_CONTEXT_ACTIVATE_FAILED));
}
}
}
Err(UiError::new(ErrorCode::INVALID_IMGUI_CONTEXT))
}
/// Creates a thread-safe handle to this native window.
///
/// This handle can be used to access the represented native window from another
/// thread using events that get sent to and dispatched in the main (UI) thread.
pub fn handle(&self) -> WindowHandle {
WindowHandle {
window_id: self.id(),
app_handle: self.app_handle.clone(),
alive: Arc::downgrade(&self.alive),
}
}
/// Request window invalidation as soon as possible.
pub fn request_invalidate(&self) {
self.window.request_redraw();
}
fn select_gpu_device(
app: &App,
surface: &wgpu::Surface,
) -> UiResult<(wgpu::Device, wgpu::Queue)> {
use futures::executor::block_on;
let adapter_opts = wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::Default,
compatible_surface: Some(&surface),
};
let adapter_request = app.wgpu_instance.request_adapter(&adapter_opts);
let adapter = match block_on(adapter_request) {
Some(val) => val,
None => return Err(ErrorCode::GRAPHICS_ADAPTER_NOT_AVAILABLE.into()),
};
let device_desc = wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: wgpu::Limits::default(),
shader_validation: false,
};
let device_request =
adapter.request_device(&device_desc, Some(std::path::Path::new(file!())));
let device_and_queue = match block_on(device_request) {
Ok(device) => device,
Err(err) => {
return Err(UiError::with_source(
ErrorCode::REQUEST_GRAPHICS_DEVICE_FAILED,
err,
))
}
};
Ok(device_and_queue)
}
/// Creates a new framebuffer for multisample anti-aliasing (MSAA) with the specified
/// `sample_count`.
/// Returns a tuple of the `wgpu::TextureView`, its `wgpu::Extent3d`, and the MSAA sample count used.
fn create_msaa_framebuffer(
device: &wgpu::Device,
sc_desc: &wgpu::SwapChainDescriptor,
sample_count: u32,
) -> (wgpu::TextureView, wgpu::Extent3d, u32) {
let tex_extent = wgpu::Extent3d {
width: sc_desc.width,
height: sc_desc.height,
depth: 1,
};
let tex_desc = &wgpu::TextureDescriptor {
label: Some("imgui_msaa_texture"),
size: tex_extent,
mip_level_count: 1,
sample_count: sample_count,
dimension: wgpu::TextureDimension::D2,
format: sc_desc.format,
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
};
let tex_view_desc = &wgpu::TextureViewDescriptor {
label: Some("imgui_msaa_texture_view"),
format: Some(sc_desc.format),
dimension: None,
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
level_count: None,
base_array_layer: 0,
array_layer_count: None,
};
(
device.create_texture(tex_desc).create_view(&tex_view_desc),
tex_extent,
sample_count,
)
}
/// Gets the `wgpu::Queue` of this window.
pub fn wgpu_queue(&self) -> &wgpu::Queue {
&self.queue
}
/// Gets the `wgpu::Device` of this window.
pub fn wgpu_device(&self) -> &wgpu::Device {
&self.gpu_device
}
/// Gets the renderer.
pub fn renderer(&self) -> &imgui_wgpu::Renderer {
&self.renderer
}
/// Gets a reference to the texture collection.
pub fn textures(&self) -> &imgui::Textures<imgui_wgpu::Texture> {
&self.renderer.textures
}
/// Gets a mutable reference to the texture collection.
pub fn textures_mut(&mut self) -> &mut imgui::Textures<imgui_wgpu::Texture> |
}
/// A window prepared to be updated.
///
/// This struct is used to separate the lifetime of the `Window` from that of the
/// `imgui::Context`.
pub struct ActiveWindow<'a> {
/// The imgui context of the `window`.
pub imgui_context: std::mem::ManuallyDrop<imgui::Context>,
/// The original native window, where its `imgui` value has been replaced with
/// `ImguiContext::Used()` and moved to `imgui_context`.
pub wrapped_window: &'a mut Window,
}
impl<'a> Drop for ActiveWindow<'a> {
/// Returns the `imgui::Context` back to the native window.
fn drop(&mut self) {
// Safety: take the context out of the `ManuallyDrop`; it is never used again afterwards.
// This avoids materialising an uninitialised `imgui::Context`, which is undefined behaviour.
let val = unsafe { std::mem::ManuallyDrop::take(&mut self.imgui_context) };
self.wrapped_window.imgui = ImguiContext::Suspended(val.suspend());
}
}
impl Deref for ActiveWindow<'_> {
type Target = Window;
fn deref(&self) -> &Self::Target {
self.wrapped_window
}
}
impl DerefMut for ActiveWindow<'_> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.wrapped_window
}
}
impl ActiveWindow<'_> {
pub fn on_event(&mut self, app: &App, evt: &super::Event) {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
this.winit_platform
.handle_event(imgui.io_mut(), &this.window, evt);
match evt {
super::Event::WindowEvent {
window_id,
event: ref wnd_evt,
} if *window_id == this.id() => match wnd_evt {
winit::event::WindowEvent::CloseRequested => {
self.close();
}
winit::event::WindowEvent::Resized(physical_size) => {
self.update_render_size(app, *physical_size);
}
_ => (),
},
super::Event::MainEventsCleared => {
this.request_invalidate();
}
_ => (),
}
}
pub fn render(&mut self, app: &App, delta_time: std::time::Duration) -> UiResult<()> {
let ActiveWindow {
wrapped_window: this,
imgui_context: imgui,
} = self;
// let this: &mut Window = this;
// let imgui: &mut imgui::Context = imgui;
imgui.io_mut().update_delta_time(delta_time);
this.winit_platform
.prepare_frame(imgui.io_mut(), &this.window)
.expect("Failed to prepare frame.");
let ui = imgui.frame();
{
let font_handle = match this.default_font {
Some(font) => Some(ui.push_font(font)),
None => None,
};
let layout_ctx = LayoutContext {
ui: &ui,
window_handle: this.handle(),
invalidate_amount_changed: Cell::new(false),
invalidate_amount: Cell::new(this.invalidate_amount()),
};
let data_model = &mut this.data_model as *mut Box<(dyn Layout + 'static)>;
unsafe {
(*data_model).layout(layout_ctx, app, this);
}
if let Some(font_handle) = font_handle {
font_handle.pop(&ui);
}
}
if this.last_cursor != ui.mouse_cursor() {
this.last_cursor = ui.mouse_cursor();
this.winit_platform.prepare_render(&ui, &this.window);
}
let draw_data: &imgui::DrawData = ui.render();
if draw_data.draw_lists_count() == 0 {
log::debug!("Imgui draw data is empty!");
return Ok(());
}
let frame: wgpu::SwapChainFrame = match this.swap_chain.get_current_frame() {
Ok(val) => val,
Err(_) => return Err(UiError::new(ErrorCode::SWAP_CHAIN_TIMEOUT)),
};
let cmd_encoder_desc = wgpu::CommandEncoderDescriptor {
label: Some("imgui_command_encoder"),
};
let mut encoder: wgpu::CommandEncoder =
this.gpu_device.create_command_encoder(&cmd_encoder_desc);
// If we have a msaa framebuffer, use it.
let (attachment, resolve_target) =
if let Some((ref msaa_framebuffer, size, _)) = this.msaa_framebuffer {
// Recreate the msaa_framebuffer if its size doesn't match.
if size.width == this.swap_chain_desc.width
&& size.height == this.swap_chain_desc.height
| {
&mut self.renderer.textures
} | identifier_body |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
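/// Discord event handler; `channel_id` is the set of channels currently being watched for convertible attachments.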
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - stop watching this channel for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) | else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
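// Messages starting with '%' are parsed as bot commands; anything else is only scanned for attachments below.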
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all("/tmp/converter") {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
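// These formats tend to take a while to convert, so show a typing indicator in the channel first.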
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
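/// Loads the watched channel ids from `CHANNELS_PATH`, one decimal id per line; malformed entries are ignored.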
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
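/// Reloads the hash40 label map from the given label files so hashed names can be shown by their labels.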
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| {
message.say("Channel unset");
} | conditional_block |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - stop watching this channel for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
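/// Adds or removes the current channel from the watch list; only the bot owner or a server administrator may do this.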
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all("/tmp/converter") {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn | (label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
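/// Runs the bundled `update.sh` script, reports its output back to the channel, and reloads the hash labels.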
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| update_labels | identifier_name |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler |
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - stop watching this channel for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\
Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
enum SetUnset {
Set,
Unset
}
use SetUnset::*;
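/// Persists the watched channel ids to `CHANNELS_PATH`, one id per line, mentioning the bot owner if writing fails.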
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && &message.content[0..1] == "%" {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all("/tmp/converter") {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
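/// Entry point: prepares song data and hash labels, restores the watched channels, then starts the Discord client using the `DISCORD_TOKEN` environment variable.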
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
}
| {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
} | identifier_body |
main.rs | #![feature(try_trait)]
#![feature(label_break_value)]
extern crate serenity;
mod converter;
mod arc_commands;
mod message_helper;
use message_helper::MessageHelper;
use std::sync::{Arc, Mutex};
use std::process::Command;
use serenity::model::prelude::*;
use serenity::prelude::*;
use serenity::utils::MessageBuilder;
use std::env;
use std::fs::{File, self};
use std::io::Write;
use std::path::PathBuf;
use std::collections::BTreeSet;
#[derive(Default)]
struct Handler {
channel_id: Arc<Mutex<BTreeSet<ChannelId>>>
}
impl Handler {
pub fn new<It: IntoIterator<Item=ChannelId>>(channels: It) -> Handler {
Handler {
channel_id: Arc::new(Mutex::new(channels.into_iter().collect())),
..Default::default()
}
}
}
use converter::SUPPORTED_TYPES;
static HELP_TEXT: &str =
"%convert [args] - convert file even if channel isn't set
%help - display this message\n\
%set_channel - watch this channel for files\n\
%unset_channel - stop watching this channel for files\n\
%update - update param labels and install paramxml if not installed\n\
%thanks - credits\n\
%supported_types - print all supported types
\n\
Arc commands\n\
%ls [folder] - list files/folders in arc
%get [file] - request a file from the arc
%find_song [song name query] - list songs for a given name
%get_song [song name query] - download the first song from %find_song
\n\
Include 'start,end' or 'start-end' for looping in wav -> nus3audio conversions";
static THANKS_TEXT: &str =
"jam1garner - bot programming, libnus3audio, mscdec/msclang, etc.\n\
Arthur (@BenArthur_7) - motion_list_rs, sqb-rs, and much more\n\
Moosehunter, jam1garner, Raytwo, soneek - VGAudio lopus support\n\
RandomTalkingBush, DemonSlayerX8, jam1garner - data.arc hashes
soneek - nus3audio help\n\
leoetlino - byml-v2\n\
TNN, Genwald - WAV/audio help\n\
Ploaj, SMG (ScanMountGoat) - ArcCross, SSBHLib\n\ | enum SetUnset {
Set,
Unset
}
use SetUnset::*;
fn save_channels(channel_ids: &BTreeSet<ChannelId>, message: &MessageHelper, owner: &User) {
if let Err(e) = fs::write(
CHANNELS_PATH,
channel_ids.iter()
.map(ToString::to_string)
.collect::<Vec<_>>()
.join("\n")
) {
message.say(
MessageBuilder::new()
.mention(owner)
.push(" Failed to save channel ids:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
};
}
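// Sketch (added, not in the original): with two watched channels the file at
// CHANNELS_PATH ends up looking like
// 111111111111111111
// 222222222222222222
// (illustrative ids), which is exactly the newline-separated format that
// load_channels parses back on startup.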
fn set_or_unset_channel(handler: &Handler, message: &MessageHelper, set: SetUnset) {
let owner = message.get_current_application_info().owner;
let is_admin = message.member_permissions().administrator();
if message.author == owner || is_admin {
let arc = Arc::clone(&handler.channel_id);
let mut channel_ids = arc.lock().unwrap();
match set {
Set => {
channel_ids.insert(message.channel_id);
message.say("Channel set");
save_channels(&channel_ids, message, &owner);
}
Unset => {
if channel_ids.remove(&message.channel_id) {
message.say("Channel unset");
} else {
message.say("Channel was not set");
}
}
}
} else {
message.reply("You do not have the proper permissions to set the channel.");
}
}
impl EventHandler for Handler {
fn message(&self, context: Context, message: Message) {
let message = MessageHelper::new(message, context);
if message.author.bot {
return;
}
let mut message_content = &message.content[..];
let mut convert_command = false;
if !message.content.is_empty() && message.content.starts_with('%') {
match message.content[1..].trim() {
s if s.starts_with("convert") => {
message_content = &s[7..];
convert_command = true;
}
"update" => {
update(&message);
return;
}
"set_channel" => set_or_unset_channel(self, &message, Set),
"unset_channel" => set_or_unset_channel(self, &message, Unset),
"help" => {
let _ =
message.say(
MessageBuilder::new()
.push("Version 1.3\nCommands:")
.push_codeblock_safe(HELP_TEXT, None)
.push(format!("Supported types: {}...", &SUPPORTED_TYPES[..90]))
.build()
);
}
"thanks" => {
let _ =
message.say(
MessageBuilder::new()
.push("A big thanks to everyone who has in anyway helped:")
.push_codeblock_safe(THANKS_TEXT, None)
.build()
);
}
"supported_types" => {
let _ =
message.say(
MessageBuilder::new()
.push("Supported filetypes:")
.push_codeblock_safe(SUPPORTED_TYPES, None)
.build()
);
}
s @ "ls" | s if s.starts_with("ls ") => arc_commands::ls(s, &message),
s if s.starts_with("get ") => arc_commands::get(s, &message),
s if s.starts_with("find_song ") => arc_commands::find_song(s, &message),
s if s.starts_with("get_song ") => arc_commands::get_song(s, &message),
s if s.starts_with("hash ") => arc_commands::hash(s, &message),
_ => {
message.say("Invalid command");
return;
}
}
}
if !convert_command {
let enabled_channels = Arc::clone(&self.channel_id);
if !enabled_channels.lock().unwrap().contains(&message.channel_id) {
return;
}
}
for attachment in &message.attachments {
let content = match attachment.download() {
Ok(content) => content,
Err(why) => {
println!("Error downloading attachment: {:?}", why);
message.say("Error downloading attachment");
return;
},
};
let path: PathBuf = ["/tmp/converter/", &attachment.filename].iter().collect();
match std::fs::create_dir_all("/tmp/converter") {
Ok(()) => {}
Err(why) => {
println!("Error creating dir: {:?}", why);
message.say("Error creating dir");
}
}
let mut file = match File::create(path.as_os_str()) {
Ok(file) => file,
Err(why) => {
println!("Error creating file: {:?}", why);
message.say("Error creating file");
return;
},
};
if let Err(why) = file.write(&content) {
println!("Error writing to file: {:?}", why);
return;
}
if match converter::extension(path.as_path()) {
"mscsb" | "c" | "wav" | "zip" | "yml" => true,
s => converter::byml::EXTENSIONS.contains(&s.trim_start_matches("s")),
} {
message.broadcast_typing();
}
match converter::convert(path, message_content) {
Ok(path) => {
let _ =
message.send_file(path.to_str().unwrap(), "Converted file")
.map_err(|e|{
message.say(
MessageBuilder::new()
.push("Error sending file: ")
.push_codeblock_safe(e.to_string(), None)
.build()
);
});
std::fs::remove_file(path).unwrap();
}
Err(why) => {
println!("Error converting file: {:?}", why);
message.say(
MessageBuilder::new()
.push("Error converting file:")
.push_codeblock_safe(why.message, None)
.build()
);
}
}
}
}
}
const MOTION_LABEL_PATH: &str = "motion_list_labels.txt";
const SQB_LABEL_PATH: &str = "sqb_labels.txt";
const CHANNELS_PATH: &str = "channels.txt";
fn load_channels() -> Vec<ChannelId> {
fs::read_to_string(CHANNELS_PATH).ok()
.map(|channels_file|{
channels_file.split('\n')
.map(|s| u64::from_str_radix(s, 10))
.filter_map(Result::ok)
.map(Into::into)
.collect()
})
.unwrap_or_default()
}
fn main() {
arc_commands::setup_songs();
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
let channels = load_channels();
// Login with a bot token from the environment
let mut client = Client::new(&env::var("DISCORD_TOKEN").expect("token"), Handler::new(channels))
.expect("Error creating client");
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn update_labels(label_paths: &[&str]) {
hash40::set_labels(
label_paths
.iter()
.map(|label| hash40::read_labels(label).unwrap())
.flatten()
)
}
fn update(message: &MessageHelper) {
let update_output =
match Command::new("sh").arg("update.sh").output() {
Ok(x) => x,
Err(e) => {
message.say(
MessageBuilder::new()
.push("Failed to run update:")
.push_codeblock_safe(e.to_string(), None)
.build()
);
return;
}
};
if update_output.status.success() {
let out = std::str::from_utf8(&update_output.stdout[..]).unwrap();
message.say(out);
} else {
let err = std::str::from_utf8(&update_output.stderr[..]).unwrap();
message.say(
MessageBuilder::new()
.push("Error:")
.push_codeblock_safe(err, None)
.build()
);
}
update_labels(&[MOTION_LABEL_PATH, SQB_LABEL_PATH]);
} | Arthur, Dr. Hypercake, Birdwards, SMG, Meshima, TNN, Blazingflare, TheSmartKid - Param labels\n\
coolsonickirby, SushiiZ - testing help";
| random_line_split |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing elliptic points, scalars, and bigints to be hashed
///
/// Can be used with any hashing algorithm that implements the `Digest` trait (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) |
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing bigints to be used to instantiate, update, and finalize an HMAC.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
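// Usage sketch (added; mirrors the unit test at the bottom of this file):
// keying, tagging, and verifying an HMAC directly from BigInts.
//
// let key = BigInt::sample(256);
// let tag = Hmac::<sha2::Sha512>::new_bigint(&key)
// .chain_bigint(&BigInt::from(10))
// .result_bigint();
// assert!(Hmac::<sha2::Sha512>::new_bigint(&key)
// .chain_bigint(&BigInt::from(10))
// .verify_bigint(&tag)
// .is_ok());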
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data, different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key, different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
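// Added sketch (not part of the original suite): `digest_bigint` is assumed to
// be a thin convenience wrapper, so it should agree with feeding the same
// bytes through `chain` and converting the result by hand.
#[test]
fn digest_bigint_matches_manual_chain() {
let bytes = [1u8, 2, 3];
let direct = Sha256::digest_bigint(&bytes);
let manual = Sha256::new().chain(&bytes).result_bigint();
assert_eq!(direct, manual);
}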
}
| {
return scalar;
} | conditional_block |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing elliptic points, scalars, and bigints to be hashed
///
/// Can be used with any hashing algorithm that implements the `Digest` trait (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar); | mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing bigints to be used to instantiate, update, and finalize an HMAC.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data, different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key, different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
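// Added sketch (not part of the original suite): the "try and increment" loop
// in `result_scalar` depends only on the hash state, so equal input must map
// to the same scalar on every supported curve.
crate::test_for_all_curves!(result_scalar_is_deterministic);
fn result_scalar_is_deterministic<E: Curve>() {
let a = Sha256::new().chain_bigint(&BigInt::from(42)).result_scalar::<E>();
let b = Sha256::new().chain_bigint(&BigInt::from(42)).result_scalar::<E>();
assert_eq!(a, b);
}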
} | self
}
fn chain_scalars<'s, E: Curve>( | random_line_split |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing elliptic points, scalars, and bigints to be hashed
///
/// Can be used with any hashing algorithm that implements the `Digest` trait (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt {
Self::new().chain(bytes).result_bigint()
}
}
/// [Hmac] extension allowing bigints to be used to instantiate, update, and finalize an HMAC.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn | <E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data, different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key, different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
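// Added sketch (not part of the original suite): `chain_scalars` is assumed to
// be shorthand for repeated `chain_scalar` calls, so both spellings should
// produce the same digest.
crate::test_for_all_curves!(chain_scalars_matches_individual_calls);
fn chain_scalars_matches_individual_calls<E: Curve>() {
let a = Scalar::<E>::random();
let b = Scalar::<E>::random();
let via_iter = Sha256::new().chain_scalars(vec![&a, &b]).result_bigint();
let via_calls = Sha256::new().chain_scalar(&a).chain_scalar(&b).result_bigint();
assert_eq!(via_iter, via_calls);
}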
}
| create_sha256_from_ge_test | identifier_name |
ext.rs | use digest::Digest;
use hmac::crypto_mac::MacError;
use hmac::{Hmac, Mac, NewMac};
use typenum::Unsigned;
use crate::arithmetic::*;
use crate::elliptic::curves::{Curve, ECScalar, Point, Scalar};
/// [Digest] extension allowing elliptic points, scalars, and bigints to be hashed
///
/// Can be used with any hashing algorithm that implements the `Digest` trait (e.g. [Sha256](sha2::Sha256),
/// [Sha512](sha2::Sha512), etc.)
///
/// ## Example
///
/// ```rust
/// use sha2::Sha256;
/// use curv::arithmetic::*;
/// use curv::cryptographic_primitives::hashing::{Digest, DigestExt};
/// use curv::elliptic::curves::{Secp256k1, Point};
///
/// let hash = Sha256::new()
/// .chain_point(&Point::<Secp256k1>::generator())
/// .chain_point(Point::<Secp256k1>::base_point2())
/// .chain_bigint(&BigInt::from(10))
/// .result_bigint();
///
/// assert_eq!(hash, BigInt::from_hex("73764f937fbe25092466b417fa66ad9c62607865e1f8151df253aa3a2fd7599b").unwrap());
/// ```
pub trait DigestExt {
fn input_bigint(&mut self, n: &BigInt);
fn input_point<E: Curve>(&mut self, point: &Point<E>);
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn chain_point<E: Curve>(mut self, point: &Point<E>) -> Self
where
Self: Sized,
{
self.input_point(point);
self
}
fn chain_points<'p, E: Curve>(mut self, points: impl IntoIterator<Item = &'p Point<E>>) -> Self
where
Self: Sized,
{
for point in points {
self.input_point(point)
}
self
}
fn chain_scalar<E: Curve>(mut self, scalar: &Scalar<E>) -> Self
where
Self: Sized,
{
self.input_scalar(scalar);
self
}
fn chain_scalars<'s, E: Curve>(
mut self,
scalars: impl IntoIterator<Item = &'s Scalar<E>>,
) -> Self
where
Self: Sized,
{
for scalar in scalars {
self.input_scalar(scalar)
}
self
}
fn result_bigint(self) -> BigInt;
fn result_scalar<E: Curve>(self) -> Scalar<E>;
fn digest_bigint(bytes: &[u8]) -> BigInt;
}
impl<D> DigestExt for D
where
D: Digest + Clone,
{
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn input_point<E: Curve>(&mut self, point: &Point<E>) {
self.update(&point.to_bytes(false)[..])
}
fn input_scalar<E: Curve>(&mut self, scalar: &Scalar<E>) {
self.update(&scalar.to_bigint().to_bytes())
}
fn result_bigint(self) -> BigInt {
let result = self.finalize();
BigInt::from_bytes(&result)
}
fn result_scalar<E: Curve>(self) -> Scalar<E> {
let scalar_len = <<E::Scalar as ECScalar>::ScalarLength as Unsigned>::to_usize();
assert!(
Self::output_size() >= scalar_len,
"Output size of the hash({}) is smaller than the scalar length({})",
Self::output_size(),
scalar_len
);
// Try and increment.
for i in 0u32.. {
let starting_state = self.clone();
let hash = starting_state.chain(i.to_be_bytes()).finalize();
if let Ok(scalar) = Scalar::from_bytes(&hash[..scalar_len]) {
return scalar;
}
}
unreachable!("The probably of this reaching is extremely small ((2^n-q)/(2^n))^(2^32)")
}
fn digest_bigint(bytes: &[u8]) -> BigInt |
}
/// [Hmac] extension allowing bigints to be used to instantiate, update, and finalize an HMAC.
pub trait HmacExt: Sized {
fn new_bigint(key: &BigInt) -> Self;
fn input_bigint(&mut self, n: &BigInt);
fn chain_bigint(mut self, n: &BigInt) -> Self
where
Self: Sized,
{
self.input_bigint(n);
self
}
fn result_bigint(self) -> BigInt;
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError>;
}
impl<D> HmacExt for Hmac<D>
where
D: digest::Update + digest::BlockInput + digest::FixedOutput + digest::Reset + Default + Clone,
{
fn new_bigint(key: &BigInt) -> Self {
let bytes = key.to_bytes();
Self::new_from_slice(&bytes).expect("HMAC must take a key of any length")
}
fn input_bigint(&mut self, n: &BigInt) {
self.update(&n.to_bytes())
}
fn result_bigint(self) -> BigInt {
BigInt::from_bytes(&self.finalize().into_bytes())
}
fn verify_bigint(self, code: &BigInt) -> Result<(), MacError> {
self.verify(&code.to_bytes())
}
}
#[cfg(test)]
mod test {
use sha2::{Sha256, Sha512};
use super::*;
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
#[test]
fn vector_sha256_test() {
// Empty Message
let result: BigInt = Sha256::new().result_bigint();
assert_eq!(
result.to_hex(),
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
);
// 256 bit message
let result: BigInt = Sha256::new()
.chain_bigint(
&BigInt::from_hex(
"09fc1accc230a205e4a208e64a8f204291f581a12756392da4b8c0cf5ef02b95",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 2x128 bit messages
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("09fc1accc230a205e4a208e64a8f2042").unwrap())
.chain_bigint(&BigInt::from_hex("91f581a12756392da4b8c0cf5ef02b95").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"4f44c1c7fbebb6f9601829f3897bfd650c56fa07844be76489076356ac1886a4"
);
// 512 bit message
let result: BigInt = Sha256::new()
.chain_bigint(&BigInt::from_hex("5a86b737eaea8ee976a0a24da63e7ed7eefad18a101c1211e2b3650c5187c2a8a650547208251f6d4237e661c7bf4c77f335390394c37fa1a9f9be836ac28509").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"42e61e174fbb3897d6dd6cef3dd2802fe67b331953b06114a65c772859dfc1aa"
);
}
#[test]
// Test Vectors taken from:
// https://csrc.nist.gov/projects/cryptographic-algorithm-validation-program/secure-hashing#shavs
fn vector_sha512_test() {
// Empty message
let result: BigInt = Sha512::new().result_bigint();
assert_eq!(
result.to_hex(),
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"
);
// 2x256 bit message
let result: BigInt = Sha512::new()
.chain_bigint(
&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f7",
)
.unwrap(),
)
.chain_bigint(
&BigInt::from_hex(
"9d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d",
)
.unwrap(),
)
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 512 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex(
"c1ca70ae1279ba0b918157558b4920d6b7fba8a06be515170f202fafd36fb7f79d69fad745dba6150568db1e2b728504113eeac34f527fc82f2200b462ecbf5d").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"46e46623912b3932b8d662ab42583423843206301b58bf20ab6d76fd47f1cbbcf421df536ecd7e56db5354e7e0f98822d2129c197f6f0f222b8ec5231f3967d"
);
// 1024 bit message
let result: BigInt = Sha512::new()
.chain_bigint(&BigInt::from_hex("fd2203e467574e834ab07c9097ae164532f24be1eb5d88f1af7748ceff0d2c67a21f4e4097f9d3bb4e9fbf97186e0db6db0100230a52b453d421f8ab9c9a6043aa3295ea20d2f06a2f37470d8a99075f1b8a8336f6228cf08b5942fc1fb4299c7d2480e8e82bce175540bdfad7752bc95b577f229515394f3ae5cec870a4b2f8").unwrap())
.result_bigint();
assert_eq!(
result.to_hex(),
"a21b1077d52b27ac545af63b32746c6e3c51cb0cb9f281eb9f3580a6d4996d5c9917d2a6e484627a9d5a06fa1b25327a9d710e027387fc3e07d7c4d14c6086cc"
);
}
crate::test_for_all_curves!(create_sha512_from_ge_test);
fn create_sha256_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha256::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha256::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
crate::test_for_all_curves!(create_sha256_from_ge_test);
fn create_sha512_from_ge_test<E: Curve>() {
let generator = Point::<E>::generator();
let base_point2 = Point::<E>::base_point2();
let result1 = Sha512::new()
.chain_point(&generator)
.chain_point(base_point2)
.result_scalar::<E>();
assert!(result1.to_bigint().bit_length() > 240);
let result2 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_ne!(result1, result2);
let result3 = Sha512::new()
.chain_point(base_point2)
.chain_point(&generator)
.result_scalar::<E>();
assert_eq!(result2, result3);
}
#[test]
fn create_hmac_test() {
let key = BigInt::sample(512);
let result1 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert!(Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.verify_bigint(&result1)
.is_ok());
let key2 = BigInt::sample(512);
// same data, different key
let result2 = Hmac::<Sha512>::new_bigint(&key2)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_ne!(result1, result2);
// same key, different data
let result3 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.chain_bigint(&BigInt::from(11))
.result_bigint();
assert_ne!(result1, result3);
// same key, same data
let result4 = Hmac::<Sha512>::new_bigint(&key)
.chain_bigint(&BigInt::from(10))
.result_bigint();
assert_eq!(result1, result4)
}
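// Added sketch (not part of the original suite): `chain_bigint` is just the
// by-value spelling of `input_bigint`, so both paths should yield the same
// digest for the same input.
#[test]
fn chain_and_input_bigint_agree() {
let n = BigInt::from(1234567);
let chained = Sha256::new().chain_bigint(&n).result_bigint();
let mut hasher = Sha256::new();
hasher.input_bigint(&n);
let updated = hasher.result_bigint();
assert_eq!(chained, updated);
}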
}
| {
Self::new().chain(bytes).result_bigint()
} | identifier_body |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn | (h: &[u8]) -> bool {
(h[2] & 0x2)!= 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08)!= 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10)!= 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10)!= 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20)!= 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
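// Added sanity-check sketch (not part of the original port): the bit-twiddling
// helpers above, applied to a typical 128 kbps / 44.1 kHz MPEG-1 Layer III
// header (0xFF 0xFB 0x90 0x64, illustrative bytes).
#[cfg(test)]
mod header_bit_tests {
use super::*;
#[test]
fn parses_a_typical_layer3_header() {
let h = [0xFFu8, 0xFB, 0x90, 0x64];
assert!(hdr_test_mpeg1(&h));
assert_eq!(hdr_get_layer(&h), 1); // bit pattern 01 encodes Layer III
assert_eq!(hdr_get_bitrate(&h), 9); // bitrate index 9 (128 kbps for MPEG-1 L3)
assert_eq!(hdr_get_sample_rate(&h), 0); // sample-rate index 0 (44100 Hz)
assert!(!hdr_is_mono(&h)); // mode bits 01: joint stereo
assert!(!hdr_is_layer_1(&h));
}
}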
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
// Read actual bytes out of the buffer (the earlier draft used the byte
// index itself as if it were the data).
next = self.buf[p] as u32 & (255 >> s);
p += 1;
shl -= 8;
while shl > 0 {
cache |= next << shl;
next = self.buf[p] as u32;
p += 1;
shl -= 8;
}
return cache | (next >> -shl);
}
}
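// Added usage sketch (not part of the original port): pulling the first few
// header fields off a buffer with the bit reader above, using the same
// illustrative bytes as the header test (0xFF 0xFB 0x90 0x64).
#[cfg(test)]
mod bs_reader_tests {
use super::Bs;
#[test]
fn reads_sync_version_and_layer_fields() {
let mut bs = Bs::new(vec![0xFF, 0xFB, 0x90, 0x64], 4);
assert_eq!(bs.get_bits(11), 0x7FF); // 11-bit frame sync
assert_eq!(bs.get_bits(2), 3); // version bits 11: MPEG-1
assert_eq!(bs.get_bits(2), 1); // layer bits 01: Layer III
}
}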
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h)!= 0
&& hdr_get_bitrate(h)!= 15
&& hdr_get_sample_rate(h)!= 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&&!(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &=!3;
}
if frame_bytes!= 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if !hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode != MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx!= 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba != 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
m >>= 1; // step to the next mask bit (was missing, leaving the loop endless)
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands != 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i] != 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| hdr_test_padding | identifier_name |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit |
self.pos += n as usize;
// Read from the byte buffer (not the byte index): mask off the bits of
// the first byte that earlier reads already consumed.
next = (self.buf[p as usize] & (255 >> s)) as u32;
p += 1;
shl -= 8;
while shl > 0 {
cache |= next << shl;
next = self.buf[p as usize] as u32;
p += 1;
shl -= 8;
}
return cache | (next >> -shl);
}
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h)!= 0
&& hdr_get_bitrate(h)!= 15
&& hdr_get_sample_rate(h)!= 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&&!(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &=!3;
}
if frame_bytes!= 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if!hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode!= MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx!= 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba!= 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m!= 0 {
let s;
if (mask & m)!= 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// TODO: Check the post and pre-increment order here!!!
scf[scf_idx] = s;
scf_idx += 1;
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands!= 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i]!= 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| {
return 0;
} | conditional_block |
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
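// Illustrative check of the header bit-tests above, using an assumed example
// header for a 44.1 kHz, 128 kbps MPEG-1 Layer III frame (0xFF 0xFB 0x90 0x00).
// The header bytes and this test are not taken from the original code.
#[test]
fn header_bit_tests_example() {
    let h = [0xFFu8, 0xFB, 0x90, 0x00];
    assert!(hdr_test_mpeg1(&h));
    assert!(hdr_test_not_mpeg25(&h));
    assert!(!hdr_is_layer_1(&h)); // layer bits 01 => Layer III
    assert!(!hdr_is_free_format(&h)); // bitrate index 9 => 128 kbps for MPEG-1 Layer III
    assert!(!hdr_is_mono(&h)); // channel mode 00 => stereo
    assert_eq!(hdr_get_bitrate(&h), 9);
    assert_eq!(hdr_get_sample_rate(&h), 0); // index 0 => 44100 Hz
}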
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
// Read from the byte buffer (not the byte index): mask off the bits of
// the first byte that earlier reads already consumed.
next = (self.buf[p as usize] & (255 >> s)) as u32;
p += 1;
shl -= 8;
while shl > 0 {
cache |= next << shl;
next = self.buf[p as usize] as u32;
p += 1;
shl -= 8;
}
return cache | (next >> -shl);
}
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h)!= 0
&& hdr_get_bitrate(h)!= 15
&& hdr_get_sample_rate(h)!= 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&&!(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &=!3;
}
if frame_bytes!= 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) { | 4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if!hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode!= MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx!= 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba!= 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// Store, then advance: this matches the C original's `*scf++ = s`.
scf[scf_idx] = s;
scf_idx += 1;
m >>= 1; // step the mask bit down (4 -> 2 -> 1) so the loop terminates
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands!= 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i]!= 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
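// Minimal bitstream sketch, assuming the MSB-first read order of the
// (corrected) `Bs::get_bits` above. The byte values here are made up for
// illustration and are not part of the original test suite.
#[test]
fn get_bits_reads_msb_first() {
    use super::Bs;
    let mut bs = Bs::new(vec![0b1010_1100, 0b0101_0011], 2);
    assert_eq!(bs.get_bits(4), 0b1010);
    assert_eq!(bs.get_bits(4), 0b1100);
    assert_eq!(bs.get_bits(8), 0b0101_0011);
    // Reading past the limit returns 0 by convention.
    assert_eq!(bs.get_bits(1), 0);
}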
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
} | random_line_split |
|
oldlib.rs | extern crate byteorder;
// use byteorder::{WriteBytesExt, LE};
// use std::io::{self, Cursor, Read, Write};
pub const MAX_SAMPLES_PER_FRAME: usize = 1152 * 2;
/// More than ISO spec's
pub const MAX_FREE_FORMAT_FRAME_SIZE: usize = 2304;
pub const MAX_FRAME_SYNC_MATCHES: usize = 10;
/// MUST be >= 320000/8/32000*1152 = 1440
pub const MAX_L3_FRAME_PAYLOAD_BYTES: usize = MAX_FREE_FORMAT_FRAME_SIZE;
pub const MAX_BITRESERVOIR_BYTES: usize = 511;
pub const SHORT_BLOCK_TYPE: usize = 2;
pub const STOP_BLOCK_TYPE: usize = 3;
pub const MODE_MONO: usize = 3;
pub const MODE_JOINT_STEREO: usize = 1;
pub const HDR_SIZE: usize = 4;
pub mod corrode_test;
pub fn hdr_is_mono(h: &[u8]) -> bool {
// TODO: Might be nicer ways to do these bit-tests
(h[3] & 0xC0) == 0xC0
}
pub fn hdr_is_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0xE0) == 0x60
}
pub fn hdr_is_free_format(h: &[u8]) -> bool {
(h[2] & 0xF0) == 0
}
pub fn hdr_is_crc(h: &[u8]) -> bool {
// TODO: Double-check
(h[1] & 1) == 0
}
pub fn hdr_test_padding(h: &[u8]) -> bool {
(h[2] & 0x2) != 0
}
pub fn hdr_test_mpeg1(h: &[u8]) -> bool {
(h[1] & 0x08) != 0
}
pub fn hdr_test_not_mpeg25(h: &[u8]) -> bool {
(h[1] & 0x10) != 0
}
pub fn hdr_test_i_stereo(h: &[u8]) -> bool {
(h[3] & 0x10) != 0
}
pub fn hdr_test_ms_stereo(h: &[u8]) -> bool {
(h[3] & 0x20) != 0
}
pub fn hdr_get_stereo_mode(h: &[u8]) -> u8 {
((h[3] >> 6) & 3)
}
pub fn hdr_get_stereo_mode_ext(h: &[u8]) -> u8 {
((h[3] >> 4) & 3)
}
pub fn hdr_get_layer(h: &[u8]) -> u8 {
((h[1] >> 1) & 3)
}
pub fn hdr_get_bitrate(h: &[u8]) -> u8 {
(h[2] >> 4)
}
pub fn hdr_get_sample_rate(h: &[u8]) -> u8 {
((h[2] >> 2) & 3)
}
pub fn hdr_is_frame_576(h: &[u8]) -> bool {
(h[1] & 14) == 2
}
pub fn hdr_is_layer_1(h: &[u8]) -> bool {
(h[1] & 6) == 6
}
pub const BITS_DEQUANTIZER_OUT: i32 = -1;
pub const MAX_SCF: i32 = 255 + BITS_DEQUANTIZER_OUT * 4 - 210;
pub const MAX_SCFI: i32 = (MAX_SCF + 3) & !3;
pub struct FrameInfo {
pub frame_bytes: i32,
pub channels: i32,
pub hz: i32,
pub layers: i32,
pub bitrate_kbps: i32,
}
pub struct Mp3Dec {
pub mdct_overlap: [[f32; 2]; 9 * 32],
pub qmf_state: [f32; 15 * 2 * 32],
pub reserv: i32,
pub free_format_bytes: i32,
pub header: [u8; 4],
pub reserv_buf: [u8; 511],
}
// TODO: float vs. int16 output?
// type Mp3Sample = i16;
// pub fn decode_frame(
// dec: &Mp3Dec,
// mp3: &[u8],
// mp3_bytes: usize,
// pcm: &[Mp3Sample],
// info: &FrameInfo,
// ) -> i32 {
// 0
// }
pub struct Bs {
pub buf: Vec<u8>,
pub pos: usize,
pub limit: usize,
}
pub struct L12ScaleInfo {
pub scf: [f32; 3 * 64],
pub total_bands: u8,
pub stereo_bands: u8,
pub bitalloc: [u8; 64],
pub scfcod: [u8; 64],
}
pub struct L12SubbandAlloc {
pub tab_offset: u8,
pub code_tab_width: u8,
pub band_count: u8,
}
pub struct L3GrInfo {
pub sfbtab: Vec<u8>,
pub part_23_length: u16,
pub big_values: u16,
pub scalefac_compress: u16,
pub global_gain: u8,
pub block_type: u8,
pub mixed_block_flag: u8,
pub n_long_sfb: u8,
pub n_short_sfb: u8,
pub table_select: [u8; 3],
pub region_count: [u8; 3],
pub subblock_gain: [u8; 3],
pub preflag: u8,
pub scalefac_scale: u8,
pub count1_table: u8,
pub scfsi: u8,
}
pub struct Mp3DecScratch {
pub bs: Bs,
pub maindata: [u8; MAX_BITRESERVOIR_BYTES + MAX_L3_FRAME_PAYLOAD_BYTES],
pub gr_info: [L3GrInfo; 3],
pub grbuf: [[f32; 576]; 2],
pub scf: [f32; 40],
pub syn: [[f32; 2 * 32]; 18 + 15],
pub ist_pos: [[u8; 39]; 2],
}
impl Bs {
pub fn new(data: Vec<u8>, bytes: usize) -> Self {
Self {
buf: data,
pos: 0,
limit: bytes * 8,
}
}
/// Heckin... this is way more complicated than it
/// needs to be here...
pub fn get_bits(&mut self, n: u32) -> u32 |
}
/*
pub fn hdr_valid(h: &[u8]) -> bool {
h[0] == 0xFF
&& ((h[1] & 0xF0) == 0xF0 || (h[1] & 0xFE) == 0xE2)
&& hdr_get_layer(h)!= 0
&& hdr_get_bitrate(h)!= 15
&& hdr_get_sample_rate(h)!= 3
}
pub fn hdr_compare(h1: &[u8], h2: &[u8]) -> bool {
hdr_valid(h2)
&& ((h1[1] ^ h2[1]) & 0xFE) == 0
&& ((h1[2] ^ h2[2]) & 0x0C) == 0
&&!(hdr_is_free_format(h1) ^ hdr_is_free_format(h2))
}
pub fn hdr_bitrate_kbps(h: &[u8]) -> u32 {
let halfrate: [[[u32; 15]; 3]; 2] = [
[
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 72, 80],
[0, 16, 24, 28, 32, 40, 48, 56, 64, 72, 80, 88, 96, 112, 128],
],
[
[0, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160],
[
0, 16, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
],
[
0, 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224,
],
],
];
2 * halfrate[hdr_test_mpeg1(h) as usize][hdr_get_layer(h) as usize - 1]
[hdr_get_bitrate(h) as usize]
}
pub fn hdr_sample_rate_hz(h: &[u8]) -> u32 {
let g_hz: [u32; 3] = [44100, 48000, 32000];
g_hz[hdr_get_sample_rate(h) as usize]
>> (!hdr_test_mpeg1(h)) as u32
>> (!hdr_test_not_mpeg25(h)) as u32
}
pub fn hdr_frame_samples(h: &[u8]) -> u32 {
if hdr_is_layer_1(h) {
384
} else {
1152 >> (hdr_is_frame_576(h) as i32)
}
}
pub fn hdr_frame_bytes(h: &[u8], free_format_size: u32) -> u32 {
let mut frame_bytes = hdr_frame_samples(h) * hdr_bitrate_kbps(h) * 125 / hdr_sample_rate_hz(h);
if hdr_is_layer_1(h) {
// Slot align
frame_bytes &=!3;
}
if frame_bytes!= 0 {
frame_bytes
} else {
free_format_size
}
}
pub fn hdr_padding(h: &[u8]) -> u32 {
if hdr_test_padding(h) {
if hdr_is_layer_1(h) {
4
} else {
1
}
} else {
0
}
}
pub fn L12_subband_alloc_table(hdr: &[u8], sci: &mut L12ScaleInfo) -> Vec<L12SubbandAlloc> {
let mode = hdr_get_stereo_mode(hdr) as usize;
let mut nbands;
let mut alloc: Vec<L12SubbandAlloc> = vec![];
let stereo_bands = if mode == MODE_MONO {
0
} else if mode == MODE_JOINT_STEREO {
(hdr_get_stereo_mode_ext(hdr) << 2) + 4
} else {
32
};
if hdr_is_layer_1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 76,
code_tab_width: 4,
band_count: 32,
});
nbands = 32;
} else if!hdr_test_mpeg1(hdr) {
alloc.push(L12SubbandAlloc {
tab_offset: 60,
code_tab_width: 4,
band_count: 4,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 7,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 2,
band_count: 19,
});
nbands = 30;
} else {
let sample_rate_idx = hdr_get_sample_rate(hdr);
// TODO: Clean up this comparison
let mut kbps = hdr_bitrate_kbps(hdr) >> ((mode!= MODE_MONO) as u32);
if kbps == 0 {
kbps = 192;
}
alloc.push(L12SubbandAlloc {
tab_offset: 0,
code_tab_width: 4,
band_count: 3,
});
alloc.push(L12SubbandAlloc {
tab_offset: 16,
code_tab_width: 4,
band_count: 8,
});
alloc.push(L12SubbandAlloc {
tab_offset: 32,
code_tab_width: 3,
band_count: 12,
});
alloc.push(L12SubbandAlloc {
tab_offset: 40,
code_tab_width: 2,
band_count: 7,
});
nbands = 27;
if kbps < 56 {
alloc.clear();
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 4,
band_count: 2,
});
alloc.push(L12SubbandAlloc {
tab_offset: 44,
code_tab_width: 3,
band_count: 10,
});
nbands = if sample_rate_idx == 2 { 12 } else { 8 };
} else if (kbps >= 96 && sample_rate_idx!= 1) {
// TODO: sigh, and possibly weep.
// I think this basically just chops off the last few
// entries in the alloc defined above the previous if
// statement.
nbands = 30;
}
}
sci.total_bands = nbands;
sci.stereo_bands = u8::min(stereo_bands, nbands);
alloc
}
pub fn L12_read_scalefactors(bs: &mut Bs, pba: &[u8], scfcod: &[u8], bands: usize, scf: &mut [f32]) {
// TODO: The C version uses macros to build this array statically,
// which is a PITA so for now we just do it the simple and slower way.
let mut g_deq_L12: Vec<f32> = vec![];
{
let mut DQ = |x: f32| {
g_deq_L12.push(9.53674316e-07 / x);
g_deq_L12.push(7.56931807e-07 / x);
g_deq_L12.push(6.00777173e-07 / x);
};
DQ(3.0);
DQ(7.0);
DQ(15.0);
DQ(31.0);
DQ(63.0);
DQ(127.0);
DQ(255.0);
DQ(511.0);
DQ(1023.0);
DQ(2047.0);
DQ(4095.0);
DQ(8191.0);
DQ(16383.0);
DQ(32767.0);
DQ(65535.0);
DQ(3.0);
DQ(5.0);
DQ(9.0);
}
let mut scf_idx = 0;
for i in 0..bands {
let ba = pba[i];
let mask = if ba!= 0 {
4 + ((19 >> scfcod[i]) & 3)
} else {
0
};
let mut m = 4;
while m != 0 {
let s;
if (mask & m) != 0 {
let b = bs.get_bits(6);
let idx = (ba as u32 * 3 - 6 + b % 3) as usize;
s = g_deq_L12[idx] * (1 << 21 >> (b / 3)) as f32;
} else {
s = 0.0;
}
// Store, then advance: this matches the C original's `*scf++ = s`.
scf[scf_idx] = s;
scf_idx += 1;
m >>= 1; // step the mask bit down (4 -> 2 -> 1) so the loop terminates
}
}
}
pub fn L12_read_scale_info(hdr: &[u8], bs: &mut Bs, sci: &mut L12ScaleInfo) {
let g_bitalloc_code_tab: &[u8] = &[
0, 17, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 16, 0, 17, 18, 3, 19, 4, 5, 16, 0, 17, 18, 16, 0, 17, 18, 19, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 0, 17, 18, 3, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0, 2,
3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
];
let subband_alloc = L12_subband_alloc_table(hdr, sci);
let mut subband_alloc_idx = 0;
let mut k: usize = 0;
let mut ba_bits = 0;
let mut ba_code_tab_idx: usize = 0;
for i in 0..(sci.total_bands as usize) {
let ba: u8;
if i == k {
let sb = &subband_alloc[subband_alloc_idx];
k += sb.band_count as usize;
ba_bits = sb.code_tab_width;
ba_code_tab_idx = sb.tab_offset as usize;
subband_alloc_idx += 1;
}
let ba_idx: usize = ba_code_tab_idx + (bs.get_bits(ba_bits as u32) as usize);
ba = g_bitalloc_code_tab[ba_idx];
sci.bitalloc[2 * i + 1] = if sci.stereo_bands!= 0 { ba } else { 0 };
}
for i in 0..(2 * sci.total_bands as usize) {
sci.scfcod[i] = if sci.bitalloc[i]!= 0 {
if hdr_is_layer_1(hdr) {
2
} else {
bs.get_bits(2) as u8
}
} else {
6
};
}
L12_read_scalefactors(
bs,
&sci.bitalloc,
&sci.scfcod,
(sci.total_bands * 2) as usize,
&mut sci.scf,
);
// TODO: This clear can probably be better.
for i in sci.stereo_bands..sci.total_bands {
let i = i as usize;
sci.bitalloc[2 * i + 1] = 0;
}
}
*/
#[cfg(test)]
mod tests {
#[test]
fn it_works() {
assert_eq!(2 + 2, 4);
}
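// Small illustrative check (not from the original suite): the bitstream
// limit is stored in bits, i.e. bytes * 8.
#[test]
fn bs_limit_is_in_bits() {
    let bs = super::Bs::new(vec![0u8; 4], 4);
    assert_eq!(bs.pos, 0);
    assert_eq!(bs.limit, 32);
}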
/*
pub fn wav_header(hz: i32, ch: i16, bips: i32, data_bytes: i32) -> [u8;44] {
// let buffer: &mut [u8;44] = b"RIFFsizeWAVEfmt \x10\x00\x00\x00\x01\x00ch_hz_abpsbabsdatasize";
let mut buffer: [u8;44] = [0;44];
{
let mut c = Cursor::new(&mut buffer[..]);
let size = 44 + data_bytes - 8; // File size - 8
let avg_bytes_per_sec: u64 = bips as u64 * ch as u64 * hz as u64 / 8;
let block_align = bips as u64 * ch as u64 / 8;
// TODO: This alllll needs double checking.
c.write(b"RIFF"); // 0x00 (offset)
c.write_i32::<LE>(size); // 0x04
c.write(b"WAVE"); // 0x08
c.write(b"fmt "); // 0x0C
c.write(b"\x10\x00\x00\x00"); // 0x10
c.write_i16::<LE>(1); // 0x14 -- Integer PCM file format.
c.write_i16::<LE>(ch); // 0x16
c.write_i32::<LE>(hz); // 0x18
c.write_i32::<LE>(avg_bytes_per_sec as i32); // 0x1C -- TODO, better casts
c.write_i16::<LE>(block_align as i16); // 0x20 -- TODO, better casts
c.write_i16::<LE>(bips as i16); // 0x22 -- TODO, better casts
c.write(b"data"); // 0x24
c.write_i32::<LE>(data_bytes); // 0x28
}
buffer
}
/// This shouldn't really be necessary in Rust, I think, since it just
/// reads from the file. Not gonna try factoring it out right now though.
pub fn preload(mut file: impl Read, buf: &mut Vec<u8>) -> io::Result<usize> {
file.read_to_end(buf)
}
*/
}
| {
let mut next: u32;
let mut cache: u32 = 0;
let s = (self.pos & 7) as u32;
let mut shl: i32 = n as i32 + s as i32;
let mut p = self.pos as u32 / 8;
if self.pos + (n as usize) > self.limit {
return 0;
}
self.pos += n as usize;
// Read from the byte buffer (not the byte index): mask off the bits of
// the first byte that earlier reads already consumed.
next = (self.buf[p as usize] & (255 >> s)) as u32;
p += 1;
shl -= 8;
while shl > 0 {
cache |= next << shl;
next = self.buf[p as usize] as u32;
p += 1;
shl -= 8;
}
return cache | (next >> -shl);
} | identifier_body |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::Rng;
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
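// Quick illustrative sketch of the helper above: items come back in
// descending order of their associated count. The labels are made up.
#[allow(dead_code)]
fn sort_descending_example() {
    let ranked = sort_descending(vec![("rare", 1), ("common", 5), ("occasional", 3)]);
    assert_eq!(ranked, vec!["common", "occasional", "rare"]);
}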
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> { | let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
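// Illustrative sketch of the card-string format accepted above: two
// characters per card, rank then suit, e.g. "AsKh2d". This helper is ours,
// not part of the original program.
#[allow(dead_code)]
fn parse_cards_example() {
    let cards = parse_cards_string("AsKh2d"); // Ace of Spades, King of Hearts, Two of Diamonds
    assert_eq!(cards.len(), 3);
    assert!(parse_rank('T').is_some());
    assert!(parse_rank('1').is_none());
    assert!(parse_suit('x').is_none());
}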
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
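// Illustrative only: splitting 10 simulations across 4 threads should hand
// out 3, 3, 2, 2 so that every simulation is assigned exactly once.
#[allow(dead_code)]
fn sim_split_example() {
    let split: Vec<i32> = (0..4).map(|t| get_num_sims_for_thread(10, 4, t)).collect();
    assert_eq!(split, vec![3, 3, 2, 2]);
    assert_eq!(split.iter().sum::<i32>(), 10);
}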
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
} | let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
| random_line_split |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::Rng;
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
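// Illustrative invocation only (binary name and hands are made up):
//   poker_odds -h AsKs -h QdQc -b 2h7s9c -n 50000 -t 4
// estimates AsKs vs QdQc equity on a 2h7s9c flop over 50,000 simulated
// boards using 4 worker threads.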
fn get_initial_board(matches: &Matches) -> Vec<Card> {
if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 |
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
} | identifier_body |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::Rng;
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE {
println!("The given board is full, so there's no uncertainty.");
1
} else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
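// A minimal illustrative check of the helper above: counts sort greatest-first
// and only the items survive. (Assumes the standard `cargo test` harness.)
#[test]
fn sort_descending_orders_by_count() {
    let sorted = sort_descending(vec![("a", 1), ("b", 3), ("c", 2)]);
    assert_eq!(sorted, vec!["b", "c", "a"]);
}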
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn | () -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
    if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
    if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
        'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
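// Rough sizing for the rejection loop above: at most 5 board cards plus 2 hole
// cards per player are excluded, so even a 9-player hand rules out 23 of 52
// cards. A fresh draw is then accepted with probability at least 29/52 (~56%),
// keeping the expected number of draws per empty board slot below 2.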
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
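// Illustrative check of the split above: 10 simulations over 3 threads become
// 4 + 3 + 3, with the remainder going to the earliest threads.
#[test]
fn num_sims_split_covers_total() {
    let per_thread: Vec<i32> = (0..3).map(|t| get_num_sims_for_thread(10, 3, t)).collect();
    assert_eq!(per_thread, vec![4, 3, 3]);
    assert_eq!(per_thread.iter().sum::<i32>(), 10);
}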
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| create_opts | identifier_name |
main.rs | extern crate rand;
extern crate getopts;
extern crate num_cpus;
extern crate cards;
extern crate poker_hands;
use std::env;
use std::collections::HashMap;
use std::str::FromStr;
use std::thread;
use std::sync::*;
use getopts::{Options, Matches, HasArg, Occur};
use rand::{thread_rng, Rng};
use cards::{Card, Rank, Suit, card};
use cards::Rank::*;
use cards::Suit::*;
use poker_hands::{Hand, NUM_HANDS};
fn main() {
let args: Vec<String> = env::args().collect();
let opts = create_opts();
let arg_matches = match opts.parse(&args[1..]) {
Ok(matches) => matches,
Err(error) => panic!("Could not parse {:?}; error: {:?}", args, error)
};
let initial_board = get_initial_board(&arg_matches);
let total_num_sims =
if initial_board.len() == BOARD_SIZE | else {
get_num_sims(&arg_matches)
};
let all_hole_cards = get_hole_cards(&arg_matches);
let num_threads = get_num_threads(&arg_matches);
println!("Simulating {} hands", total_num_sims);
if initial_board.len() > 0 {
println!("For board {:?}", initial_board);
}
println!("Using {} threads", num_threads);
let board_ref = Arc::new(initial_board);
let hole_cards_ref = Arc::new(all_hole_cards);
let outcomes = Arc::new(Mutex::new(HashMap::new()));
let mut children = Vec::with_capacity(num_threads as usize);
for thread_index in 0..num_threads {
let this_num_sims = get_num_sims_for_thread(total_num_sims, num_threads as i32, thread_index as i32);
let this_board_ref = board_ref.clone();
let this_hole_cards_ref = hole_cards_ref.clone();
let this_outcomes = outcomes.clone();
let child_thread = thread::spawn(move || {
simulate_hands(this_num_sims, &this_board_ref, &this_hole_cards_ref, &this_outcomes)
});
children.push(child_thread);
}
for child_thread in children {
match child_thread.join() {
Ok(_) => continue,
Err(e) => panic!("Worker thread died! {:?}", e)
}
}
let final_outcomes = outcomes.lock().unwrap();
let sorted_outcomes = sort_descending(
final_outcomes.iter().map(|(outcome, stats)| (outcome.clone(), stats.total_events())).collect());
for outcome in sorted_outcomes {
let stats = final_outcomes.get(&outcome).unwrap();
let total_events = stats.total_events();
let outcome_percent = (total_events as f64 / total_num_sims as f64) * 100f64;
let outcome_name = name_outcome(&outcome, &hole_cards_ref);
println!("{} ({} times, {}%)", outcome_name, total_events, outcome_percent);
let sorted_hand_indices = sort_descending(
(0..NUM_HANDS).map(|index| (index, stats.events[index])).collect());
for hand_index in sorted_hand_indices {
let hand_events = stats.events[hand_index];
if hand_events == 0 {
continue;
}
let hand_percent = (hand_events as f64 / total_events as f64) * 100f64;
println!("\t{}: {} times, {}%", Hand::name_hand_index(hand_index), hand_events, hand_percent);
}
}
}
fn simulate_hands(num_sims: i32, initial_board: &[Card], all_hole_cards: &[[Card; 2]], outcomes: &Mutex<HashMap<Vec<i32>, HandStats>>) {
for _ in 0..num_sims {
let board = pick_random_board(initial_board, all_hole_cards);
assert!(board.len() == BOARD_SIZE);
let mut hands = Vec::with_capacity(all_hole_cards.len());
for hole_cards in all_hole_cards {
let mut cards: Vec<Card> = Vec::with_capacity(hole_cards.len() + board.len());
cards.extend(board.iter().cloned());
cards.extend(hole_cards.iter().cloned());
// Sort descending - best_hand_of() requires this.
cards.sort_by(|first, second| second.cmp(first));
let hand = Hand::best_hand_of(&cards);
hands.push(hand);
}
assert!(hands.len() == all_hole_cards.len());
let mut winners = Vec::new();
winners.push(0);
let mut best_hand = hands[0];
for index in 1..hands.len() {
let hand = hands[index];
if hand == best_hand {
winners.push(index as i32);
} else if hand > best_hand {
winners.clear();
winners.push(index as i32);
best_hand = hand;
}
}
insert_outcome(&mut outcomes.lock().unwrap(), &winners, &best_hand);
}
}
fn sort_descending<T: Clone>(mut items: Vec<(T, i32)>) -> Vec<T> {
// Switch the order to get greatest-first.
items.sort_by(|&(_, first), &(_, second)| second.cmp(&first));
items.iter().map(|&(ref item, _)| item.clone()).collect()
}
const HOLE_CARDS_ARG: &'static str = "h";
const NUM_SIMS_ARG: &'static str = "n";
const NUM_THREADS_ARG: &'static str = "t";
const BOARD_ARG: &'static str = "b";
fn create_opts() -> Options {
// Unfortunately, there doesn't seem to be a way to require that an option appears at least once.
let mut opts = Options::new();
opts.opt(HOLE_CARDS_ARG, "hole cards", "A single player's hole cards", "XxYy", HasArg::Yes, Occur::Multi);
opts.opt(NUM_SIMS_ARG, "number of simulations", "The number of hands to simulate in order to approximate the true distribution.", "n", HasArg::Yes, Occur::Optional);
opts.opt(NUM_THREADS_ARG, "number of threads to use", "The number of threads to use simultaneously to run the simulations.", "t", HasArg::Yes, Occur::Optional);
opts.opt(BOARD_ARG, "board cards", "The cards already on the board.", "XxYyZz", HasArg::Yes, Occur::Optional);
opts
}
fn get_initial_board(matches: &Matches) -> Vec<Card> {
    if !matches.opt_present(BOARD_ARG) {
return Vec::new();
}
let board_string = matches.opt_str(&BOARD_ARG).unwrap();
let initial_board = parse_cards_string(&board_string);
assert!(initial_board.len() <= BOARD_SIZE, "Initial board has more than {} cards! {}", BOARD_SIZE, board_string);
initial_board
}
fn get_hole_cards(matches: &Matches) -> Vec<[Card; 2]> {
assert!(matches.opt_count(HOLE_CARDS_ARG) >= 1, "No hole cards specified");
let hole_strings = matches.opt_strs(HOLE_CARDS_ARG);
let mut all_hole_cards = Vec::with_capacity(hole_strings.len());
for hole_string in &hole_strings {
let hole_cards = parse_cards_string(hole_string);
assert!(hole_cards.len() == 2, "{} specifies {} cards, not 2", hole_string, hole_cards.len());
all_hole_cards.push([hole_cards[0], hole_cards[1]]);
}
all_hole_cards
}
const DEFAULT_NUM_SIMS: i32 = 10 * 1000;
fn get_num_sims(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_SIMS_ARG, DEFAULT_NUM_SIMS)
}
fn get_num_threads(matches: &Matches) -> i32 {
get_numeric_arg(matches, NUM_THREADS_ARG, num_cpus::get() as i32)
}
fn get_numeric_arg(matches: &Matches, arg: &str, default: i32) -> i32 {
    if !matches.opt_present(arg) {
return default;
}
let num_str = matches.opt_str(arg).unwrap();
let num_maybe: Result<i32, _> = FromStr::from_str(&num_str);
match num_maybe {
Ok(num) => num,
Err(_) => {
println!("Could not parse {} arg as a number: {}; ignoring it.", arg, num_str);
default
}
}
}
fn parse_cards_string(cards_string: &str) -> Vec<Card> {
let chars: Vec<char> = cards_string.chars().collect();
assert!(chars.len() % 2 == 0, "Odd numbers of characters, cannot be cards: {}", cards_string);
let num_cards = chars.len() / 2;
let mut cards = Vec::with_capacity(num_cards);
for card_index in 0..num_cards {
let rank_index = card_index * 2;
let suit_index = rank_index + 1;
let rank_char = chars[rank_index];
let suit_char = chars[suit_index];
let rank = parse_rank(rank_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a rank",
rank_char, rank_index, cards_string));
let suit = parse_suit(suit_char).expect(
&format!("Couldn't parse {} (position {} in {}) as a suit",
suit_char, suit_index, cards_string));
cards.push(card(rank, suit));
}
cards
}
fn parse_rank(rank_char: char) -> Option<Rank> {
let rank = match rank_char {
'A' | 'a' => Ace,
'K' | 'k' => King,
'Q' | 'q' => Queen,
'J' | 'j' => Jack,
'T' | 't' => Ten,
'9' => Nine,
'8' => Eight,
'7' => Seven,
'6' => Six,
'5' => Five,
'4' => Four,
'3' => Three,
'2' => Two,
_ => return None
};
Some(rank)
}
fn parse_suit(suit_char: char) -> Option<Suit> {
let suit = match suit_char {
        'S' | 's' => Spades,
'H' | 'h' => Hearts,
'C' | 'c' => Clubs,
'D' | 'd' => Diamonds,
_ => return None
};
Some(suit)
}
fn insert_outcome(outcomes: &mut HashMap<Vec<i32>, HandStats>, winners: &Vec<i32>, hand: &Hand) {
// Set up default stats if there are none yet.
if let None = outcomes.get(winners) {
outcomes.insert(winners.clone(), HandStats::create());
}
outcomes.get_mut(winners).unwrap().add_event(hand);
}
const BOARD_SIZE: usize = 5;
fn pick_random_board(initial_board: &[Card], all_hole_cards: &[[Card; 2]]) -> [Card; BOARD_SIZE] {
let mut board = [card(Ace, Spades); BOARD_SIZE]; // Dummies
for index in 0..initial_board.len() {
board[index] = initial_board[index];
}
let mut used_indexes: Vec<u8> = Vec::with_capacity(all_hole_cards.len() + BOARD_SIZE);
let card_to_index = |card: &Card| (*card).into();
used_indexes.extend(
initial_board.iter().map(&card_to_index));
used_indexes.extend(
all_hole_cards.iter().
flat_map(|cards| cards). // Flatten all hands into one iterator
map(&card_to_index));
let mut board_index = initial_board.len();
let mut rng = rand::thread_rng();
while board_index < BOARD_SIZE {
/*
Generate random cards and skip them if they're used already.
The assumption is that few cards will be used compared to the
possible 52, so it should skip rarely and be efficient.
*/
let card = rng.gen::<Card>();
let card_index = card.into();
if used_indexes.contains(&card_index) {
continue;
}
used_indexes.push(card_index);
board[board_index] = card;
board_index += 1;
}
board
}
fn get_num_sims_for_thread(total_num_sims: i32, total_num_threads: i32, thread_index: i32) -> i32 {
assert!(total_num_threads > thread_index);
let base_num_sims = total_num_sims / total_num_threads;
let threads_with_extra = total_num_sims % total_num_threads;
let this_threads_extra =
if thread_index < threads_with_extra {
1
} else {
0
};
base_num_sims + this_threads_extra
}
struct HandStats {
events: [i32; NUM_HANDS], // Number of times each hand happened
}
impl HandStats {
fn create() -> HandStats {
HandStats{events: [0; NUM_HANDS]}
}
fn add_event(&mut self, hand: &Hand) {
let event_index: u8 = (*hand).into();
self.events[event_index as usize] += 1;
}
fn total_events(&self) -> i32 {
self.events.iter().fold(0, |aggregate, event| aggregate + event)
}
}
fn name_outcome(outcome: &Vec<i32>, all_hole_cards: &[[Card; 2]]) -> String {
if outcome.len() == 1 {
let hand_index = outcome[0];
return format!("Hand {} {:?} wins", outcome[0], all_hole_cards[hand_index as usize]);
}
if outcome.len() > 0 {
return format!("Chop between hands {}", hands_to_string(all_hole_cards, &outcome));
}
panic!("Empty outcome")
}
fn hands_to_string(hands: &[[Card; 2]], indices: &[i32]) -> String {
let mut string = format!("{:?}", hands[indices[0] as usize]);
for index in 1..indices.len() {
string = string + &format!(", {:?}", hands[indices[index as usize] as usize]);
}
string
}
| {
println!("The given board is full, so there's no uncertainty.");
1
} | conditional_block |
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn deeper(&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
        if left_index + 1 != right_index {
            panic!("Exploding pair doesn't have neighbouring indices, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
}
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
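// Extra illustrative case: nesting is flattened into depths, so in "[[1,2],3]"
// the inner pair sits one level deeper than the lone 3.
#[test]
fn test_parser_depths() {
    let pair = SnailfishNumber::from_str("[[1,2],3]").unwrap();
    assert_eq!(
        pair.values.into_iter().collect::<Vec<_>>(),
        vec![
            SnailfishValue { value: 1, depth: 1 },
            SnailfishValue { value: 2, depth: 1 },
            SnailfishValue { value: 3, depth: 0 }
        ]
    );
}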
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
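// A smaller magnitude worked by hand: [[1,2],[[3,4],5]] gives
// 3*(3*1 + 2*2) + 2*(3*(3*3 + 2*4) + 2*5) = 3*7 + 2*61 = 143.
#[test]
fn test_magnitude_small() {
    assert_eq!(
        SnailfishNumber::from_str("[[1,2],[[3,4],5]]")
            .unwrap()
            .magnitude(),
        143
    );
}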
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]] | [[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
} | random_line_split |
|
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn | (&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
        if left_index + 1 != right_index {
            panic!("Exploding pair doesn't have neighbouring indices, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
}
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
}
| deeper | identifier_name |
day18.rs | use crate::day::{DayResult, PartResult};
use chumsky::prelude::*;
use itertools::Itertools;
use std::{collections::LinkedList, ops::Add, str::FromStr};
pub fn run() -> Result<DayResult, Box<dyn std::error::Error>> {
let part1 = part1(include_str!("inputs/day18.txt"))?;
let part2 = part2(include_str!("inputs/day18.txt"))?;
Ok(DayResult::new(
PartResult::Success(format!("The answer is {}", part1)),
PartResult::Success(format!(
"The largest magnitude from two numbers is {}",
part2
)),
))
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishNumber {
values: LinkedList<SnailfishValue>,
}
#[derive(Debug, Eq, PartialEq, Clone)]
struct SnailfishValue {
value: u32,
depth: usize,
}
impl SnailfishValue {
fn deeper(&self) -> Self {
Self {
value: self.value,
depth: self.depth + 1,
}
}
}
impl SnailfishNumber {
fn split(&mut self) -> bool {
if let Some(index_to_split) = self
.values
.iter()
.enumerate()
.filter(|(_, v)| v.value >= 10)
.map(|(i, _)| i)
.next()
{
let mut back_side = self.values.split_off(index_to_split);
let split_num = back_side.pop_front().unwrap();
let new_depth = split_num.depth + 1;
self.values.push_back(SnailfishValue {
value: split_num.value / 2,
depth: new_depth,
});
self.values.push_back(SnailfishValue {
value: (split_num.value / 2) + (split_num.value % 2),
depth: new_depth,
});
self.values.append(&mut back_side);
true
} else {
false
}
}
fn explode(&mut self) -> bool {
let explode_pair: Vec<(usize, SnailfishValue)> = self
.values
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth >= 4)
.take(2)
.collect();
if explode_pair.len() < 2 {
// no explosions necessary
return false;
}
let left_index = explode_pair[0].0;
let left_value = &explode_pair[0].1;
let right_index = explode_pair[1].0;
let right_value = &explode_pair[1].1;
        if left_index + 1 != right_index {
            panic!("Exploding pair doesn't have neighbouring indices, the list is corrupted. List: {:?}", self.values);
}
let mut back_side = self.values.split_off(left_index);
// drop the pair we're exploding
back_side.pop_front();
back_side.pop_front();
if let Some(to_left) = self.values.back_mut() {
// need to modify the number to the left
to_left.value += left_value.value;
}
// construct new 0 element
let new_zero = SnailfishValue {
value: 0,
depth: left_value.depth - 1,
};
self.values.push_back(new_zero);
if let Some(to_right) = back_side.front_mut() {
// need to modify the number to the right
to_right.value += right_value.value;
}
self.values.append(&mut back_side);
true
}
fn reduce(&mut self) {
if self.explode() {
self.reduce();
} else {
if self.split() {
self.reduce();
}
}
}
fn magnitude(&self) -> u32 {
let mut flattened = self.values.clone();
while flattened.len() > 1 {
// the first two deepest elements must be a pair
let deepest = flattened.iter().map(|v| v.depth).max().unwrap();
let deepest_two = flattened
.iter()
.cloned()
.enumerate()
.filter(|(_, v)| v.depth == deepest)
.take(2)
.collect::<Vec<_>>();
let new_value = SnailfishValue {
value: deepest_two[0].1.value * 3 + deepest_two[1].1.value * 2,
depth: if deepest == 0 { 0 } else { deepest - 1 },
};
let mut back = flattened.split_off(deepest_two[0].0);
back.pop_front();
back.pop_front();
flattened.push_back(new_value);
flattened.append(&mut back);
}
flattened.pop_front().unwrap().value
}
}
impl Add for SnailfishNumber {
type Output = SnailfishNumber;
fn add(self, rhs: Self) -> Self::Output {
let mut values = LinkedList::new();
for v in self
.values
.iter()
.chain(rhs.values.iter())
.map(|v| v.deeper())
{
values.push_back(v)
}
let mut sfn = SnailfishNumber { values };
sfn.reduce();
sfn
}
}
impl FromStr for SnailfishNumber {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let values = parser()
.parse(s)
.map_err(|s| s.into_iter().map(|e| e.to_string()).collect::<String>())?;
Ok(SnailfishNumber { values })
}
}
enum ValueOrList {
Value(SnailfishValue),
List(LinkedList<SnailfishValue>),
}
fn parser() -> impl Parser<char, LinkedList<SnailfishValue>, Error = Simple<char>> {
recursive(|pair| {
let int = text::int(10)
.map(|s: String| s.parse::<u32>().unwrap())
.map(|i| ValueOrList::Value(SnailfishValue { value: i, depth: 0 }));
let int_or_nested_pair = int.or(pair.map(|p| ValueOrList::List(p)));
let inner_pair = int_or_nested_pair
.clone()
.then_ignore(just(','))
.then(int_or_nested_pair)
.map(|(l, r)| {
let mut list = LinkedList::new();
match l {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
match r {
ValueOrList::Value(sv) => list.push_back(sv),
ValueOrList::List(l) => {
for mut sv in l {
sv.depth += 1;
list.push_back(sv);
}
}
}
list
});
inner_pair.delimited_by('[', ']')
})
}
fn part1(input: &str) -> Result<u32, Box<dyn std::error::Error>> |
fn part2(input: &str) -> Result<u32, Box<dyn std::error::Error>> {
let sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<Vec<_>, _>>()?;
let pairs = sfns.into_iter().permutations(2);
let magnitudes = pairs.map(|p| (p[0].clone() + p[1].clone()).magnitude());
let answer = magnitudes.max().ok_or(format!("No numbers were parsed"))?;
Ok(answer)
}
#[test]
fn test_parser() {
let pair = SnailfishNumber::from_str("[1,2]").unwrap();
assert_eq!(
pair.values.into_iter().collect::<Vec<_>>(),
vec![
SnailfishValue { value: 1, depth: 0 },
SnailfishValue { value: 2, depth: 0 }
]
);
}
#[test]
fn test_split() {
let mut must_split = SnailfishNumber::from_str("[[[[0,7],4],[15,[0,13]]],[1,1]]").unwrap();
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,13]]],[1,1]]").unwrap()
);
must_split.split();
assert_eq!(
must_split,
SnailfishNumber::from_str("[[[[0,7],4],[[7,8],[0,[6,7]]]],[1,1]]").unwrap()
)
}
#[test]
fn test_explode() {
let mut must_explode = SnailfishNumber::from_str("[[[[[9,8],1],2],3],4]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[[[0,9],2],3],4]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[7,[6,[5,[4,[3,2]]]]]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[7,[6,[5,[7,0]]]]").unwrap()
);
let mut must_explode = SnailfishNumber::from_str("[[6,[5,[4,[3,2]]]],1]").unwrap();
must_explode.explode();
assert_eq!(
must_explode,
SnailfishNumber::from_str("[[6,[5,[7,0]]],3]").unwrap()
);
}
#[test]
fn test_part1_samples() {
fn test_sample(left: &str, right: &str, result: &str) {
let left = SnailfishNumber::from_str(left).unwrap();
let right = SnailfishNumber::from_str(right).unwrap();
let result = SnailfishNumber::from_str(result).unwrap();
assert_eq!(left + right, result);
}
test_sample(
"[[[[4,3],4],4],[7,[[8,4],9]]]",
"[1,1]",
"[[[[0,7],4],[[7,8],[6,0]]],[8,1]]",
);
}
#[test]
fn test_magnitude() {
assert_eq!(
SnailfishNumber::from_str("[[[[8,7],[7,7]],[[8,6],[7,7]]],[[[0,7],[6,6]],[8,7]]]")
.unwrap()
.magnitude(),
3488
);
}
#[test]
fn test_part1_sample() {
let result = part1(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 4140);
}
#[test]
fn test_part2_sample() {
let result = part2(
"[[[0,[5,8]],[[1,7],[9,6]]],[[4,[1,2]],[[1,4],2]]]
[[[5,[2,8]],4],[5,[[9,9],0]]]
[6,[[[6,2],[5,6]],[[7,6],[4,7]]]]
[[[6,[0,7]],[0,9]],[4,[9,[9,0]]]]
[[[7,[6,4]],[3,[1,3]]],[[[5,5],1],9]]
[[6,[[7,3],[3,2]]],[[[3,8],[5,7]],4]]
[[[[5,4],[7,7]],8],[[8,3],8]]
[[9,3],[[9,9],[6,[4,9]]]]
[[2,[[7,7],7]],[[5,8],[[9,3],[0,2]]]]
[[[[5,2],5],[8,[3,7]]],[[5,[7,5]],[4,4]]]",
)
.unwrap();
assert_eq!(result, 3993);
}
| {
let mut sfns = input
.lines()
.map(|l| SnailfishNumber::from_str(l))
.collect::<Result<LinkedList<_>, _>>()?;
let first = sfns.pop_front().ok_or(format!("No numbers were parsed"))?;
let result = sfns.into_iter().fold(first, |a, x| a + x);
Ok(result.magnitude())
} | identifier_body |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
        if len != size_of::<StreamID>() {
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
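// Both fields are stored big-endian by `encode` so that the byte-wise
// comparison used inside the radix tree matches the numeric ordering of
// (ms, seq); `from_buf` reverses the conversion when keys are read back.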
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
         * is just a set of fields that is taken as reference in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 |... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
         * (deleted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
         * if the fields of the entry are the same as the master entry fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
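    // A hedged illustration with hypothetical field names: for a field/value
    // slice like ["sensor-id", "1234", "temperature", "19.8"], the master
    // entry built above would hold, in order: count = 1, deleted = 0,
    // num-fields = 2, the field names "sensor-id" and "temperature", and the
    // trailing 0 terminator. The values themselves are not stored in the
    // master entry.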
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | {
return StreamID::default();
} | conditional_block |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn | (&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
        if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
         * is just a set of fields that is taken as reference in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a stand alone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 |... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
         * (deleted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
         * if the fields of the entry are the same as the master entry fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | to_buf | identifier_name |
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
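// NOTE: `std::mem::uninitialized` is deprecated and producing an uninitialized integer
// is considered undefined behaviour; `StreamID::default()` (or `MaybeUninit<StreamID>`)
// would be a sound way to reserve this out-parameter before the FFI call fills it in.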
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
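// Illustrative, self-contained check (not part of the original crate): `encode` byte-swaps
// both fields to big-endian so that byte-wise key comparison inside the radix tree matches
// the numeric ordering of (ms, seq), and `from_buf` undoes the swap. This assumes the
// `RaxKey` trait is reachable through `rax::*`, as in the `tests` module further down.
#[cfg(test)]
mod stream_id_key_tests {
    use super::*;
    use rax::*;
    use std::mem::size_of;

    #[test]
    fn encode_and_from_buf_round_trip() {
        let id = StreamID { ms: 0x0102_0304_0506_0708, seq: 42 };
        let encoded = id.encode();
        let (ptr, len) = encoded.to_buf();
        assert_eq!(len, size_of::<StreamID>());
        let decoded = StreamID::from_buf(ptr, len);
        assert_eq!((decoded.ms, decoded.seq), (id.ms, id.seq));
    }
}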
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
}
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as reference in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a standalone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 |... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (deleted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master entry fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
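// A small self-contained model (illustration only, not the real listpack FFI) of the
// "master entry" header described in the comment above, to make the
// | count | deleted | num-fields | field_1 .. field_N | 0 | layout concrete.
// The field names used in the test are made-up example data.
#[cfg(test)]
mod master_entry_layout_example {
    fn master_entry(fields: &[&str]) -> Vec<String> {
        let numfields = fields.len() / 2; // fields arrive as key/value pairs
        let mut entries = Vec::new();
        entries.push("1".to_string()); // count: one valid entry so far
        entries.push("0".to_string()); // deleted: none yet
        entries.push(numfields.to_string()); // num-fields
        for i in 0..numfields {
            // only the field *names* are stored in the master entry
            entries.push(fields[i * 2].to_string());
        }
        entries.push("0".to_string()); // zero terminator for right-to-left scans
        entries
    }

    #[test]
    fn layout_matches_the_comment_above() {
        let m = master_entry(&["id", "1", "data", "{}"]);
        assert_eq!(m, vec!["1", "0", "2", "id", "data", "0"]);
    }
}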
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull( | s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | random_line_split |
|
stream.rs | #![allow(dead_code)]
use libc;
use listpack::*;
use rax::*;
use sds::*;
use std;
use std::default::Default;
use std::fmt;
use std::mem::size_of;
use std::ptr;
pub struct Stream {
pub s: *mut stream,
}
const STREAM_ID: StreamID = StreamID { ms: 0, seq: 0 };
const STREAM_ID_REF: *const StreamID = &STREAM_ID as *const StreamID;
impl Stream {
pub fn new() -> Stream {
return Stream { s: unsafe { streamNew() } };
}
fn lookup_consumer_group(&self, groupname: Sds) -> *mut streamCG {
unsafe { streamLookupCG(self.s, groupname) }
}
pub fn append() {}
pub fn append_vector(&self, fields: *mut Sds, len: usize) -> StreamID {
unsafe {
let added_id: StreamID = std::mem::uninitialized();
streamAppendItemSDSMap(
self.s,
fields,
// &fields as *mut *mut _ as *mut *mut libc::c_void,
len as i64,
&added_id,
ptr::null_mut(),
);
added_id
}
}
// pub fn append(&self, fields: &mut Vec<Sds>) {
// unsafe {
// let mut added_id: *mut StreamID = ptr::null_mut();
//// let mut added_id: *mut StreamID = ptr::null_mut();
//
// streamAppendItem2(
// self.s,
// fields.as_mut_ptr(),
// fields.len() as i64,
// added_id,
// ptr::null_mut(),
// )
// }
// }
pub fn append_stream() {}
}
//
impl Drop for Stream {
fn drop(&mut self) {
unsafe { freeStream(self.s) }
}
}
#[derive(Copy)]
#[repr(C)]
pub struct StreamID {
ms: libc::uint64_t,
seq: libc::uint64_t,
}
impl fmt::Debug for StreamID {
fn fmt(&self, _f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
Ok(())
}
}
impl Default for StreamID {
fn default() -> Self {
StreamID { ms: 0, seq: 0 }
}
}
impl Clone for StreamID {
fn clone(&self) -> Self {
StreamID { ms: self.ms, seq: self.seq }
}
}
impl RaxKey for StreamID {
type Output = StreamID;
fn encode(self) -> Self::Output {
StreamID {
ms: self.ms.to_be(),
seq: self.seq.to_be(),
}
}
fn to_buf(&self) -> (*const u8, usize) {
(self as *const _ as *const u8, size_of::<StreamID>())
}
fn from_buf(ptr: *const u8, len: usize) -> StreamID {
if len != size_of::<StreamID>() {
return StreamID::default();
}
unsafe {
StreamID {
ms: u64::from_be(*(ptr as *mut [u8; 8] as *mut u64)),
seq: u64::from_be(*(ptr.offset(8) as *mut [u8; 8] as *mut u64)),
}
}
}
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct EntryPack;
//use std::fs::File;
//use std::io::prelude::*;
impl EntryPack {
fn read_from_from() |
fn deserialize() -> *mut listpack {
std::ptr::null_mut()
}
fn append(lp: *mut listpack, fields: &[Sds]) -> *mut listpack {
/* Create a new listpack and radix tree node if needed. Note that when
* a new listpack is created, we populate it with a "master entry". This
* is just a set of fields that is taken as reference in order to compress
* the stream entries that we'll add inside the listpack.
*
* Note that while we use the first added entry fields to create
* the master entry, the first added entry is NOT represented in the master
* entry, which is a standalone object. But of course, the first entry
* will compress well because it's used as reference.
*
* The master entry is composed like in the following example:
*
* +-------+---------+------------+---------+--/--+---------+---------+-+
* | count | deleted | num-fields | field_1 | field_2 |... | field_N |0|
* +-------+---------+------------+---------+--/--+---------+---------+-+
*
* count and deleted just represent respectively the total number of
* entries inside the listpack that are valid, and marked as deleted
* (deleted flag in the entry flags set). So the total number of items
* actually inside the listpack (both deleted and not) is count+deleted.
*
* The real entries will be encoded with an ID that is just the
* millisecond and sequence difference compared to the key stored at
* the radix tree node containing the listpack (delta encoding), and
* if the fields of the entry are the same as the master entry fields, the
* entry flags will specify this fact and the entry fields and number
* of fields will be omitted (see later in the code of this function).
*
* The "0" entry at the end is the same as the 'lp-count' entry in the
* regular stream entries (see below), and marks the fact that there are
* no more entries, when we scan the stream from right to left. */
unsafe {
if lp.is_null() {
let numfields = fields.len() / 2;
let mut lp = lpNew();
lp = lpAppendInteger(lp, 1);
lp = lpAppendInteger(lp, 0);
lp = lpAppendInteger(lp, numfields as i64);
for i in 0..numfields {
let field = fields[i * 2];
lp = lpAppend(lp, field as *mut u8, get_len(field) as u32);
}
lp = lpAppendInteger(lp, 0); /* Master entry zero terminator. */
}
}
lp
}
// fn append(lp: *mut listpack, map: &[Sds]) -> (*mut listpack, bool) {
// (lp, true)
// }
}
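// Hedged sketch (not from the original sources): the comment above says real entries
// store only the (ms, seq) difference from the master StreamID held at the radix-tree
// key. A minimal model of that delta encoding using signed differences:
#[cfg(test)]
mod delta_encoding_example {
    fn id_delta(master: (u64, u64), entry: (u64, u64)) -> (i64, i64) {
        (
            entry.0 as i64 - master.0 as i64,
            entry.1 as i64 - master.1 as i64,
        )
    }

    #[test]
    fn entries_are_relative_to_the_master_id() {
        let master = (1_600_000_000_000, 0);
        let entry = (1_600_000_000_250, 3);
        assert_eq!(id_delta(master, entry), (250, 3));
    }
}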
#[derive(Clone, Copy)]
#[repr(C)]
pub struct stream {
rax: *mut ::rax::rax,
length: libc::uint64_t,
last_id: StreamID,
cgroups: *mut u8,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamIterator;
// stream: *mut stream,
// master_id: StreamID,
// master_fields_count: libc::uint64_t,
// master_fields_start
//}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamCG {
last_id: StreamID,
pel: *mut rax,
consumers: *mut rax,
}
//#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamConsumer {
seen_time: libc::c_longlong,
name: Sds,
pel: *mut rax,
}
#[derive(Clone, Copy)]
#[repr(C)]
pub struct streamNACK {
delivery_time: libc::c_longlong,
delivery_count: libc::uint64_t,
consumer: *mut streamConsumer,
}
#[allow(improper_ctypes)]
#[allow(non_snake_case)]
#[link(name = "redismodule", kind = "static")]
extern "C" {
// fn createObject()
fn streamNew() -> *mut stream;
fn freeStream(s: *mut stream);
fn streamAppendItemSDSMap(
s: *mut stream,
argv: *mut Sds,
numfields: libc::int64_t,
added_id: *const StreamID,
use_id: *mut StreamID,
);
fn streamIteratorStart(
si: *mut streamIterator,
s: *mut stream,
start: StreamID,
end: StreamID,
rev: libc::c_int,
);
fn streamIteratorGetID(
si: *mut streamIterator,
id: *mut StreamID,
numfields: *mut libc::int64_t,
) -> libc::c_int;
fn streamIteratorGetField(
si: *mut streamIterator,
fieldptr: *mut *mut u8,
valueptr: *mut *mut u8,
fieldlen: *mut libc::int64_t,
valuelen: *mut libc::int64_t,
);
fn streamIteratorRemoveEntry(
si: *mut streamIterator,
id: *mut StreamID,
) -> libc::c_int;
fn streamIteratorStop(
si: *mut streamIterator,
) -> libc::c_int;
fn streamDeleteItem(
s: *mut stream,
id: *mut StreamID,
) -> libc::c_int;
fn string2ull(
s: *const libc::c_char,
value: *mut libc::uint64_t,
) -> libc::c_int;
fn streamCreateNACK(
consumer: *mut streamConsumer
) -> *mut streamNACK;
fn streamFreeNACK(
na: *mut streamNACK
);
fn streamFreeConsumer(
sc: *mut streamConsumer
);
fn streamCreateCG(
s: *mut stream,
name: *mut libc::c_char,
namelen: libc::size_t, id: *mut StreamID,
) -> *mut streamCG;
fn streamFreeCG(cg: *mut streamCG);
fn streamLookupCG(
s: *mut stream,
groupname: Sds,
) -> *mut streamCG;
fn streamLookupConsumer(
cg: *mut streamCG,
name: Sds,
create: libc::c_int,
) -> *mut streamConsumer;
fn streamDelConsumer(
cg: *mut streamCG,
name: Sds,
) -> libc::uint64_t;
}
#[cfg(test)]
mod tests {
use rax::*;
use sds;
// use std;
use stream::Stream;
#[test]
fn it_works() {
let s = Stream::new();
// let mut array = ArrayVec::from([
// sds::sds_new("id"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
// ]);
let mut x = [
sds::new("128"),
sds::new("123"),
sds::new("1234"),
sds::new("12345"),
// sds::sds_from_long_long(1),
// sds::sds_new("auth-key"),
// sds::sds_new_len("some_really_long_auth_ley"),
// sds::sds_new("data"),
// sds::sds_new_len("{\"id\": \"JSON_ID\"}")
];
let ss = sds::new("hi");
// sds::sds_len(ss);
println!("{}", sds::get_len(ss));
// sds::sds_dup(x[0]);
// sds::sds_dup(x[1]);
for _ in 0..1000 {
let mut id = s.append_vector(x.as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
unsafe {
raxShow((*s.s).rax);
}
// let mut id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
// id = s.append_vector((x).as_mut_ptr(), x.len() / 2);
// println!("{}-{}", id.ms, id.seq);
}
} | {
// std::fs::File::open()
//
// let mut f = File::open(filename).expect("file not found");
//
// let mut contents = String::new();
// f.read_to_string(&mut contents)
// .expect("something went wrong reading the file");
} | identifier_body |
vk.rs | use vulkano::device::Queue;
use vulkano::swapchain::Surface;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer };
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::format::Format;
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::SwapchainImage;
use vulkano::image::attachment::AttachmentImage;
use vulkano::instance::Instance;
use vulkano::instance::PhysicalDevice;
use vulkano::pipeline::vertex::TwoBuffersDefinition;
use vulkano::pipeline::viewport::Viewport;
use vulkano::pipeline::{GraphicsPipeline, GraphicsPipelineAbstract};
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
use simple_error::SimpleError;
use std::sync::Arc;
use std::iter;
use std::time::Instant;
use std::error::Error;
use crate::graphics::*;
pub struct VulkanBackend {
show_fps: bool,
device: Arc<Device>,
vs: vs::Shader,
fs: fs::Shader,
swapchain: Arc<Swapchain<winit::Window>>,
images: Vec<Arc<SwapchainImage<Window>>>,
render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
surface: Arc<Surface<winit::Window>>,
queue: Arc<Queue>,
events_loop: EventsLoop,
phys_dims: [u32; 2],
log_dims: [u32; 2],
}
impl VulkanBackend {
fn window_size_dependent_setup(&self) -> Result<(Arc<(dyn GraphicsPipelineAbstract + Send + Sync)>,
Vec<Arc<dyn FramebufferAbstract + Send + Sync>>),
Box<dyn Error>>{
let dimensions = self.images[0].dimensions();
let depth_buffer = AttachmentImage::transient(
self.device.clone(),
dimensions,
Format::D16Unorm)?;
let framebuffers = self.images.iter().map(|image| {
let buf = Framebuffer::start(self.render_pass.clone())
.add(image.clone())?
.add(depth_buffer.clone())?
.build()?;
Ok(Arc::new(
buf
) as Arc<dyn FramebufferAbstract + Send + Sync>)
}).collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input(TwoBuffersDefinition::<VkVertex, VkColour>::new())
.vertex_shader(self.vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.viewports(iter::once(Viewport {
origin: [0.0, 0.0],
dimensions: [dimensions[0] as f32, dimensions[1] as f32],
depth_range: 0.0..1.0,
}))
.fragment_shader(self.fs.main_entry_point(), ())
.blend_alpha_blending()
.depth_stencil_simple_depth()
.render_pass(Subpass::from(self.render_pass.clone(), 0)
.ok_or(SimpleError::new("Failed to load subpass"))?)
.build(self.device.clone())?);
Ok((pipeline, framebuffers))
}
fn convert_vertex(&self, vert: Vertex) -> VkVertex {
let mut position = match vert {
Vertex::Xy(x, y) => [x, y, 0.0],
Vertex::Xyz(x, y, z) => [x, y, z]
};
position[0] /= self.log_dims[0] as f32;
position[1] /= self.log_dims[1] as f32;
position[0] -= 0.5;
position[1] -= 0.5;
position[0] *= 2.;
position[1] *= 2.;
VkVertex { position }
}
}
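// Quick self-contained illustration (example numbers only, not taken from the code above)
// of what `convert_vertex` computes: a pixel coordinate is divided by the logical window
// extent, shifted by -0.5 and doubled, i.e. mapped into Vulkan's [-1, 1] NDC range.
#[cfg(test)]
mod ndc_mapping_example {
    fn to_ndc(pixel: f32, extent: f32) -> f32 {
        (pixel / extent - 0.5) * 2.0
    }

    #[test]
    fn pixel_to_ndc_edges_and_centre() {
        assert_eq!(to_ndc(0.0, 800.0), -1.0); // left edge
        assert_eq!(to_ndc(400.0, 800.0), 0.0); // centre
        assert_eq!(to_ndc(800.0, 800.0), 1.0); // right edge
        assert_eq!(to_ndc(200.0, 800.0), -0.5);
    }
}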
impl GfxProvider for VulkanBackend {
fn | () -> Result<Self, Box<dyn Error>> {
println!("Beginning Vulkan setup...");
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None)
}?;
// We then choose which physical device to use.
//
// In a real application, there are three things to take into consideration:
//
// - Some devices may not support some of the optional features that may be required by your
// application. You should filter out the devices that don't support your app.
//
// - Not all devices can draw to a certain surface. Once you create your window, you have to
// choose a device that is capable of drawing to it.
//
// - You probably want to leave the choice between the remaining devices to the user.
//
let mut physical_devices = PhysicalDevice::enumerate(&instance);
for device in physical_devices.clone() {
println!("Found device: {} (type: {:?})", device.name(), device.ty());
}
let physical = physical_devices.next().ok_or(SimpleError::new("Found no devices"))?;
// Some debug info.
println!("Using {}.", physical.name());
let events_loop = EventsLoop::new();
let surface = WindowBuilder::new()
// .with_transparency(true)
.with_decorations(false)
.build_vk_surface(&events_loop, instance.clone())?;
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).ok_or(SimpleError::new("Found no suitable devices"))?;
let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned())?;
let queue = queues.next().ok_or(SimpleError::new("Failed to create queue"))?;
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
let (swapchain, images) = {
let caps = surface.capabilities(physical)?;
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next()
.ok_or(SimpleError::new("Found no transparency-supporting devices"))?;
let format = caps.supported_formats[0].0;
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
phys_dims, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None)
}?;
let vs = vs::Shader::load(device.clone())?;
let fs = fs::Shader::load(device.clone())?;
let render_pass = Arc::new(vulkano::single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
},
depth: {
load: Clear,
store: DontCare,
format: Format::D16Unorm,
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {depth}
}
)?);
let show_fps = false;
let images = images.to_vec();
Ok(Self {
show_fps,
device,
vs,
fs,
images,
render_pass,
swapchain,
surface,
queue,
events_loop,
phys_dims,
log_dims
})
}
fn show_fps(mut self) -> Self {
self.show_fps = true;
self
}
fn run(mut self, mut vertex_producer: Box<dyn VertexProducer>) -> Result<(), Box<dyn Error>> {
let (mut pipeline, mut framebuffers) = self.window_size_dependent_setup()?;
let mut recreate_swapchain = false;
let window = self.surface.window();
let mut previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<dyn GpuFuture>;
let mut t0 = Instant::now();
let mut updates = 0;
let fps_freq = 100;
loop {
if self.show_fps {
// The below line panics on my Intel Ultra HD 620 setup,
// but only on debug. It seems to be a bug in Vulkano, specifically
// a race condition caused by the driver behaving differently to how
// they thought it would.
previous_frame_end.cleanup_finished();
updates += 1;
if updates % fps_freq == 0 {
let t = Instant::now();
let ms = t.duration_since(t0).as_millis() as f32 / fps_freq as f32;
let fps = 1000.0 / ms;
println!("{} fps", fps);
t0 = Instant::now();
}
}
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
self.phys_dims = phys_dims;
self.log_dims = log_dims;
let (new_swapchain, new_images) = match self.swapchain.recreate_with_dimension(phys_dims) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
Err(err) => panic!("{:?}", err)
};
self.swapchain = new_swapchain;
self.images = new_images.to_vec();
let (new_pipeline, new_framebuffers) = self.window_size_dependent_setup()?;
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let (image_num, acquire_future) = match swapchain::acquire_next_image(self.swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
continue;
},
Err(err) => panic!("{:?}", err)
};
let clear_values = vec![[1., 0., 1., 1.].into(), 1f32.into()];
let (vertices, colours, indices) = vertex_producer.get_data(RuntimeParams {
window_width: self.log_dims[0] as u16,
window_height: self.log_dims[1] as u16
});
let vertices: Vec<VkVertex> = vertices.into_iter().map(|vert| self.convert_vertex(vert)).collect();
let colours: Vec<VkColour> = colours.into_iter().map(|col| VkColour::from(col)).collect();
let vertex_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), vertices.iter().cloned())?;
let colour_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), colours.iter().cloned())?;
let index_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), indices.iter().cloned())?;
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(), self.queue.family())?
.begin_render_pass(framebuffers[image_num].clone(), false, clear_values)?
.draw_indexed(
pipeline.clone(),
&DynamicState::none(),
vec!(vertex_buffer.clone(), colour_buffer.clone()),
index_buffer.clone(), (), ())?
.end_render_pass()?
.build()?;
let future = previous_frame_end.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)?
.then_swapchain_present(self.queue.clone(), self.swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Box::new(future) as Box<_>;
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
}
let mut done = false;
self.events_loop.poll_events(|ev| {
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => ()
}
});
if done {
return Ok(());
}
}
}
}
#[derive(Default, Debug, Clone)]
struct VkVertex { position: [f32; 3] }
vulkano::impl_vertex!(VkVertex, position);
#[derive(Default, Debug, Clone)]
struct VkColour { colour: [f32; 4] }
vulkano::impl_vertex!(VkColour, colour);
impl From<Colour> for VkColour {
fn from(col: Colour) -> Self {
let mut colour = match col {
Colour::Rgb(r, g, b) => [r, g, b, 1.0],
Colour::Rgba(r, g, b, a) => [r, g, b, a]
};
// Convert from sRGB; the Vulkano API doesn't allow us to change the colour space
for i in colour.iter_mut() {
*i = i.powf(2.2);
}
Self { colour }
}
}
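// The `powf(2.2)` above is the usual gamma approximation of the sRGB transfer function.
// For comparison, a sketch of the piecewise-exact sRGB-to-linear conversion (standard
// definition; not what the code above uses):
#[allow(dead_code)]
fn srgb_to_linear_exact(c: f32) -> f32 {
    if c <= 0.04045 {
        c / 12.92
    } else {
        ((c + 0.055) / 1.055).powf(2.4)
    }
}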
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec4 colour;
layout(location = 0) out vec4 fragColour;
void main() {
gl_Position = vec4(position, 1.0);
fragColour = colour;
}
"
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
#version 450
layout(location = 0) in vec4 fragColour;
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(fragColour);
}
"
}
} | new | identifier_name |
vk.rs | use vulkano::device::Queue;
use vulkano::swapchain::Surface;
use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer };
use vulkano::command_buffer::{AutoCommandBufferBuilder, DynamicState};
use vulkano::device::{Device, DeviceExtensions};
use vulkano::format::Format;
use vulkano::framebuffer::{Framebuffer, FramebufferAbstract, Subpass, RenderPassAbstract};
use vulkano::image::SwapchainImage;
use vulkano::image::attachment::AttachmentImage;
use vulkano::instance::Instance;
use vulkano::instance::PhysicalDevice;
use vulkano::pipeline::vertex::TwoBuffersDefinition;
use vulkano::pipeline::viewport::Viewport;
use vulkano::pipeline::{GraphicsPipeline, GraphicsPipelineAbstract};
use vulkano::swapchain::{AcquireError, PresentMode, SurfaceTransform, Swapchain, SwapchainCreationError};
use vulkano::swapchain;
use vulkano::sync::{GpuFuture, FlushError};
use vulkano::sync;
use vulkano_win::VkSurfaceBuild;
use winit::{EventsLoop, Window, WindowBuilder, Event, WindowEvent};
use simple_error::SimpleError;
use std::sync::Arc;
use std::iter;
use std::time::Instant;
use std::error::Error;
use crate::graphics::*;
pub struct VulkanBackend {
show_fps: bool,
device: Arc<Device>,
vs: vs::Shader,
fs: fs::Shader,
swapchain: Arc<Swapchain<winit::Window>>,
images: Vec<Arc<SwapchainImage<Window>>>,
render_pass: Arc<dyn RenderPassAbstract + Send + Sync>,
surface: Arc<Surface<winit::Window>>,
queue: Arc<Queue>,
events_loop: EventsLoop,
phys_dims: [u32; 2],
log_dims: [u32; 2],
}
impl VulkanBackend {
fn window_size_dependent_setup(&self) -> Result<(Arc<(dyn GraphicsPipelineAbstract + Send + Sync)>,
Vec<Arc<dyn FramebufferAbstract + Send + Sync>>),
Box<dyn Error>>{
let dimensions = self.images[0].dimensions();
let depth_buffer = AttachmentImage::transient(
self.device.clone(),
dimensions,
Format::D16Unorm)?;
let framebuffers = self.images.iter().map(|image| {
let buf = Framebuffer::start(self.render_pass.clone())
.add(image.clone())?
.add(depth_buffer.clone())?
.build()?;
Ok(Arc::new(
buf
) as Arc<dyn FramebufferAbstract + Send + Sync>)
}).collect::<Result<Vec<_>, Box<dyn Error>>>()?;
let pipeline = Arc::new(GraphicsPipeline::start()
.vertex_input(TwoBuffersDefinition::<VkVertex, VkColour>::new())
.vertex_shader(self.vs.main_entry_point(), ())
.triangle_list()
.viewports_dynamic_scissors_irrelevant(1)
.viewports(iter::once(Viewport {
origin: [0.0, 0.0],
dimensions: [dimensions[0] as f32, dimensions[1] as f32],
depth_range: 0.0..1.0,
}))
.fragment_shader(self.fs.main_entry_point(), ())
.blend_alpha_blending()
.depth_stencil_simple_depth()
.render_pass(Subpass::from(self.render_pass.clone(), 0)
.ok_or(SimpleError::new("Failed to load subpass"))?)
.build(self.device.clone())?);
Ok((pipeline, framebuffers))
}
fn convert_vertex(&self, vert: Vertex) -> VkVertex {
let mut position = match vert {
Vertex::Xy(x, y) => [x, y, 0.0],
Vertex::Xyz(x, y, z) => [x, y, z]
};
position[0] /= self.log_dims[0] as f32;
position[1] /= self.log_dims[1] as f32;
position[0] -= 0.5;
position[1] -= 0.5;
position[0] *= 2.;
position[1] *= 2.;
VkVertex { position }
}
}
impl GfxProvider for VulkanBackend {
fn new() -> Result<Self, Box<dyn Error>> {
println!("Beginning Vulkan setup...");
let instance = {
let extensions = vulkano_win::required_extensions();
Instance::new(None, &extensions, None)
}?;
// We then choose which physical device to use.
//
// In a real application, there are three things to take into consideration:
//
// - Some devices may not support some of the optional features that may be required by your
// application. You should filter out the devices that don't support your app.
//
// - Not all devices can draw to a certain surface. Once you create your window, you have to
// choose a device that is capable of drawing to it.
//
// - You probably want to leave the choice between the remaining devices to the user.
//
let mut physical_devices = PhysicalDevice::enumerate(&instance);
for device in physical_devices.clone() {
println!("Found device: {} (type: {:?})", device.name(), device.ty());
}
let physical = physical_devices.next().ok_or(SimpleError::new("Found no devices"))?;
// Some debug info.
println!("Using {}.", physical.name());
let events_loop = EventsLoop::new();
let surface = WindowBuilder::new()
// .with_transparency(true)
.with_decorations(false)
.build_vk_surface(&events_loop, instance.clone())?;
let window = surface.window();
let queue_family = physical.queue_families().find(|&q| {
q.supports_graphics() && surface.is_supported(q).unwrap_or(false)
}).ok_or(SimpleError::new("Found no suitable devices"))?;
let device_ext = DeviceExtensions { khr_swapchain: true, ..DeviceExtensions::none() };
let (device, mut queues) = Device::new(physical, physical.supported_features(), &device_ext,
[(queue_family, 0.5)].iter().cloned())?;
let queue = queues.next().ok_or(SimpleError::new("Failed to create queue"))?;
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
let (swapchain, images) = {
let caps = surface.capabilities(physical)?;
let usage = caps.supported_usage_flags;
let alpha = caps.supported_composite_alpha.iter().next()
.ok_or(SimpleError::new("Found no transparency-supporting devices"))?;
let format = caps.supported_formats[0].0;
Swapchain::new(device.clone(), surface.clone(), caps.min_image_count, format,
phys_dims, 1, usage, &queue, SurfaceTransform::Identity, alpha,
PresentMode::Fifo, true, None)
}?;
let vs = vs::Shader::load(device.clone())?;
let fs = fs::Shader::load(device.clone())?;
let render_pass = Arc::new(vulkano::single_pass_renderpass!(device.clone(),
attachments: {
color: {
load: Clear,
store: Store,
format: swapchain.format(),
samples: 1,
},
depth: {
load: Clear,
store: DontCare,
format: Format::D16Unorm,
samples: 1,
}
},
pass: {
color: [color],
depth_stencil: {depth}
}
)?);
let show_fps = false;
let images = images.to_vec();
Ok(Self {
show_fps,
device,
vs,
fs,
images,
render_pass,
swapchain,
surface,
queue,
events_loop,
phys_dims,
log_dims
})
}
fn show_fps(mut self) -> Self {
self.show_fps = true;
self
}
fn run(mut self, mut vertex_producer: Box<dyn VertexProducer>) -> Result<(), Box<dyn Error>> {
let (mut pipeline, mut framebuffers) = self.window_size_dependent_setup()?;
let mut recreate_swapchain = false;
let window = self.surface.window();
let mut previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<dyn GpuFuture>;
let mut t0 = Instant::now();
let mut updates = 0;
let fps_freq = 100;
loop {
if self.show_fps {
// The below line panics on my Intel Ultra HD 620 setup,
// but only on debug. It seems to be a bug in Vulkano, specifically
// a race condition caused by the driver behaving differently to how
// they thought it would.
previous_frame_end.cleanup_finished();
updates += 1;
if updates % fps_freq == 0 {
let t = Instant::now();
let ms = t.duration_since(t0).as_millis() as f32 / fps_freq as f32;
let fps = 1000.0 / ms;
println!("{} fps", fps);
t0 = Instant::now();
}
}
// Whenever the window resizes we need to recreate everything dependent on the window size.
// In this example that includes the swapchain, the framebuffers and the dynamic state viewport.
if recreate_swapchain {
// Get the new dimensions of the window.
let (phys_dims, log_dims) = if let Some(dimensions) = window.get_inner_size() {
let log: (u32, u32) = dimensions.into();
let phys: (u32, u32) = dimensions.to_physical(window.get_hidpi_factor()).into();
([phys.0, phys.1], [log.0, log.1])
} else {
return Err("Failed to load window dimensions".into());
};
self.phys_dims = phys_dims;
self.log_dims = log_dims;
let (new_swapchain, new_images) = match self.swapchain.recreate_with_dimension(phys_dims) {
Ok(r) => r,
Err(SwapchainCreationError::UnsupportedDimensions) => continue,
Err(err) => panic!("{:?}", err)
};
self.swapchain = new_swapchain;
self.images = new_images.to_vec();
let (new_pipeline, new_framebuffers) = self.window_size_dependent_setup()?;
pipeline = new_pipeline;
framebuffers = new_framebuffers;
recreate_swapchain = false;
}
let (image_num, acquire_future) = match swapchain::acquire_next_image(self.swapchain.clone(), None) {
Ok(r) => r,
Err(AcquireError::OutOfDate) => {
recreate_swapchain = true;
continue;
},
Err(err) => panic!("{:?}", err)
};
let clear_values = vec![[1., 0., 1., 1.].into(), 1f32.into()];
let (vertices, colours, indices) = vertex_producer.get_data(RuntimeParams {
window_width: self.log_dims[0] as u16,
window_height: self.log_dims[1] as u16
});
let vertices: Vec<VkVertex> = vertices.into_iter().map(|vert| self.convert_vertex(vert)).collect();
let colours: Vec<VkColour> = colours.into_iter().map(|col| VkColour::from(col)).collect();
let vertex_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), vertices.iter().cloned())?;
let colour_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), colours.iter().cloned())?;
let index_buffer = CpuAccessibleBuffer::from_iter(self.device.clone(), BufferUsage::all(), indices.iter().cloned())?;
let command_buffer = AutoCommandBufferBuilder::primary_one_time_submit(self.device.clone(), self.queue.family())?
.begin_render_pass(framebuffers[image_num].clone(), false, clear_values)?
.draw_indexed(
pipeline.clone(),
&DynamicState::none(),
vec!(vertex_buffer.clone(), colour_buffer.clone()),
index_buffer.clone(), (), ())?
.end_render_pass()?
.build()?;
let future = previous_frame_end.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)?
.then_swapchain_present(self.queue.clone(), self.swapchain.clone(), image_num)
.then_signal_fence_and_flush();
match future {
Ok(future) => {
previous_frame_end = Box::new(future) as Box<_>;
}
Err(FlushError::OutOfDate) => {
recreate_swapchain = true;
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
Err(e) => {
println!("{:?}", e);
previous_frame_end = Box::new(sync::now(self.device.clone())) as Box<_>;
}
}
let mut done = false;
self.events_loop.poll_events(|ev| {
match ev {
Event::WindowEvent { event: WindowEvent::CloseRequested, .. } => done = true,
Event::WindowEvent { event: WindowEvent::Resized(_), .. } => recreate_swapchain = true,
_ => ()
}
});
if done {
return Ok(());
}
}
}
}
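// Small arithmetic check (illustrative only) of the FPS computation used in `run` above:
// the time elapsed over `fps_freq` frames is averaged to milliseconds per frame, and
// fps = 1000 / ms_per_frame.
#[cfg(test)]
mod fps_math_example {
    fn fps_from_batch(elapsed_ms_for_batch: f32, frames: f32) -> f32 {
        let ms_per_frame = elapsed_ms_for_batch / frames;
        1000.0 / ms_per_frame
    }

    #[test]
    fn hundred_frames_in_two_seconds_is_50_fps() {
        assert_eq!(fps_from_batch(2000.0, 100.0), 50.0);
    }
}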
#[derive(Default, Debug, Clone)]
struct VkVertex { position: [f32; 3] }
vulkano::impl_vertex!(VkVertex, position);
#[derive(Default, Debug, Clone)]
struct VkColour { colour: [f32; 4] }
vulkano::impl_vertex!(VkColour, colour);
impl From<Colour> for VkColour {
fn from(col: Colour) -> Self {
let mut colour = match col {
Colour::Rgb(r, g, b) => [r, g, b, 1.0],
Colour::Rgba(r, g, b, a) => [r, g, b, a]
};
// Convert from sRGB; the Vulkano API doesn't allow us to change the colour space
for i in colour.iter_mut() {
*i = i.powf(2.2);
}
Self { colour }
}
}
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: "
#version 450
layout(location = 0) in vec3 position;
layout(location = 1) in vec4 colour;
layout(location = 0) out vec4 fragColour; |
void main() {
gl_Position = vec4(position, 1.0);
fragColour = colour;
}
"
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: "
#version 450
layout(location = 0) in vec4 fragColour;
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(fragColour);
}
"
}
} | random_line_split |
|
windows.rs | it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstanding buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
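// Hedged sketch of the counter idea described above; it is not wired into the code in this
// file, and the names and the limit are made up for illustration: cap the number of
// outstanding overlapped reads and make callers queue further requests until the count
// drops below the high-water mark again.
#[allow(dead_code)]
mod outstanding_ops_sketch {
    use std::sync::atomic::{AtomicUsize, Ordering};

    const HIGH_WATER_MARK: usize = 1024;
    static OUTSTANDING: AtomicUsize = AtomicUsize::new(0);

    /// Returns true if a new overlapped read may be issued right now.
    pub fn try_acquire_slot() -> bool {
        let prev = OUTSTANDING.fetch_add(1, Ordering::SeqCst);
        if prev >= HIGH_WATER_MARK {
            OUTSTANDING.fetch_sub(1, Ordering::SeqCst);
            false // caller should queue the request and retry later
        } else {
            true
        }
    }

    /// Call once the completion packet for the read has been handled.
    pub fn release_slot() {
        OUTSTANDING.fetch_sub(1, Ordering::SeqCst);
    }
}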
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
|
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occurred or until the given timeout (in milliseconds)
/// expires; passing `None` for the timeout waits indefinitely.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// Calling GetQueuedCompletionStatusEx will either return the completion packets that
// are ready or block if the queue is empty (until the timeout expires).
// Windows wants the timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear events for any previous events and wait until we get some more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (i tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!(e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable without sacrificing so much of Rust's safety guarantees
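// The cast below relies on `Operation` being #[repr(C)] with the WSAOVERLAPPED as its
// first field: the lp_overlapped pointer Windows hands back is the same pointer that
// `wsa_recv` passed in, and that pointer actually points at the start of an `Operation`,
// so the trailing `token` can be read back out here.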
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inspired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// a INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead;
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// `Ok(())` once the overlapped receive has been queued (or completed immediately); the received bytes arrive via the completion port.
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait this with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds, if set to NONE, timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and cant cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
| {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
} | conditional_block |
windows.rs | it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstanding buffers, queueing requests and only handle them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with default implementation for TcpStream which allow us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occurred. If `timeout` is `None` we wait indefinitely;
/// otherwise the value is passed on to the OS as a timeout in milliseconds.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueuedCompletionStatusEx will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows wants the timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear the list of any previous events and wait until we get some more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (I tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!("{}", e);
}
}
}
}
}
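// Illustrative sketch (added, not from the original file) of how Selector, Registrator and
// TcpStream fit together. It assumes an `Interests::readable()` constructor, which is not
// shown in this excerpt, and it discards the received data.
#[allow(dead_code)]
fn event_loop_sketch() -> io::Result<()> {
    let mut selector = Selector::new()?;
    let poll_is_alive = Arc::new(AtomicBool::new(false));
    let registrator = selector.registrator(poll_is_alive.clone());
    let mut sock = TcpStream::connect("slowwly.robertomurray.co.uk:80")?;
    registrator.register(&mut sock, 1, Interests::readable())?;
    // Block until at least one completion arrives, then map each token back to its task.
    let mut events: Vec<ffi::OVERLAPPED_ENTRY> = Vec::with_capacity(255);
    selector.select(&mut events, None)?;
    for event in &events {
        let _token = event.id();
    }
    Ok(())
}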
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable without sacrificing so many of Rust's safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inspired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn | (token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// an INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead.
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
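// Added illustration of the claim above; this is a sketch and assumes a toolchain where
// `assert!` is allowed in const context (Rust 1.57+).
const _: () = assert!(INFINITE as i32 == -1);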
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// `Ok(())` if the receive completed immediately or was queued as pending (`WSA_IO_PENDING`).
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait for it with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds; if set to `None`, the timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and can't cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
| new | identifier_name |
windows.rs | it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstanding buffers, queueing requests and only handling them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with a default implementation for TcpStream which allows us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occurred. If `timeout` is `None` we wait indefinitely;
/// otherwise the value is passed on to the OS as a timeout in milliseconds.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueuedCompletionStatusEx will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows wants the timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear the list of any previous events and wait until we get some more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (I tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!("{}", e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable without sacrificing so many of Rust's safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inspired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// an INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead.
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935
pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// `Ok(())` if the receive completed immediately or was queued as pending (`WSA_IO_PENDING`).
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait for it with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds; if set to `None`, the timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> | } else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
| {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and can't cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut ...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error()) | identifier_body |
windows.rs | that it might run out of memory.
// Now the way we would normally handle this is to have a counter and limit the
// number of outstanding buffers, queueing requests and only handling them when the
// counter is below the high water mark. The same goes for using unlimited timeouts.
// http://www.serverframework.com/asynchronousevents/2011/06/tcp-flow-control-and-asynchronous-writes.html
impl TcpStream {
pub fn connect(adr: impl net::ToSocketAddrs) -> io::Result<Self> {
// This is a shortcut since this will block when establishing the connection.
// There are several ways of avoiding this.
// a) Obtain the socket using system calls, set it to non_blocking before we connect
// b) use the crate [net2](https://docs.rs/net2/0.2.33/net2/index.html) which
// defines a trait with a default implementation for TcpStream which allows us to set
// it to non-blocking before we connect
// Rust creates a WSASocket set to overlapped by default which is just what we need
// https://github.com/rust-lang/rust/blob/f86521e0a33a2b54c4c23dbfc5250013f7a33b11/src/libstd/sys/windows/net.rs#L99
let stream = net::TcpStream::connect(adr)?;
stream.set_nonblocking(true)?;
let mut buffer = vec![0_u8; 1024];
let wsabuf = vec![ffi::WSABUF::new(buffer.len() as u32, buffer.as_mut_ptr())];
Ok(TcpStream {
inner: stream,
buffer,
wsabuf,
event: None,
token: None,
pos: 0,
operations: LinkedList::new(),
})
}
}
impl Read for TcpStream {
fn read(&mut self, buff: &mut [u8]) -> io::Result<usize> {
let mut bytes_read = 0;
for (a, b) in self.buffer.iter().skip(self.pos).zip(buff) {
*b = *a;
bytes_read += 1;
}
self.pos += bytes_read;
Ok(bytes_read)
}
}
impl Write for TcpStream {
fn write(&mut self, buff: &[u8]) -> io::Result<usize> {
self.inner.write(buff)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl AsRawSocket for TcpStream {
fn as_raw_socket(&self) -> RawSocket {
self.inner.as_raw_socket()
}
}
pub struct Registrator {
completion_port: isize,
is_poll_dead: Arc<AtomicBool>,
}
impl Registrator {
pub fn register(
&self,
soc: &mut TcpStream,
token: usize,
interests: Interests,
) -> io::Result<()> {
if self.is_poll_dead.load(Ordering::SeqCst) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
ffi::create_io_completion_port(soc.as_raw_socket(), self.completion_port, 0)?;
let op = ffi::Operation::new(token);
soc.operations.push_back(op);
if interests.is_readable() {
ffi::wsa_recv(
soc.as_raw_socket(),
&mut soc.wsabuf,
soc.operations.back_mut().unwrap(),
)?;
} else {
unimplemented!();
}
Ok(())
}
/// NOTE: An alternative solution is to use the `CompletionKey` to signal that
/// this is a close event. We don't use it for anything else so it is a
/// good candidate to use for timers and special events like this
pub fn close_loop(&self) -> io::Result<()> {
if self
.is_poll_dead
.compare_and_swap(false, true, Ordering::SeqCst)
{
return Err(io::Error::new(
io::ErrorKind::Interrupted,
"Poll instance is dead.",
));
}
let mut overlapped = ffi::WSAOVERLAPPED::zeroed();
ffi::post_queued_completion_status(self.completion_port, 0, 0, &mut overlapped)?;
Ok(())
}
}
// possible Arc<InnerSelector> needed
#[derive(Debug)]
pub struct Selector {
completion_port: isize,
}
impl Selector {
pub fn new() -> io::Result<Self> {
// set up the queue
let completion_port = ffi::create_completion_port()?;
Ok(Selector { completion_port })
}
pub fn registrator(&self, is_poll_dead: Arc<AtomicBool>) -> Registrator {
Registrator {
completion_port: self.completion_port,
is_poll_dead,
}
}
/// Blocks until an Event has occurred. If `timeout` is `None` we wait indefinitely;
/// otherwise the value is passed on to the OS as a timeout in milliseconds.
pub fn select(
&mut self,
events: &mut Vec<ffi::OVERLAPPED_ENTRY>,
timeout: Option<i32>,
) -> io::Result<()> {
// calling GetQueuedCompletionStatusEx will either return a handle to a "port" ready to read or
// block if the queue is empty.
// Windows wants the timeout as u32 so we cast it as such
let timeout = timeout.map(|t| t as u32);
// first let's clear the list of any previous events and wait until we get some more
events.clear();
let ul_count = events.capacity() as u32;
let removed_res = ffi::get_queued_completion_status_ex(
self.completion_port as isize,
events,
ul_count,
timeout,
false,
);
// We need to handle the case that the "error" was a WAIT_TIMEOUT error.
// the code for this error is 258 on Windows. We don't treat this as an error
// but set the events returned to 0.
// (I tried to do this in the `ffi` function but there was an error)
let removed = match removed_res {
Ok(n) => n,
Err(ref e) if e.raw_os_error() == Some(258) => 0,
Err(e) => return Err(e),
};
unsafe {
events.set_len(removed as usize);
}
Ok(())
}
}
impl Drop for Selector {
fn drop(&mut self) {
match ffi::close_handle(self.completion_port) {
Ok(_) => (),
Err(e) => {
if !std::thread::panicking() {
panic!("{}", e);
}
}
}
}
}
mod ffi {
use super::*;
use std::io;
use std::os::windows::io::RawSocket;
use std::ptr;
#[repr(C)]
#[derive(Clone, Debug)]
pub struct WSABUF {
len: u32,
buf: *mut u8,
}
impl WSABUF {
pub fn new(len: u32, buf: *mut u8) -> Self {
WSABUF { len, buf }
}
}
#[repr(C)]
#[derive(Debug, Clone)]
pub struct OVERLAPPED_ENTRY {
lp_completion_key: *mut usize,
lp_overlapped: *mut WSAOVERLAPPED,
internal: usize,
bytes_transferred: u32,
}
impl OVERLAPPED_ENTRY {
pub fn id(&self) -> Token {
// TODO: this might be solvable without sacrificing so many of Rust's safety guarantees
let operation: &Operation = unsafe { &*(self.lp_overlapped as *const Operation) };
operation.token
}
pub(crate) fn zeroed() -> Self {
OVERLAPPED_ENTRY {
lp_completion_key: ptr::null_mut(),
lp_overlapped: ptr::null_mut(),
internal: 0,
bytes_transferred: 0,
}
}
}
// Reference: https://docs.microsoft.com/en-us/windows/win32/api/winsock2/ns-winsock2-wsaoverlapped
#[repr(C)]
#[derive(Debug)]
pub struct WSAOVERLAPPED {
/// Reserved for internal use
internal: ULONG_PTR,
/// Reserved
internal_high: ULONG_PTR,
/// Reserved for service providers
offset: DWORD,
/// Reserved for service providers
offset_high: DWORD,
/// If an overlapped I/O operation is issued without an I/O completion routine
/// (the operation's lpCompletionRoutine parameter is set to null), then this parameter
/// should either contain a valid handle to a WSAEVENT object or be null. If the
/// lpCompletionRoutine parameter of the call is non-null then applications are free
/// to use this parameter as necessary.
h_event: HANDLE,
}
impl WSAOVERLAPPED {
pub fn zeroed() -> Self {
WSAOVERLAPPED {
internal: ptr::null_mut(),
internal_high: ptr::null_mut(),
offset: 0,
offset_high: 0,
h_event: 0,
}
}
}
/// Operation is a way for us to attach additional context to the `WSAOVERLAPPED`
/// event. Inspired by [BOOST ASIO](https://www.boost.org/doc/libs/1_42_0/boost/asio/detail/win_iocp_io_service.hpp)
#[derive(Debug)]
#[repr(C)]
pub struct Operation {
wsaoverlapped: WSAOVERLAPPED,
token: usize,
}
impl Operation {
pub(crate) fn new(token: usize) -> Self {
Operation {
wsaoverlapped: WSAOVERLAPPED::zeroed(),
token,
}
}
}
// You can find most of these here: https://docs.microsoft.com/en-us/windows/win32/winprog/windows-data-types
/// The HANDLE type is actually a `*mut c_void` but windows preserves backwards compatibility by allowing
/// an INVALID_HANDLE_VALUE which is `-1`. We can't express that in Rust so it's much easier for us to treat
/// this as an isize instead.
pub type HANDLE = isize;
pub type BOOL = bool;
pub type WORD = u16;
pub type DWORD = u32;
pub type ULONG = u32;
pub type PULONG = *mut ULONG;
pub type ULONG_PTR = *mut usize;
pub type PULONG_PTR = *mut ULONG_PTR;
pub type LPDWORD = *mut DWORD;
pub type LPWSABUF = *mut WSABUF;
pub type LPWSAOVERLAPPED = *mut WSAOVERLAPPED;
pub type LPWSAOVERLAPPED_COMPLETION_ROUTINE = *const extern "C" fn();
// https://referencesource.microsoft.com/#System.Runtime.Remoting/channels/ipc/win32namedpipes.cs,edc09ced20442fea,references
// read this! https://devblogs.microsoft.com/oldnewthing/20040302-00/?p=40443
/// Defined in `win32.h` which you can find on your windows system
pub const INVALID_HANDLE_VALUE: HANDLE = -1;
// https://docs.microsoft.com/en-us/windows/win32/winsock/windows-sockets-error-codes-2
pub const WSA_IO_PENDING: i32 = 997;
// This can also be written as `4294967295` if you look at sources on the internet.
// Interpreted as an i32 the value is -1
// see for yourself: https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=4b93de7d7eb43fa9cd7f5b60933d8935 | pub const INFINITE: u32 = 0xFFFFFFFF;
#[link(name = "Kernel32")]
extern "stdcall" {
// https://docs.microsoft.com/en-us/windows/win32/fileio/createiocompletionport
fn CreateIoCompletionPort(
filehandle: HANDLE,
existing_completionport: HANDLE,
completion_key: ULONG_PTR,
number_of_concurrent_threads: DWORD,
) -> HANDLE;
// https://docs.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-wsarecv
fn WSARecv(
s: RawSocket,
lpBuffers: LPWSABUF,
dwBufferCount: DWORD,
lpNumberOfBytesRecvd: LPDWORD,
lpFlags: LPDWORD,
lpOverlapped: LPWSAOVERLAPPED,
lpCompletionRoutine: LPWSAOVERLAPPED_COMPLETION_ROUTINE,
) -> i32;
// https://docs.microsoft.com/en-us/windows/win32/fileio/postqueuedcompletionstatus
fn PostQueuedCompletionStatus(
CompletionPort: HANDLE,
dwNumberOfBytesTransferred: DWORD,
dwCompletionKey: ULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
) -> i32;
/// https://docs.microsoft.com/nb-no/windows/win32/api/ioapiset/nf-ioapiset-getqueuedcompletionstatus
/// Errors: https://docs.microsoft.com/nb-no/windows/win32/debug/system-error-codes--0-499-
/// From this we can see that error `WAIT_TIMEOUT` has the code 258 which we'll
/// need later on
fn GetQueuedCompletionStatusEx(
CompletionPort: HANDLE,
lpCompletionPortEntries: *mut OVERLAPPED_ENTRY,
ulCount: ULONG,
ulNumEntriesRemoved: PULONG,
dwMilliseconds: DWORD,
fAlertable: BOOL,
) -> i32;
fn GetQueuedCompletionStatus(
CompletionPort: HANDLE,
lpNumberOfBytesTransferred: LPDWORD,
lpCompletionKey: PULONG_PTR,
lpOverlapped: LPWSAOVERLAPPED,
dwMilliseconds: DWORD,
) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/handleapi/nf-handleapi-closehandle
fn CloseHandle(hObject: HANDLE) -> i32;
// https://docs.microsoft.com/nb-no/windows/win32/api/winsock/nf-winsock-wsagetlasterror
fn WSAGetLastError() -> i32;
}
// ===== SAFE WRAPPERS =====
pub fn close_handle(handle: isize) -> io::Result<()> {
let res = unsafe { CloseHandle(handle) };
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
pub fn create_completion_port() -> io::Result<isize> {
unsafe {
// number_of_concurrent_threads = 0 means use the number of physical threads but the argument is
// ignored when existing_completionport is set to null.
let res = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, ptr::null_mut(), 0);
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
}
/// Returns the file handle to the completion port we passed in
pub fn create_io_completion_port(
s: RawSocket,
completion_port: isize,
token: usize,
) -> io::Result<isize> {
let res =
unsafe { CreateIoCompletionPort(s as isize, completion_port, token as *mut usize, 0) };
if (res as *mut usize).is_null() {
return Err(std::io::Error::last_os_error());
}
Ok(res)
}
/// Creates a socket read event.
/// ## Returns
/// `Ok(())` if the receive completed immediately or was queued as pending (`WSA_IO_PENDING`).
pub fn wsa_recv(
s: RawSocket,
wsabuffers: &mut [WSABUF],
op: &mut Operation,
) -> Result<(), io::Error> {
let mut flags = 0;
let operation_ptr: *mut Operation = op;
let res = unsafe {
WSARecv(
s,
wsabuffers.as_mut_ptr(),
1,
ptr::null_mut(),
&mut flags,
operation_ptr as *mut WSAOVERLAPPED,
ptr::null_mut(),
)
};
if res != 0 {
let err = unsafe { WSAGetLastError() };
if err == WSA_IO_PENDING {
// Everything is OK, and we can wait for it with GetQueuedCompletionStatus
Ok(())
} else {
Err(std::io::Error::last_os_error())
}
} else {
// The socket is already ready so we don't need to queue it
// TODO: Avoid queueing this
Ok(())
}
}
pub fn post_queued_completion_status(
completion_port: isize,
bytes_to_transfer: u32,
completion_key: usize,
overlapped_ptr: &mut WSAOVERLAPPED,
) -> io::Result<()> {
let res = unsafe {
PostQueuedCompletionStatus(
completion_port,
bytes_to_transfer,
completion_key as *mut usize,
overlapped_ptr,
)
};
if res == 0 {
Err(std::io::Error::last_os_error().into())
} else {
Ok(())
}
}
/// ## Parameters:
/// - *completion_port:* the handle to a completion port created by calling CreateIoCompletionPort
/// - *completion_port_entries:* a pointer to an array of OVERLAPPED_ENTRY structures
/// - *ul_count:* The maximum number of entries to remove
/// - *timeout:* The timeout in milliseconds; if set to `None`, the timeout is set to INFINITE
/// - *alertable:* If this parameter is FALSE, the function does not return until the time-out period has elapsed or
/// an entry is retrieved. If the parameter is TRUE and there are no available entries, the function performs
/// an alertable wait. The thread returns when the system queues an I/O completion routine or APC to the thread
/// and the thread executes the function.
///
/// ## Returns
/// The number of items actually removed from the queue
pub fn get_queued_completion_status_ex(
completion_port: isize,
completion_port_entries: &mut [OVERLAPPED_ENTRY],
ul_count: u32,
timeout: Option<u32>,
alertable: bool,
) -> io::Result<u32> {
let mut ul_num_entries_removed: u32 = 0;
// can't coerce directly to *mut *mut usize and can't cast `&mut` as `*mut`
// let completion_key_ptr: *mut &mut usize = completion_key_ptr;
// // but we can cast a `*mut...`
// let completion_key_ptr: *mut *mut usize = completion_key_ptr as *mut *mut usize;
let timeout = timeout.unwrap_or(INFINITE);
let res = unsafe {
GetQueuedCompletionStatusEx(
completion_port,
completion_port_entries.as_mut_ptr(),
ul_count,
&mut ul_num_entries_removed,
timeout,
alertable,
)
};
if res == 0 {
Err(io::Error::last_os_error())
} else {
Ok(ul_num_entries_removed)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn selector_new_creates_valid_port() {
let selector = Selector::new().expect("create completion port failed");
assert!(selector.completion_port > 0);
}
#[test]
fn selector_register() {
let selector = Selector::new().expect("create completion port failed");
let poll_is_alive = Arc::new(AtomicBool::new(false));
let registrator = selector.registrator(poll_is_alive.clone());
let mut sock: TcpStream = TcpStream::connect("slowwly.robertomurray.co.uk:80").unwrap();
let request = "GET /delay/1000/url/http://www.google.com HTTP/1.1\r\n\
Host: slowwly.robertomurray.co.uk\r\n\
Connection: close\r\n\
\ | random_line_split |
|
strobe.rs | bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
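// Added illustration (not part of the original crate source): the per-operation flag sets
// used by the macros later in this file are plain bitwise combinations of the primitives
// above, e.g. send_enc uses A | C | T while recv_mac uses I | C | T.
#[allow(dead_code)]
fn op_flag_composition_example() {
    let send_enc = OpFlags::A | OpFlags::C | OpFlags::T;
    let recv_mac = OpFlags::I | OpFlags::C | OpFlags::T;
    assert!(send_enc.contains(OpFlags::C) && !send_enc.contains(OpFlags::I));
    assert!(recv_mac.contains(OpFlags::I) && recv_mac.contains(OpFlags::T));
}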
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
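// Added sketch of the resulting string, assuming KECCAK_BLOCK_SIZE is 25 u64 words
// (Keccak-f[1600], as the struct documentation states):
#[allow(dead_code)]
fn version_str_example() {
    let s = Strobe::new(b"example-version", SecParam::B128);
    assert_eq!(
        s.version_str(),
        format!("Strobe-Keccak-128/1600-v{}", STROBE_VERSION)
    );
}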
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C | else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
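// Added sketch of the intended send_mac / recv_mac round trip. It assumes both sides were
// created with the same protocol string and have run the same sequence of operations up to
// this point; the 16-byte MAC length is an arbitrary example value.
#[allow(dead_code)]
fn mac_round_trip_sketch() {
    let mut sender = Strobe::new(b"example-mac", SecParam::B128);
    let mut receiver = Strobe::new(b"example-mac", SecParam::B128);
    let mut mac = [0u8; 16];
    sender.send_mac(&mut mac, false);
    // The receiver must check this result and abort the session on failure.
    receiver.recv_mac(&mut mac).expect("MAC verification failed");
}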
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ | {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} | conditional_block |
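// Illustrative sketch (not part of the crate): the branch structure of `operate` /
// `operate_no_mutate` above, written as a standalone dispatch table. The bit values
// mirror the OpFlags constants in strobe.rs; the strings name the duplex
// specialization each branch ends up calling.
const I: u8 = 1 << 0; // inbound
const A: u8 = 1 << 1; // application
const C: u8 = 1 << 2; // cipher
const T: u8 = 1 << 3; // transport

fn duplex_specialization(flags: u8) -> &'static str {
    if flags & C != 0 && flags & T != 0 && flags & I == 0 {
        // Outbound cipher output; send_mac treats the input as all zeros
        if flags == C | T { "copy_state" } else { "absorb_and_set" }
    } else if flags == I | A | C {
        "squeeze" // prf
    } else if flags & C != 0 {
        "exchange (mutating) / overwrite (non-mutating)"
    } else {
        "absorb" // ad, send_clr, recv_clr
    }
}

fn main() {
    assert_eq!(duplex_specialization(C | T), "copy_state"); // send_mac
    assert_eq!(duplex_specialization(A | C | T), "absorb_and_set"); // send_enc
    assert_eq!(duplex_specialization(I | A | C), "squeeze"); // prf
    assert_eq!(duplex_specialization(I | A | C | T), "exchange (mutating) / overwrite (non-mutating)"); // recv_enc
    assert_eq!(duplex_specialization(A), "absorb"); // ad
}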
strobe.rs | ::bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => { | self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal to the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ad, | #[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags; | random_line_split |
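// Usage sketch: one sender/receiver round trip over the API declared above. Assumes the
// public surface exactly as defined in strobe.rs (Strobe::new, ad, send_enc, send_mac,
// recv_enc, recv_mac); the protocol label, message, and 16-byte MAC length are
// illustrative choices, not requirements of the crate.
fn roundtrip() -> Result<(), AuthError> {
    let mut tx = Strobe::new(b"example-roundtrip", SecParam::B256);
    let mut rx = Strobe::new(b"example-roundtrip", SecParam::B256);
    // Both sides bind the same associated data into their state
    tx.ad(b"header", false);
    rx.ad(b"header", false);
    // Sender: encrypt in place, then emit a MAC of the running state
    let mut wire = b"attack at dawn".to_vec();
    tx.send_enc(&mut wire, false);
    let mut mac = [0u8; 16];
    tx.send_mac(&mut mac, false);
    // Receiver: decrypt in place, then verify the MAC in constant time
    rx.recv_enc(&mut wire, false);
    rx.recv_mac(&mut mac)?;
    assert_eq!(&wire[..], &b"attack at dawn"[..]);
    Ok(())
}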
strobe.rs | bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn | (&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal to the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
}
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta | absorb | identifier_name |
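// Worked example of the `rate` computation in Strobe::new above. KECCAK_BLOCK_SIZE is
// assumed to be 25: version_str reports the block size as KECCAK_BLOCK_SIZE * 64 bits
// and the permutation is Keccak-f[1600], so the state is 25 lanes = 200 bytes.
const KECCAK_BLOCK_SIZE: usize = 25;

const fn strobe_rate(sec_bits: usize) -> usize {
    KECCAK_BLOCK_SIZE * 8 - sec_bits / 4 - 2
}

fn main() {
    assert_eq!(strobe_rate(128), 166); // SecParam::B128: 200 - 32 - 2
    assert_eq!(strobe_rate(256), 134); // SecParam::B256: 200 - 64 - 2
}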
strobe.rs | bitflags;
use subtle::{self, ConstantTimeEq};
/// Version of Strobe that this crate implements.
pub const STROBE_VERSION: &str = "1.0.2";
bitflags! {
/// Operation flags defined in the Strobe paper. This is defined as a bitflags struct.
pub(crate) struct OpFlags: u8 {
/// Is data being moved inbound
const I = 1<<0;
/// Is data being sent to the application
const A = 1<<1;
/// Does this operation use cipher output
const C = 1<<2;
/// Is data being sent for transport
const T = 1<<3;
/// Use exclusively for metadata operations
const M = 1<<4;
/// Reserved and currently unimplemented. Using this will cause a panic.
const K = 1<<5;
}
}
/// Security parameter. Choice of 128 or 256 bits.
#[derive(Clone, Copy)]
#[repr(usize)]
pub enum SecParam {
B128 = 128,
B256 = 256,
}
/// An empty struct that just indicates that an error occurred in verifying a MAC
#[derive(Debug)]
pub struct AuthError;
/// The main Strobe object. This is currently limited to using Keccak-f\[1600\] as the internal
/// permutation function. For more information on this object, the [protocol specification][spec]
/// is a great resource.
///
/// [spec]: https://strobe.sourceforge.io/specs/
///
/// Description of method input
/// ---------------------------
/// Most operations exposed by `Strobe` take the same set of inputs. The arguments are
///
/// * `data` - The input data to the operation.
/// * `more` - For streaming purposes. Specifies whether you're trying to add more input / get more
/// output to/from the previous operation. For example:
///
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello world", false);
/// # }
/// ```
/// is equivalent to
/// ```rust
/// # extern crate strobe_rs;
/// # use strobe_rs::{SecParam, Strobe};
/// # fn main() {
/// # let mut s = Strobe::new(b"example-of-more", SecParam::B128);
/// s.ad(b"hello ", false);
/// s.ad(b"world", true);
/// # }
/// ```
///
/// **NOTE:** If you try to set the `more` flag for an operation that is not preceded by the same
/// operation (e.g., if you try `ad` followed by `send_enc` with `more=true`), then **the function
/// will panic**, since that is an invalid use of the `more` flag.
///
/// Finally, `ratchet` and `meta_ratchet` take a `usize` argument instead of bytes. These functions
/// are individually commented below.
#[derive(Clone)]
pub struct Strobe {
/// Internal Keccak state
pub(crate) st: AlignedKeccakState,
/// Security parameter (128 or 256)
sec: SecParam,
/// This is the `R` parameter in the Strobe spec
rate: usize,
/// Index into `st`
pos: usize,
/// Index into `st`
pos_begin: usize,
/// Represents whether we're a sender or a receiver or uninitialized
is_receiver: Option<bool>,
/// The last operation performed. This is to verify that the `more` flag is only used across
/// identical operations.
prev_flags: Option<OpFlags>,
}
// This defines an operation and meta-operation that mutates its input
macro_rules! def_op_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags;
self.operate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &mut [u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate(flags, data, more);
}
};
}
// This defines an operation and meta-operation that does not mutate its input
macro_rules! def_op_no_mut {
($name:ident, $meta_name:ident, $flags:expr, $doc_str:expr) => {
#[doc = $doc_str]
pub fn $name(&mut self, data: &[u8], more: bool) {
let flags = $flags;
self.operate_no_mutate(flags, data, more);
}
#[doc = $doc_str]
pub fn $meta_name(&mut self, data: &[u8], more: bool) {
let flags = $flags | OpFlags::M;
self.operate_no_mutate(flags, data, more);
}
};
}
impl Strobe {
/// Makes a new `Strobe` object with a given protocol byte string and security parameter.
pub fn new(proto: &[u8], sec: SecParam) -> Strobe {
let rate = KECCAK_BLOCK_SIZE * 8 - (sec as usize) / 4 - 2;
assert!(rate >= 1);
assert!(rate < 254);
// Initialize state: st = F([0x01, R+2, 0x01, 0x00, 0x01, 0x60] + b"STROBEvX.Y.Z")
let mut st_buf = [0u8; KECCAK_BLOCK_SIZE * 8];
st_buf[0..6].copy_from_slice(&[0x01, (rate as u8) + 2, 0x01, 0x00, 0x01, 0x60]);
st_buf[6..13].copy_from_slice(b"STROBEv");
st_buf[13..18].copy_from_slice(STROBE_VERSION.as_bytes());
let mut st = AlignedKeccakState(st_buf);
keccakf_u8(&mut st);
let mut strobe = Strobe {
st,
sec,
rate,
pos: 0,
pos_begin: 0,
is_receiver: None,
prev_flags: None,
};
// Mix the protocol into the state
strobe.meta_ad(proto, false);
strobe
}
/// Returns a string of the form `Strobe-Keccak-<sec>/<b>v<ver>` where `sec` is the bits of
/// security (128 or 256), `b` is the block size (in bits) of the Keccak permutation function,
/// and `ver` is the protocol version.
pub fn version_str(&self) -> String {
format!(
"Strobe-Keccak-{}/{}-v{}",
self.sec as usize,
KECCAK_BLOCK_SIZE * 64,
STROBE_VERSION
)
}
/// Validates that the `more` flag is being used correctly. Panics when validation fails.
fn validate_streaming(&mut self, flags: OpFlags, more: bool) {
// Streaming only makes sense if this operation is the same as last. For example you can do
// s.ad("hello", false);
// s.ad(" world", true).
// But you can't do
// s.ad("hello", false);
// s.key(" world", true).
if more {
assert_eq!(
self.prev_flags,
Some(flags),
"`more` can only be used when this operation is the same as the previous operation"
);
}
// Update the last-performed operation (i.e., the one we're about to perform)
self.prev_flags = Some(flags);
}
// Runs the permutation function on the internal state
fn run_f(&mut self) {
self.st.0[self.pos] ^= self.pos_begin as u8;
self.st.0[self.pos + 1] ^= 0x04;
self.st.0[self.rate + 1] ^= 0x80;
keccakf_u8(&mut self.st);
self.pos = 0;
self.pos_begin = 0;
}
/// XORs the given data into the state. This is a special case of the `duplex` code in the
/// STROBE paper.
fn absorb(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// XORs the given data into the state, then sets the data equal to the state. This is a special
/// case of the `duplex` code in the STROBE paper.
fn absorb_and_set(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*state_byte ^= *b;
*b = *state_byte;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the internal state into the given buffer. This is a special case of `absorb_and_set`
/// where `data` is all zeros.
fn copy_state(&mut self, data: &mut [u8]) {
for b in data {
*b = self.st.0[self.pos];
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data while XORing the given data with the old state.
/// This is a special case of the `duplex` code in the STROBE paper.
fn exchange(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b ^= *state_byte;
*state_byte ^= *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with the given data. This is a special case of `Strobe::exchange`,
/// where we do not want to mutate the input data.
fn overwrite(&mut self, data: &[u8]) {
for b in data {
self.st.0[self.pos] = *b;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Copies the state into the given buffer and sets the state to 0. This is a special case of
/// `Strobe::exchange`, where `data` is assumed to be the all-zeros string. This is precisely
/// the case when the current operation is PRF.
fn squeeze(&mut self, data: &mut [u8]) {
for b in data {
let state_byte = self.st.0.get_mut(self.pos).unwrap();
*b = *state_byte;
*state_byte = 0;
self.pos += 1;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Overwrites the state with a specified number of zeros. This is a special case of
/// `Strobe::exchange`. More specifically, it's a special case of `Strobe::overwrite` and
/// `Strobe::squeeze`. It's like `squeeze` in that we assume we've been given all zeros as
/// input, and like `overwrite` in that we do not mutate (or take) any input.
fn zero_state(&mut self, mut bytes_to_zero: usize) {
static ZEROS: [u8; 8 * KECCAK_BLOCK_SIZE] = [0u8; 8 * KECCAK_BLOCK_SIZE];
// Do the zero-writing in chunks
while bytes_to_zero > 0 {
let slice_len = core::cmp::min(self.rate - self.pos, bytes_to_zero);
self.st.0[self.pos..(self.pos + slice_len)].copy_from_slice(&ZEROS[..slice_len]);
self.pos += slice_len;
bytes_to_zero -= slice_len;
if self.pos == self.rate {
self.run_f();
}
}
}
/// Mixes the current state index and flags into the state, accounting for whether we are
/// sending or receiving
fn begin_op(&mut self, mut flags: OpFlags) {
if flags.contains(OpFlags::T) {
let is_op_receiving = flags.contains(OpFlags::I);
// If uninitialized, take on the direction of the first directional operation we get
if self.is_receiver.is_none() {
self.is_receiver = Some(is_op_receiving);
}
// So that the sender and receiver agree, toggle the I flag as necessary
// This is equivalent to flags ^= is_receiver
flags.set(OpFlags::I, self.is_receiver.unwrap() != is_op_receiving);
}
let old_pos_begin = self.pos_begin;
self.pos_begin = self.pos + 1;
// Mix in the position and flags
let to_mix = &mut [old_pos_begin as u8, flags.bits()];
self.absorb(&to_mix[..]);
let force_f = flags.contains(OpFlags::C) || flags.contains(OpFlags::K);
if force_f && self.pos != 0 {
self.run_f();
}
}
/// Performs the state / data transformation that corresponds to the given flags. If `more` is
/// given, this will treat `data` as a continuation of the data given in the previous
/// call to `operate`.
pub(crate) fn operate(&mut self, flags: OpFlags, data: &mut [u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// Meta-ness is only relevant for `begin_op`. Remove it to simplify the below logic.
let flags = flags & !OpFlags::M;
// TODO?: Assert that input is empty under some flag conditions
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cafter = True`
if flags == OpFlags::C | OpFlags::T {
// This is `send_mac`. Pretend the input is all zeros
self.copy_state(data)
} else {
self.absorb_and_set(data);
}
} else if flags == OpFlags::I | OpFlags::A | OpFlags::C {
// Special case of case below. This is PRF. Use `squeeze` instead of `exchange`.
self.squeeze(data);
} else if flags.contains(OpFlags::C) {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = True`
self.exchange(data);
} else {
// This should normally call `absorb`, but `absorb` does not mutate, so the implementor
// should have used operate_no_mutate instead
panic!("operate should not be called for operations that do not require mutation");
}
}
/// Performs the state transformation that corresponds to the given flags. If `more` is given,
/// this will treat `data` as a continuation of the data given in the previous call to
/// `operate`. This uses non-mutating variants of the specializations of the `duplex` function.
pub(crate) fn operate_no_mutate(&mut self, flags: OpFlags, data: &[u8], more: bool) {
// Make sure the K opflag isn't being used, and that the `more` flag isn't being misused
assert!(!flags.contains(OpFlags::K), "Op flag K not implemented");
self.validate_streaming(flags, more);
// If `more` isn't set, this is a new operation. Do the begin_op sequence
if !more {
self.begin_op(flags);
}
// There are no non-mutating variants of things with flags & (C | T | I) == C | T
if flags.contains(OpFlags::C) && flags.contains(OpFlags::T) && !flags.contains(OpFlags::I) {
panic!("operate_no_mutate called on something that requires mutation");
} else if flags.contains(OpFlags::C) {
// This is equivalent to a non-mutating form of the `duplex` operation in the Python
// implementation, with `cbefore = True`
self.overwrite(data);
} else {
// This is equivalent to the `duplex` operation in the Python implementation, with
// `cbefore = cafter = False`
self.absorb(data);
};
}
// This is separately defined because it's the only method that can return a `Result`. See docs
// for recv_mac and meta_recv_mac.
fn generalized_recv_mac(&mut self, data: &mut [u8], is_meta: bool) -> Result<(), AuthError> {
// These are the (meta_)recv_mac flags
let flags = if is_meta {
OpFlags::I | OpFlags::C | OpFlags::T | OpFlags::M
} else {
OpFlags::I | OpFlags::C | OpFlags::T
};
// recv_mac can never be streamed
self.operate(flags, data, /* more */ false);
// Constant-time MAC check. This accumulates the truth values of byte == 0
let mut all_zero = subtle::Choice::from(1u8);
for b in data {
all_zero &= b.ct_eq(&0u8);
}
// If the buffer isn't all zeros, that's an invalid MAC
if !bool::from(all_zero) {
Err(AuthError)
} else {
Ok(())
}
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ false)
}
/// Attempts to authenticate the current state against the given MAC. On failure, it returns an
/// `AuthError`. It behooves the user of this library to check this return value and overreact
/// on error.
pub fn meta_recv_mac(&mut self, data: &mut [u8]) -> Result<(), AuthError> {
self.generalized_recv_mac(data, /* is_meta */ true)
}
// This is separately defined because it's the only method that takes an integer and mutates
// its input
fn generalized_ratchet(&mut self, num_bytes_to_zero: usize, more: bool, is_meta: bool) {
// These are the (meta_)ratchet flags
let flags = if is_meta {
OpFlags::C | OpFlags::M
} else {
OpFlags::C
};
// We don't make an `operate` call, since this is a super special case. That means we have
// to validate the flags and make the `begin_op` call manually.
self.validate_streaming(flags, more);
if !more {
self.begin_op(flags);
}
self.zero_state(num_bytes_to_zero);
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn ratchet(&mut self, num_bytes_to_zero: usize, more: bool) {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ false)
}
/// Ratchets the internal state forward in an irreversible way by zeroing bytes.
///
/// Takes a `usize` argument specifying the number of bytes of public state to zero. If the
/// size exceeds `self.rate`, Keccak-f will be called before more bytes are zeroed.
pub fn meta_ratchet(&mut self, num_bytes_to_zero: usize, more: bool) |
//
// These operations mutate their inputs
//
def_op_mut!(
send_enc,
meta_send_enc,
OpFlags::A | OpFlags::C | OpFlags::T,
"Sends an encrypted message."
);
def_op_mut!(
recv_enc,
meta_recv_enc,
OpFlags::I | OpFlags::A | OpFlags::C | OpFlags::T,
"Receives an encrypted message."
);
def_op_mut!(
send_mac,
meta_send_mac,
OpFlags::C | OpFlags::T,
"Sends a MAC of the internal state. \
The output is independent of the initial contents of the input buffer."
);
def_op_mut!(
prf,
meta_prf,
OpFlags::I | OpFlags::A | OpFlags::C,
"Extracts pseudorandom data as a function of the internal state. \
The output is independent of the initial contents of the input buffer."
);
//
// These operations do not mutate their inputs
//
def_op_no_mut!(
send_clr,
meta_send_clr,
OpFlags::A | OpFlags::T,
"Sends a plaintext message."
);
def_op_no_mut!(
recv_clr,
meta_recv_clr,
OpFlags::I | OpFlags::A | OpFlags::T,
"Receives a plaintext message."
);
def_op_no_mut!(
ad,
meta_ | {
self.generalized_ratchet(num_bytes_to_zero, more, /* is_meta */ true)
} | identifier_body |
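// Sketch of the streaming property documented above: splitting an operation across two
// calls with `more = true` matches a single call over the concatenated input. Output is
// compared via prf(); only methods defined in strobe.rs above are used, and the protocol
// label is the one from the doc example.
fn streaming_equivalence() {
    let mut one_shot = Strobe::new(b"example-of-more", SecParam::B128);
    let mut chunked = Strobe::new(b"example-of-more", SecParam::B128);
    one_shot.ad(b"hello world", false);
    chunked.ad(b"hello ", false);
    chunked.ad(b"world", true);
    let (mut a, mut b) = ([0u8; 32], [0u8; 32]);
    one_shot.prf(&mut a, false);
    chunked.prf(&mut b, false);
    assert_eq!(a, b);
}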
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP),
);
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn sel_mc_account(_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
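// The profiles endpoint takes a JSON array of usernames and returns an array of matching profiles, hence the single name is wrapped in an array below.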
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id!= msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
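// diesel's execute() returns the number of affected rows, so Ok(1) means exactly one link row was inserted.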
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => |
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
}
| {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
} | conditional_block |
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
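/// Issues a single command over an established RCON connection, logging the reply and
/// mapping any failure to a retryable result for the `retry` loops in `whitelist_account`.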
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP),
);
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
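// Any reply other than the "player does not exist" message counts as success for this server, so move on to the next one.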
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn | (_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
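// The profiles endpoint takes a JSON array of usernames and returns an array of matching profiles, hence the single name is wrapped in an array below.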
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id!= msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
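// diesel's execute() returns the number of affected rows, so Ok(1) means exactly one link row was inserted.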
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
}
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
}
| sel_mc_account | identifier_name |
main.rs | #[macro_use]
extern crate diesel;
pub mod models;
pub mod schema;
pub mod error;
use self::models::*;
use self::error::{
Error as MCWhitelistError,
WhitelistErrorKind,
};
use diesel::{
mysql::MysqlConnection,
prelude::*,
r2d2::{
ConnectionManager,
Pool,
},
result::{
Error as DieselError,
DatabaseErrorKind
},
RunQueryDsl,
};
use dotenv::dotenv;
use retry::{delay::Fixed, retry, OperationResult};
use serde_json::json;
use lazy_static::lazy_static;
use serenity::{
client::Client,
framework::standard::{
macros::{command, group},
Args, CommandResult, StandardFramework,
},
model::{channel::Message, guild::Member, id::GuildId, user::User},
prelude::{Context, EventHandler},
};
use std::{env, fs::File, vec};
use url::Url;
group!({
name: "general",
options: {},
commands: [
mclink,
unlink
],
});
const MOJANG_GET_HISTORY: &str = "https://api.mojang.com/user/profiles/";
const MOJANG_GET_UUID: &str = "https://api.mojang.com/profiles/minecraft";
struct Handler;
impl EventHandler for Handler {
fn guild_member_removal(&self, _ctx: Context, guild: GuildId, user: User, _member_data_if_available: Option<Member>) {
let discord_vals: DiscordConfig = get_config().discord;
if &discord_vals.guild_id == guild.as_u64() {
println!("{} is leaving Mooncord", user.name);
rem_account(*user.id.as_u64());
}
}
}
lazy_static! {
static ref POOL: Pool<ConnectionManager<MysqlConnection>> = establish_connection();
}
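/// Issues a single command over an established RCON connection, logging the reply and
/// mapping any failure to a retryable result for the `retry` loops in `whitelist_account`.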
fn issue_cmd(conn: &mut rcon::Connection, cmd: &str) -> OperationResult<String, String> {
match conn.cmd(cmd) {
Ok(val) => {
println!("{}", val);
OperationResult::Ok(val)
}
Err(why) => {
println!("RCON Failure: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
}
fn establish_connection() -> Pool<ConnectionManager<MysqlConnection>> {
dotenv().ok();
let db_url = env::var("DATABASE_URL").expect("DATABASE_URL env var must be set");
let manager = ConnectionManager::<MysqlConnection>::new(db_url);
Pool::builder()
.build(manager)
.expect("Failed to create pool")
}
fn get_config() -> ConfigSchema {
let f = File::open("./config.yaml").unwrap();
serde_yaml::from_reader(&f).unwrap()
}
fn main() {
let discord_vals: DiscordConfig = get_config().discord;
// Bot login
let mut client: Client =
Client::new(&discord_vals.token, Handler).expect("Error creating client");
| );
// Start listening for events, single shard. Shouldn't need more than one shard
if let Err(why) = client.start() {
println!("An error occurred while running the client: {:?}", why);
}
}
fn add_accounts(discordid: u64, mc_user: &MinecraftUser) -> QueryResult<usize> {
use self::schema::minecrafters;
let connection;
let conn_res = POOL.get();
if conn_res.is_err() {
let msg = "Unable to connect to the MySQL server";
return Err(DieselError::DatabaseError(DatabaseErrorKind::UnableToSendCommand, Box::new(msg.to_string())))
}
connection = conn_res.unwrap();
let mcid = &mc_user.id;
let mcname = &mc_user.name;
let new_user = NewMinecraftUser {
discord_id: discordid,
minecraft_uuid: mcid.to_string(),
minecraft_name: mcname.to_string(),
};
let res = diesel::insert_into(minecrafters::table)
.values(&new_user)
.execute(&connection);
res
}
fn whitelist_account(mc_user: &MinecraftUser, towhitelist: bool) -> Result<(), MCWhitelistError> {
let mc_servers: Vec<MinecraftServerIdentity> = get_config().minecraft.servers;
for server in &mc_servers {
let act: String = format!("{}", if towhitelist { "add" } else { "remove" });
let address: String = format!("{}:{}", &server.ip, &server.port);
let cmd: String = format!("whitelist {} {}", act, mc_user.name);
let res = retry(Fixed::from_millis(2000).take(10), || {
match rcon::Connection::connect(&address, &server.pass) {
Ok(mut val) => issue_cmd(&mut val, &cmd),
Err(why) => {
println!("Error connecting to server: {:?}", why);
OperationResult::Retry(format!("{:?}", why))
}
}
});
let err_msg;
let err_kind;
let non_existing = "That player does not exist";
match res {
Ok(msg) => {
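// Any reply other than the "player does not exist" message counts as success for this server, so move on to the next one.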
if msg != non_existing {
continue;
}
err_msg = "Tried to unwhitelist unexisting player";
err_kind = WhitelistErrorKind::NonExistingPlayer;
}
Err(_) => {
err_msg = "RCON Connection error";
err_kind = WhitelistErrorKind::RCONConnectionError;
}
}
return Err(MCWhitelistError::WhitelistError(err_kind, Box::new(err_msg.to_string())))
}
Ok(())
}
fn sel_mc_account(_discord_id: u64) -> Option<MinecraftUser> {
use self::schema::minecrafters::dsl::*;
let connection = POOL.get().unwrap();
let res = minecrafters.filter(discord_id.eq(_discord_id))
.load::<FullMCUser>(&connection)
.expect("Error loading minecraft user");
if res.len() < 1 {
println!("[WARN] NO PLAYER FOUND BY DISCORD ID");
return None
}
let mcid = &res[0].minecraft_uuid;
let mcname = &res[0].minecraft_name;
let mc_user = MinecraftUser {
id: mcid.to_string(),
name: mcname.to_string(),
};
Some(mc_user)
}
fn rem_account(_discord_id: u64) -> bool {
use self::schema::minecrafters::dsl::*;
// Retrieve MC account for whitelist removal
let user: Option<MinecraftUser> = sel_mc_account(_discord_id);
if user.is_none() {
// User was never whitelisted or manually removed
return false;
}
// Overwrite with val
let user: &MinecraftUser = &user.unwrap();
// Attempt whitelist removal, if result is name not exist get uuid history
let res = whitelist_account(&MinecraftUser {
id: user.id.to_string(),
name: user.name.to_string(),
}, false);
match res {
Err(MCWhitelistError::WhitelistError(WhitelistErrorKind::NonExistingPlayer, _)) => {
println!("[Log] Performing deep search to remove player from whitelist");
let uuid_history: Option<Vec<MinecraftUsernameHistory>> = get_mc_uuid_history(&user.id);
if uuid_history.is_none() {
println!("[WARN] NO UUID HISTORY FOUND");
return false;
}
// Another overwrite
let uuid_history: Vec<MinecraftUsernameHistory> = uuid_history.unwrap();
// Get last value in list, assumed newest username
let new_name: &MinecraftUsernameHistory = uuid_history.last().unwrap();
// Get UUID from new user
let new_uuid: Option<Vec<MinecraftUser>> = get_mc_uuid(&new_name.name);
if new_uuid.is_none() {
println!("[WARN] UUID NOT FOUND");
return false;
}
let new_uuid: &MinecraftUser = &new_uuid.unwrap()[0];
// Issue whitelist removal command
let retry_res = whitelist_account(&new_uuid, false);
match retry_res {
Ok(()) => { }
Err(_) => {
println!("[WARN] FAILED TO REMOVE PLAYER FROM WHITELIST!");
return false;
}
}
}
_ => { }
}
let connection = POOL.get().unwrap();
let num_del = diesel::delete(minecrafters.filter(discord_id.eq(_discord_id)))
.execute(&connection)
.expect("Error deleting user by discord id");
num_del > 0
}
fn get_mc_uuid_history(uuid: &str) -> Option<Vec<MinecraftUsernameHistory>> {
let client = reqwest::Client::new();
// Will panic if cannot connect to Mojang
let address: Url = Url::parse(&format!("{}/{}/names", MOJANG_GET_HISTORY, uuid)).unwrap();
let resp = client.get(address).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
fn get_mc_uuid(username: &str) -> Option<Vec<MinecraftUser>> {
let client = reqwest::Client::new();
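// The profiles endpoint takes a JSON array of usernames and returns an array of matching profiles, hence the single name is wrapped in an array below.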
let payload = json!([&username]);
println!("{:#?}", payload);
// Will panic if cannot connect to Mojang
let resp = client.post(MOJANG_GET_UUID).json(&payload).send();
match resp {
Ok(mut val) => Some(serde_json::from_str(&val.text().unwrap()).unwrap()),
Err(why) => {
println!("Error retrieving profile: {:?}", why);
None
}
}
}
#[command]
fn unlink(ctx: &mut Context, msg: &Message, _args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
// Check if channel is subscriber channel (and not a direct message)
if &discord_vals.channel_id == msg.channel_id.as_u64() {
msg.channel_id.broadcast_typing(&ctx)?;
let mut response = "Your Minecraft account has been unlinked successfully.";
let success = rem_account(*msg.author.id.as_u64());
if !success {
response = "You were never whitelisted or there was an error trying to remove you from the whitelist.";
}
msg.reply(
&ctx,
response.to_string(),
)?;
}
Ok(())
}
#[command]
fn mclink(ctx: &mut Context, msg: &Message, mut args: Args) -> CommandResult {
let discord_vals: DiscordConfig = get_config().discord;
let sender_id = *msg.author.id.as_u64();
// Check if channel is minecraft whitelisting channel (and not a direct message)
if &discord_vals.channel_id!= msg.channel_id.as_u64() {
return Ok(());
}
// User did not reply with their Minecraft name
if args.is_empty() {
msg.reply(
&ctx,
"Please send me your Minecraft: Java Edition username.\nExample: `!mclink TheDunkel`".to_string(),
)?;
return Ok(());
}
let existing_user = sel_mc_account(sender_id);
if existing_user.is_some() {
msg.reply(
&ctx,
"You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`".to_string(),
)?;
return Ok(());
}
// Retrieve the user's current MC UUID
let json: Option<Vec<MinecraftUser>> = get_mc_uuid(&args.single::<String>().unwrap());
// If resulting array is empty, then username is not found
if json.is_none() {
msg.reply(
&ctx,
"Username not found. Windows 10, Mobile, and Console Editions cannot join.",
)?;
return Ok(());
}
// Overwrite json removing the Some()
let json: Vec<MinecraftUser> = json.unwrap();
let mut response = "There was a system issue linking your profile. Please try again later.";
// Refer to add_account function, act accordingly
let ret_val = add_accounts(sender_id, &json[0]);
match ret_val {
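// diesel's execute() returns the number of affected rows, so Ok(1) means exactly one link row was inserted.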
Ok(1) => {
// Issue requests to servers to whitelist
let ret = whitelist_account(&json[0], true);
match ret {
Ok(()) => {
let sender_data: Option<Member> = msg.member(&ctx.cache);
if sender_data.is_some() {
msg.author.direct_message(&ctx, |m| {
// IGNORE THIS I DON'T WANT TO USE THIS RESULT
m.content(format!(
"Your Minecraft account `{}` has been successfully linked.
Please check #minecraft channel pins for server details and FAQ.
**If you leave Mooncord for any reason, you will be removed from the whitelist**",
json[0].name
))
})?;
}
return Ok(())
}
Err(_) => {
response = "Unable to contact one or more game servers. Please try again later.";
rem_account(sender_id);
}
}
}
Err(DieselError::DatabaseError(e, info)) => {
let msg = info.message().to_string();
println!("{}", msg);
match e {
DatabaseErrorKind::UniqueViolation => {
// whack
if msg.contains("discord_id") {
response = "You have already linked your account.\nYou may only have one linked account at a time.\nTo unlink, please type `!unlink`";
} else if msg.contains("minecraft_uuid") {
response = "Somebody has linked this Minecraft account already.\nPlease contact Dunkel#0001 for assistance.";
}
}
DatabaseErrorKind::UnableToSendCommand => {
response = "Unable to contact MySQL server. Please try again later.";
}
_ => { }
};
}
_ => { }
};
msg.reply(
&ctx,
response.to_string(),
)?;
Ok(())
} | client.with_framework(
StandardFramework::new()
.configure(|c| c.prefix("!"))
.group(&GENERAL_GROUP), | random_line_split |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5..=5, 2..=4, 1..=4)) {
//! // Generates matrices with elements in the range -5..=5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as the
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5..=5, Const::<3>, 0..=10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5..5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5..=6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
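///
/// As a minimal illustrative sketch of those conversions (the concrete dimensions here are
/// arbitrary choices for the example):
///
/// ```
/// use nalgebra::proptest::DimRange;
/// use nalgebra::{Const, Dyn};
///
/// // A fixed dimension converts into a degenerate range containing only itself.
/// let fixed: DimRange<Const<3>> = Const::<3>.into();
/// assert_eq!(fixed.to_range_inclusive(), 3..=3);
///
/// // An inclusive `usize` range converts into a range of `Dyn` dimensions.
/// let dynamic: DimRange<Dyn> = (5..=6usize).into();
/// assert_eq!(dynamic.to_range_inclusive(), 5..=6);
/// ```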
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self {
DimRange(range)
}
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples | /// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0..5i32, Const::<3>, 0..=5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
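///
/// For example, a minimal sketch of a property test over column vectors (the concrete value
/// range and length range below are arbitrary choices for illustration):
///
/// ```
/// use nalgebra::proptest::vector;
/// use proptest::prelude::*;
///
/// proptest! {
///     # /*
///     #[test]
///     # */
///     fn my_vector_test(v in vector(-5..=5i32, 1..=8)) {
///         // The generated length stays within the requested range...
///         prop_assert!(v.len() >= 1 && v.len() <= 8);
///         // ...and so do the generated elements.
///         prop_assert!(v.iter().all(|x_i| *x_i >= -5 && *x_i <= 5));
///     }
/// }
///
/// # fn main() { my_vector_test(); }
/// ```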
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
} | /// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn}; | random_line_split |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5..=5, 2..=4, 1..=4)) {
//! // Generates matrices with elements in the range -5..=5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as the
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5..=5, Const::<3>, 0..=10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5..5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5..=6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
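///
/// As a minimal illustrative sketch of those conversions (the concrete dimensions here are
/// arbitrary choices for the example):
///
/// ```
/// use nalgebra::proptest::DimRange;
/// use nalgebra::{Const, Dyn};
///
/// // A fixed dimension converts into a degenerate range containing only itself.
/// let fixed: DimRange<Const<3>> = Const::<3>.into();
/// assert_eq!(fixed.to_range_inclusive(), 3..=3);
///
/// // An inclusive `usize` range converts into a range of `Dyn` dimensions.
/// let dynamic: DimRange<Dyn> = (5..=6usize).into();
/// assert_eq!(dynamic.to_range_inclusive(), 5..=6);
/// ```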
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self |
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples
/// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn};
/// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0..5i32, Const::<3>, 0..=5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
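///
/// For example, a minimal sketch of a property test over column vectors (the concrete value
/// range and length range below are arbitrary choices for illustration):
///
/// ```
/// use nalgebra::proptest::vector;
/// use proptest::prelude::*;
///
/// proptest! {
///     # /*
///     #[test]
///     # */
///     fn my_vector_test(v in vector(-5..=5i32, 1..=8)) {
///         // The generated length stays within the requested range...
///         prop_assert!(v.len() >= 1 && v.len() <= 8);
///         // ...and so do the generated elements.
///         prop_assert!(v.iter().all(|x_i| *x_i >= -5 && *x_i <= 5));
///     }
/// }
///
/// # fn main() { my_vector_test(); }
/// ```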
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
}
| {
DimRange(range)
} | identifier_body |
mod.rs | //! `proptest`-related features for `nalgebra` data structures.
//!
//! **This module is only available when the `proptest-support` feature is enabled in `nalgebra`**.
//!
//! `proptest` is a library for *property-based testing*. While similar to `QuickCheck`,
//! which may be more familiar to some users, it has a more sophisticated design that
//! provides users with automatic invariant-preserving shrinking. This means that when using
//! `proptest`, you rarely need to write your own shrinkers - which is usually very difficult -
//! and can instead get this "for free". Moreover, `proptest` does not rely on a canonical
//! `Arbitrary` trait implementation like `QuickCheck`, though it does also provide this. For
//! more information, check out the [proptest docs](https://docs.rs/proptest/0.10.1/proptest/)
//! and the [proptest book](https://altsysrq.github.io/proptest-book/intro.html).
//!
//! This module provides users of `nalgebra` with tools to work with `nalgebra` types in
//! `proptest` tests. At present, this integration is at an early stage, and only
//! provides tools for generating matrices and vectors, and not any of the geometry types.
//! There are essentially two ways of using this functionality:
//!
//! - Using the [matrix](fn.matrix.html) function to generate matrices with constraints
//! on dimensions and elements.
//! - Relying on the `Arbitrary` implementation of `OMatrix`.
//!
//! The first variant is almost always preferred in practice. Read on to discover why.
//!
//! ### Using free function strategies
//!
//! In `proptest`, it is usually preferable to have free functions that generate *strategies*.
//! Currently, the [matrix](fn.matrix.html) function fills this role. The analogous function for
//! column vectors is [vector](fn.vector.html). Let's take a quick look at how it may be used:
//! ```
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test(a in matrix(-5..=5, 2..=4, 1..=4)) {
//! // Generates matrices with elements in the range -5..=5, rows in 2..=4 and
//! // columns in 1..=4.
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! In the above example, we generate matrices with constraints on the elements, as well as
//! on the allowed dimensions. When a failing example is found, the resulting shrinking process
//! will preserve these invariants. We can use this to compose more advanced strategies.
//! For example, let's consider a toy example where we need to generate pairs of matrices
//! with exactly 3 rows fixed at compile-time and the same number of columns, but we want the
//! number of columns to vary. One way to do this is to use `proptest` combinators in combination
//! with [matrix](fn.matrix.html) as follows:
//!
//! ```
//! use nalgebra::{Dyn, OMatrix, Const};
//! use nalgebra::proptest::matrix;
//! use proptest::prelude::*;
//!
//! type MyMatrix = OMatrix<i32, Const::<3>, Dyn>;
//!
//! /// Returns a strategy for pairs of matrices with `U3` rows and the same number of
//! /// columns.
//! fn matrix_pairs() -> impl Strategy<Value=(MyMatrix, MyMatrix)> {
//! matrix(-5..=5, Const::<3>, 0..=10)
//! // We first generate the initial matrix `a`, and then depending on the concrete
//! // instances of `a`, we pick a second matrix with the same number of columns
//! .prop_flat_map(|a| {
//! let b = matrix(-5..5, Const::<3>, a.ncols());
//! // This returns a new tuple strategy where we keep `a` fixed while
//! // the second item is a strategy that generates instances with the same
//! // dimensions as `a`
//! (Just(a), b)
//! })
//! }
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_test((a, b) in matrix_pairs()) {
//! // Let's double-check that the two matrices do indeed have the same number of
//! // columns
//! prop_assert_eq!(a.ncols(), b.ncols());
//! }
//! }
//!
//! # fn main() { my_test(); }
//! ```
//!
//! ### The `Arbitrary` implementation
//!
//! If you don't care about the dimensions of matrices, you can write tests like these:
//!
//! ```
//! use nalgebra::{DMatrix, DVector, Dyn, Matrix3, OMatrix, Vector3, U3};
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn test_dynamic(matrix: DMatrix<i32>) {
//! // This will generate arbitrary instances of `DMatrix` and also attempt
//! // to shrink/simplify them when test failures are encountered.
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_static_and_mixed(matrix: Matrix3<i32>, matrix2: OMatrix<i32, U3, Dyn>) {
//! // Test some property involving these matrices
//! }
//!
//! # /*
//! #[test]
//! # */
//! fn test_vectors(fixed_size_vector: Vector3<i32>, dyn_vector: DVector<i32>) {
//! // Test some property involving these vectors
//! }
//! }
//!
//! # fn main() { test_dynamic(); test_static_and_mixed(); test_vectors(); }
//! ```
//!
//! While this may be convenient, the default strategies for built-in types in `proptest` can
//! generate *any* number, including integers large enough to easily lead to overflow when used in
//! matrix operations, or even infinity or NaN values for floating-point types. Therefore
//! `Arbitrary` is rarely the method of choice for writing property-based tests.
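//!
//! As a sketch of the constrained alternative, element bounds can be enforced through
//! [matrix](fn.matrix.html) instead. The concrete bounds and test name below are arbitrary
//! choices made purely for illustration:
//!
//! ```
//! use nalgebra::proptest::matrix;
//! use nalgebra::Const;
//! use proptest::prelude::*;
//!
//! proptest! {
//! # /*
//! #[test]
//! # */
//! fn my_bounded_test(a in matrix(-10.0..10.0f64, Const::<3>, 0..=5)) {
//! // Every element is finite and inside the requested range, which the
//! // unconstrained `Arbitrary` strategy for `f64` does not guarantee.
//! prop_assert!(a.iter().all(|x| x.is_finite() && *x >= -10.0 && *x < 10.0));
//! }
//! }
//!
//! # fn main() { my_bounded_test(); }
//! ```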
//!
//! ### Notes on shrinking
//!
//! Due to some limitations of the current implementation, shrinking takes place by first
//! shrinking the matrix elements before trying to shrink the dimensions of the matrix.
//! This unfortunately often leads to the fact that a large number of shrinking iterations
//! are necessary to find a (nearly) minimal failing test case. As a workaround for this,
//! you can increase the maximum number of shrinking iterations when debugging. To do this,
//! simply set the `PROPTEST_MAX_SHRINK_ITERS` variable to a high number. For example:
//!
//! ```text
//! PROPTEST_MAX_SHRINK_ITERS=100000 cargo test my_failing_test
//! ```
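//!
//! Alternatively, the same limit can be raised from within the test itself via
//! `ProptestConfig`. This is a sketch only: the `max_shrink_iters` field and the
//! `proptest_config` attribute belong to `proptest` and may differ slightly between
//! versions.
//!
//! ```
//! use proptest::prelude::*;
//!
//! proptest! {
//! #![proptest_config(ProptestConfig {
//! max_shrink_iters: 100_000,
//! .. ProptestConfig::default()
//! })]
//! # /*
//! #[test]
//! # */
//! fn my_patient_test(x in any::<i32>()) {
//! // Test some property of `x`; failures now get many more shrink steps.
//! }
//! }
//!
//! # fn main() { my_patient_test(); }
//! ```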
use crate::allocator::Allocator;
use crate::{Const, DefaultAllocator, Dim, DimName, Dyn, OMatrix, Scalar, U1};
use proptest::arbitrary::Arbitrary;
use proptest::collection::vec;
use proptest::strategy::{BoxedStrategy, Just, NewTree, Strategy, ValueTree};
use proptest::test_runner::TestRunner;
use std::ops::RangeInclusive;
/// Parameters for arbitrary matrix generation.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct MatrixParameters<NParameters, R, C> {
/// The range of rows that may be generated.
pub rows: DimRange<R>,
/// The range of columns that may be generated.
pub cols: DimRange<C>,
/// Parameters for the `Arbitrary` implementation of the scalar values.
pub value_parameters: NParameters,
}
/// A range of allowed dimensions for use in generation of matrices.
///
/// The `DimRange` type is used to encode the range of dimensions that can be used for generation
/// of matrices with `proptest`. In most cases, you do not need to concern yourself with
/// `DimRange` directly, as it supports conversion from other types such as `U3` or inclusive
/// ranges such as `5..=6`. The latter example corresponds to dimensions from (inclusive)
/// `Dyn(5)` to `Dyn(6)` (inclusive).
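///
/// A short sketch of these conversions (the concrete dimensions are arbitrary):
///
/// ```
/// use nalgebra::proptest::DimRange;
/// use nalgebra::{Const, Dyn};
///
/// // A fixed dimension becomes a degenerate, single-value range.
/// let fixed: DimRange<Const<3>> = Const::<3>.into();
/// assert_eq!(fixed.to_range_inclusive(), 3..=3);
///
/// // An inclusive `usize` range becomes a range of `Dyn` dimensions.
/// let dynamic: DimRange<Dyn> = (5..=6).into();
/// assert_eq!(dynamic.to_range_inclusive(), 5..=6);
/// ```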
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DimRange<D = Dyn>(RangeInclusive<D>);
impl<D: Dim> DimRange<D> {
/// The lower bound for dimensions generated.
pub fn lower_bound(&self) -> D {
*self.0.start()
}
/// The upper bound for dimensions generated.
pub fn upper_bound(&self) -> D {
*self.0.end()
}
}
impl<D: Dim> From<D> for DimRange<D> {
fn from(dim: D) -> Self {
DimRange(dim..=dim)
}
}
impl<D: Dim> From<RangeInclusive<D>> for DimRange<D> {
fn from(range: RangeInclusive<D>) -> Self {
DimRange(range)
}
}
impl From<RangeInclusive<usize>> for DimRange<Dyn> {
fn from(range: RangeInclusive<usize>) -> Self {
DimRange::from(Dyn(*range.start())..=Dyn(*range.end()))
}
}
impl<D: Dim> DimRange<D> {
/// Converts the `DimRange` into an instance of `RangeInclusive`.
pub fn to_range_inclusive(&self) -> RangeInclusive<usize> {
self.lower_bound().value()..=self.upper_bound().value()
}
}
impl From<usize> for DimRange<Dyn> {
fn from(dim: usize) -> Self {
DimRange::from(Dyn(dim))
}
}
/// The default range used for Dyn dimensions when generating arbitrary matrices.
fn dynamic_dim_range() -> DimRange<Dyn> {
DimRange::from(0..=6)
}
/// Create a strategy to generate matrices containing values drawn from the given strategy,
/// with rows and columns in the provided ranges.
///
/// ## Examples
/// ```
/// use nalgebra::proptest::matrix;
/// use nalgebra::{OMatrix, Const, Dyn};
/// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_test(a in matrix(0..5i32, Const::<3>, 0..=5)) {
/// // Let's make sure we've got the correct type first
/// let a: OMatrix<_, Const::<3>, Dyn> = a;
/// prop_assert!(a.nrows() == 3);
/// prop_assert!(a.ncols() <= 5);
/// prop_assert!(a.iter().all(|x_ij| *x_ij >= 0 && *x_ij < 5));
/// }
/// }
///
/// # fn main() { my_test(); }
/// ```
///
/// ## Limitations
/// The current implementation has some limitations that lead to suboptimal shrinking behavior.
/// See the [module-level documentation](index.html) for more.
pub fn matrix<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: impl Into<DimRange<R>>,
cols: impl Into<DimRange<C>>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
matrix_(value_strategy, rows.into(), cols.into())
}
/// Same as `matrix`, but without the additional anonymous generic types
fn matrix_<R, C, ScalarStrategy>(
value_strategy: ScalarStrategy,
rows: DimRange<R>,
cols: DimRange<C>,
) -> MatrixStrategy<ScalarStrategy, R, C>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, R, C>,
{
let nrows = rows.lower_bound().value()..=rows.upper_bound().value();
let ncols = cols.lower_bound().value()..=cols.upper_bound().value();
// Even though we can use this function to generate fixed-size matrices,
// we currently generate all matrices with heap allocated Vec data.
// TODO: Avoid heap allocation for fixed-size matrices.
// Doing this *properly* would probably require us to implement a custom
// strategy and valuetree with custom shrinking logic, which is not trivial
// Perhaps more problematic, however, is the poor shrinking behavior the current setup leads to.
// Shrinking in proptest basically happens in "reverse" of the combinators, so
// by first generating the dimensions and then the elements, we get shrinking that first
// tries to completely shrink the individual elements before trying to reduce the dimension.
// This is clearly the opposite of what we want. I can't find any good way around this
// short of writing our own custom value tree, which we should probably do at some point.
// TODO: Custom implementation of value tree for better shrinking behavior.
let strategy = nrows
.prop_flat_map(move |nrows| (Just(nrows), ncols.clone()))
.prop_flat_map(move |(nrows, ncols)| {
(
Just(nrows),
Just(ncols),
vec(value_strategy.clone(), nrows * ncols),
)
})
.prop_map(|(nrows, ncols, values)| {
// Note: R/C::from_usize will panic if nrows/ncols does not fit in the dimension type.
// However, this should never fail, because we should only be generating
// this stuff in the first place
OMatrix::from_iterator_generic(R::from_usize(nrows), C::from_usize(ncols), values)
})
.boxed();
MatrixStrategy { strategy }
}
/// Create a strategy to generate column vectors containing values drawn from the given strategy,
/// with length in the provided range.
///
/// This is a convenience function for calling
/// [`matrix(value_strategy, length, U1)`](fn.matrix.html) and should
/// be used when you only want to generate column vectors, as it's simpler and makes the intent
/// clear.
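///
/// ## Example
/// A minimal sketch, mirroring the `matrix` example above (the bounds and lengths are
/// arbitrary illustrative choices):
/// ```
/// use nalgebra::proptest::vector;
/// use nalgebra::{OVector, Dyn};
/// use proptest::prelude::*;
///
/// proptest! {
/// # /*
/// #[test]
/// # */
/// fn my_vector_test(v in vector(0..5i32, 1..=8)) {
/// // Check the type, the generated length and the element bounds.
/// let v: OVector<_, Dyn> = v;
/// prop_assert!(v.len() >= 1 && v.len() <= 8);
/// prop_assert!(v.iter().all(|x| *x >= 0 && *x < 5));
/// }
/// }
///
/// # fn main() { my_vector_test(); }
/// ```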
pub fn vector<D, ScalarStrategy>(
value_strategy: ScalarStrategy,
length: impl Into<DimRange<D>>,
) -> MatrixStrategy<ScalarStrategy, D, U1>
where
ScalarStrategy: Strategy + Clone + 'static,
ScalarStrategy::Value: Scalar,
D: Dim,
DefaultAllocator: Allocator<ScalarStrategy::Value, D>,
{
matrix_(value_strategy, length.into(), Const::<1>.into())
}
impl<NParameters, R, C> Default for MatrixParameters<NParameters, R, C>
where
NParameters: Default,
R: DimName,
C: DimName,
{
fn default() -> Self {
Self {
rows: DimRange::from(R::name()),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, R> Default for MatrixParameters<NParameters, R, Dyn>
where
NParameters: Default,
R: DimName,
{
fn | () -> Self {
Self {
rows: DimRange::from(R::name()),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters, C> Default for MatrixParameters<NParameters, Dyn, C>
where
NParameters: Default,
C: DimName,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: DimRange::from(C::name()),
value_parameters: NParameters::default(),
}
}
}
impl<NParameters> Default for MatrixParameters<NParameters, Dyn, Dyn>
where
NParameters: Default,
{
fn default() -> Self {
Self {
rows: dynamic_dim_range(),
cols: dynamic_dim_range(),
value_parameters: NParameters::default(),
}
}
}
impl<T, R, C> Arbitrary for OMatrix<T, R, C>
where
T: Scalar + Arbitrary,
<T as Arbitrary>::Strategy: Clone,
R: Dim,
C: Dim,
MatrixParameters<T::Parameters, R, C>: Default,
DefaultAllocator: Allocator<T, R, C>,
{
type Parameters = MatrixParameters<T::Parameters, R, C>;
fn arbitrary_with(args: Self::Parameters) -> Self::Strategy {
let value_strategy = T::arbitrary_with(args.value_parameters);
matrix(value_strategy, args.rows, args.cols)
}
type Strategy = MatrixStrategy<T::Strategy, R, C>;
}
/// A strategy for generating matrices.
#[derive(Debug, Clone)]
pub struct MatrixStrategy<NStrategy, R: Dim, C: Dim>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
// For now we only internally hold a boxed strategy. The reason for introducing this
// separate wrapper struct is so that we can replace the strategy logic with custom logic
// later down the road without introducing significant breaking changes
strategy: BoxedStrategy<OMatrix<NStrategy::Value, R, C>>,
}
impl<NStrategy, R, C> Strategy for MatrixStrategy<NStrategy, R, C>
where
NStrategy: Strategy,
NStrategy::Value: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<NStrategy::Value, R, C>,
{
type Tree = MatrixValueTree<NStrategy::Value, R, C>;
type Value = OMatrix<NStrategy::Value, R, C>;
fn new_tree(&self, runner: &mut TestRunner) -> NewTree<Self> {
let underlying_tree = self.strategy.new_tree(runner)?;
Ok(MatrixValueTree {
value_tree: underlying_tree,
})
}
}
/// A value tree for matrices.
pub struct MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
// For now we only wrap a boxed value tree. The reason for wrapping is that this allows us
// to swap out the value tree logic down the road without significant breaking changes.
value_tree: Box<dyn ValueTree<Value = OMatrix<T, R, C>>>,
}
impl<T, R, C> ValueTree for MatrixValueTree<T, R, C>
where
T: Scalar,
R: Dim,
C: Dim,
DefaultAllocator: Allocator<T, R, C>,
{
type Value = OMatrix<T, R, C>;
fn current(&self) -> Self::Value {
self.value_tree.current()
}
fn simplify(&mut self) -> bool {
self.value_tree.simplify()
}
fn complicate(&mut self) -> bool {
self.value_tree.complicate()
}
}
| default | identifier_name |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"),
)
.await
{
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections |
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn set_html(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
}
| {
peer_connections.extend(node_peer_connections.into_iter());
} | conditional_block |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"),
)
.await
{
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections {
peer_connections.extend(node_peer_connections.into_iter());
}
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn | (&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
}
| set_html | identifier_name |
probe_reports.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use {
failure::{Error, ResultExt},
fidl::endpoints::ServiceMarker,
fidl_fuchsia_overnet::ServiceConsumerProxyInterface,
fidl_fuchsia_overnet_protocol::{
DiagnosticMarker, DiagnosticProxy, LinkDiagnosticInfo, NodeDescription, NodeId,
PeerConnectionDiagnosticInfo, ProbeResult, ProbeSelector,
},
futures::{
future::{select, Either},
prelude::*,
},
std::{collections::HashMap, time::Duration},
structopt::StructOpt,
};
const PROBE_TIMEOUT: Duration = Duration::from_millis(500);
const LIST_PEERS_TIMEOUT: Duration = Duration::from_millis(500);
async fn timeout_after<R>(
fut: impl Unpin + Future<Output = Result<R, Error>>,
dur: Duration,
timeout_result: Error,
) -> Result<R, Error> {
let (tx, rx) = futures::channel::oneshot::channel();
std::thread::spawn(move || {
std::thread::sleep(dur);
let _ = tx.send(timeout_result);
});
match select(fut, rx).await {
Either::Left((r, _)) => r,
Either::Right((Ok(r), _)) => Err(r),
Either::Right((_, _)) => Err(failure::format_err!("Canceled timeout")),
}
}
async fn probe_node(
mut node_id: NodeId,
probe_bits: ProbeSelector,
) -> Result<(NodeId, ProbeResult), Error> {
timeout_after(
async move {
let (s, p) = fidl::Channel::create().context("failed to create zx channel")?;
hoist::connect_as_service_consumer()?.connect_to_service(
&mut node_id,
DiagnosticMarker::NAME,
s,
)?;
let probe_result = DiagnosticProxy::new(
fidl::AsyncChannel::from_channel(p).context("failed to make async channel")?,
)
.probe(probe_bits)
.await?;
Ok((node_id, probe_result))
}
.boxed(),
PROBE_TIMEOUT,
failure::format_err!("Probe timed out"),
)
.await
}
// List peers, but wait for things to settle out first
async fn list_peers() -> Result<(NodeId, Vec<NodeId>), Error> {
let svc = hoist::connect_as_service_consumer()?;
// Do an initial query without timeout
let mut peers = svc.list_peers().await?;
// Now loop until we see an error
loop {
match timeout_after(
async { Ok(svc.list_peers().await?) }.boxed(),
LIST_PEERS_TIMEOUT,
failure::format_err!("Timeout"), | {
Ok(r) => peers = r,
Err(_) => break,
}
}
let own_id = (|| -> Result<NodeId, Error> {
for peer in peers.iter() {
if peer.is_self {
return Ok(peer.id);
}
}
failure::bail!("Cannot find myself");
})()?;
let peers = peers.into_iter().map(|peer| peer.id).collect();
Ok((own_id, peers))
}
async fn probe(
mut descriptions: Option<&mut HashMap<NodeId, NodeDescription>>,
mut peer_connections: Option<&mut Vec<PeerConnectionDiagnosticInfo>>,
mut links: Option<&mut Vec<LinkDiagnosticInfo>>,
) -> Result<NodeId, Error> {
let probe_bits = ProbeSelector::empty()
| descriptions.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::NodeDescription)
| peer_connections
.as_ref()
.map_or(ProbeSelector::empty(), |_| ProbeSelector::PeerConnections)
| links.as_ref().map_or(ProbeSelector::empty(), |_| ProbeSelector::Links);
assert_ne!(probe_bits, ProbeSelector::empty());
let (own_id, peers) = list_peers().await?;
let mut futures: futures::stream::FuturesUnordered<_> =
peers.into_iter().map(|peer| probe_node(peer, probe_bits)).collect();
while let Some((node_id, result)) = futures.try_next().await? {
if let Some(node_description) = result.node_description {
if let Some(ref mut descriptions) = descriptions {
descriptions.insert(node_id, node_description);
}
}
if let Some(node_peer_connections) = result.peer_connections {
for peer_connection in node_peer_connections.iter() {
if let Some(source) = peer_connection.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if peer_connection.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut peer_connections) = peer_connections {
peer_connections.extend(node_peer_connections.into_iter());
}
}
if let Some(node_links) = result.links {
for link in node_links.iter() {
if let Some(source) = link.source {
if node_id != source {
failure::bail!("Invalid source node id {:?} from {:?}", source, node_id);
}
} else {
failure::bail!("No source node id from {:?}", node_id);
}
if link.destination.is_none() {
failure::bail!("No destination node id from {:?}", node_id);
}
}
if let Some(ref mut links) = links {
links.extend(node_links.into_iter());
}
}
}
Ok(own_id)
}
enum Attr {
HTML(String),
Text(String),
Bool(bool),
}
struct AttrWriter {
attrs: std::collections::BTreeMap<String, Attr>,
}
impl AttrWriter {
fn new() -> Self {
AttrWriter { attrs: std::collections::BTreeMap::new() }
}
fn set_value(&mut self, key: &str, attr: Attr) -> &mut Self {
self.attrs.insert(key.to_string(), attr);
self
}
fn set(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::Text(value.to_string()))
}
fn set_html(&mut self, key: &str, value: &str) -> &mut Self {
self.set_value(key, Attr::HTML(value.to_string()))
}
fn set_bool(&mut self, key: &str, value: bool) -> &mut Self {
self.set_value(key, Attr::Bool(value))
}
fn render(self) -> String {
let mut out = String::new();
for (key, value) in self.attrs.into_iter() {
out += if out.is_empty() { " [" } else { ", " };
out += &key;
match value {
Attr::HTML(s) => {
out += "=<";
out += &s;
out += ">";
}
Attr::Text(s) => {
out += "=\"";
out += &s;
out += "\"";
}
Attr::Bool(true) => out += "=true",
Attr::Bool(false) => out += "=false",
}
}
if !out.is_empty() {
out += "]";
}
out
}
}
struct LabelAttrWriter {
out: String,
}
impl LabelAttrWriter {
fn new() -> LabelAttrWriter {
LabelAttrWriter { out: "<table border=\"0\">".to_string() }
}
fn set<T: std::fmt::Display>(mut self, name: &str, value: Option<T>) -> Self {
if let Some(value) = value {
self.out += &format!("<tr><td>{}</td><td>{}</td></tr>", name, value);
}
self
}
fn render(self) -> String {
self.out + "</table>"
}
}
#[derive(StructOpt)]
pub struct FullMapArgs {
#[structopt(short, long)]
exclude_self: bool,
}
pub async fn full_map(args: FullMapArgs) -> Result<String, Error> {
let mut descriptions = HashMap::new();
let mut peer_connections = Vec::new();
let mut links = Vec::new();
let own_id =
probe(Some(&mut descriptions), Some(&mut peer_connections), Some(&mut links)).await?;
let mut out = String::new();
out += "digraph G {\n";
for (node_id, description) in descriptions.iter() {
let is_self = node_id.id == own_id.id;
if args.exclude_self && is_self {
continue;
}
let mut attrs = AttrWriter::new();
if is_self {
attrs.set("shape", "box");
}
let mut label = String::new();
if let Some(os) = description.operating_system {
label += &format!("{:?}", os);
label += " ";
}
if let Some(imp) = description.implementation {
label += &format!("{:?}", imp);
label += ":";
}
label += &format!("{}", node_id.id);
attrs.set("label", &label);
out += &format!(" _{}{}\n", node_id.id, attrs.render());
}
for conn in peer_connections.iter() {
let source = conn.source.unwrap();
let dest = conn.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs
.set(
"color",
match conn.is_client {
None => "gray",
Some(true) => "red",
Some(false) => "magenta",
},
)
.set("weight", "0.9")
.set_bool("constraint", true);
attrs.set(
"style",
match conn.is_established {
None => "dotted",
Some(true) => "solid",
Some(false) => "dashed",
},
);
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("recv", conn.received_packets)
.set("sent", conn.sent_packets)
.set("lost", conn.lost_packets)
.set("rtt", conn.round_trip_time_microseconds)
.set("cwnd", conn.congestion_window_bytes)
.set("msgsent", conn.messages_sent)
.set("msgbsent", conn.bytes_sent)
.set("connect_to_service_sends", conn.connect_to_service_sends)
.set("connect_to_service_send_bytes", conn.connect_to_service_send_bytes)
.set("update_node_description_sends", conn.update_node_description_sends)
.set("update_node_description_send_bytes", conn.update_node_description_send_bytes)
.set("update_link_status_sends", conn.update_link_status_sends)
.set("update_link_status_send_bytes", conn.update_link_status_send_bytes)
.set("update_link_status_ack_sends", conn.update_link_status_ack_sends)
.set("update_link_status_ack_send_bytes", conn.update_link_status_ack_send_bytes)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
for link in links {
let source = link.source.unwrap();
let dest = link.destination.unwrap();
if args.exclude_self && (source.id == own_id.id || dest.id == own_id.id) {
continue;
}
let mut attrs = AttrWriter::new();
attrs.set("color", "blue").set("weight", "1.0").set("penwidth", "4.0");
attrs.set_html(
"label",
&LabelAttrWriter::new()
.set("id", link.source_local_id)
.set("recv", link.received_packets)
.set("sent", link.sent_packets)
.set("recvb", link.received_bytes)
.set("sentb", link.sent_bytes)
.set("pings", link.pings_sent)
.set("fwd", link.packets_forwarded)
.set("rtt", link.round_trip_time_microseconds)
.render(),
);
out += &format!(" _{} -> _{}{}\n", source.id, dest.id, attrs.render());
}
out += "}\n";
Ok(out)
} | )
.await | random_line_split |
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though, then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatible with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 1GB (`1 << 30` bytes), and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// effect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
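///
/// A minimal sketch of driving this pass end to end. The crate path, the file names,
/// and the `walrus` load/emit helpers below are illustrative assumptions rather than a
/// guaranteed public API:
///
/// ```no_run
/// # fn main() -> Result<(), anyhow::Error> {
/// let mut module = walrus::Module::from_file("app.wasm")?;
/// wasm_bindgen_threads_xform::Config::new()
///     .maximum_memory(1 << 30)
///     .thread_stack_size(1 << 20)
///     .run(&mut module)?;
/// module.emit_wasm_file("app.threads.wasm")?;
/// # Ok(())
/// # }
/// ```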
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is a non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
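//
// As a concrete (hypothetical) illustration: if `__heap_base` started out at
// 65_530 with `align = 4` and `size = 4`, the reserved slot is aligned up to
// address 65_532 and `__heap_base` is rewritten to 65_536. Because
// (65_532 + 4) / PAGE_SIZE != 65_532 / PAGE_SIZE, one extra page is also
// added to the memory's minimum below.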
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
struct Tls {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
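// Roughly, the body we build below looks like this (pseudo-WAT sketch, not
// the exact encoding):
//
//   call $previous_start              ;; only if one already existed
//   i32.const $addr
//   i32.const 1
//   i32.atomic.rmw.add                ;; fetch-and-increment the thread id
//   if                                ;; nonzero => not the first thread
//     ;; memory.grow a fresh stack and point the stack pointer at its top
//   end
//   ;; then __wbindgen_malloc the TLS area and call __wasm_init_tls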
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
| //... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a function"),
}
} | random_line_split |
|
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though, then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatible with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 1GB (`1 << 30` bytes), and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// effect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
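// Illustrative usage sketch (not part of the original file): driving the pass
// from a hypothetical build step. It only assumes the `Config` API defined
// above plus `walrus::Module`, which is already imported here; the
// `WASM_BINDGEN_THREADS` environment variable read by `Config::new` can be set
// to force the pass on for modules without a shared memory.
#[allow(dead_code)]
fn example_run_pass(module: &mut Module) -> Result<(), Error> {
    let mut config = Config::new();
    config
        .maximum_memory(1 << 30) // 1GB cap for the shared, imported memory
        .thread_stack_size(2 << 20); // 2MB stack for every spawned thread
    // `run` is a no-op unless the module uses shared memory or the env var is set.
    config.run(module)
}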
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
    match delete_synthetic_export(module, name)? {
        walrus::ExportItem::Function(f) => Ok(f),
        _ => bail!("`{}` must be a function", name),
    }
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is a non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
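// Worked example (illustrative only) of the align-up arithmetic above: with
// `align = 4`, `size = 4` and a `__heap_base` of 1029, the address is rounded
// up to 1032, the global is rewritten to 1036, and no extra page is needed
// because 1032 and 1036 still fall on the same 64KiB page.
#[allow(dead_code)]
fn example_align_up() {
    let (heap_base, size, align) = (1029u32, 4u32, 4u32);
    let address = (heap_base + (align - 1)) & !(align - 1);
    let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
    assert_eq!(address, 1032);
    assert_eq!(address + size, 1036);
    assert!(!add_a_page);
}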
struct Tls {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
//... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
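// Worked example (illustrative only) of the stack arithmetic emitted by
// `inject_start` for a non-main thread: `memory.grow` returns the old size in
// pages, so the freshly grown region spans
// `[old_pages * PAGE_SIZE, old_pages * PAGE_SIZE + stack_size)` and the stack
// pointer is set to the top of that region, since the shadow stack grows down.
#[allow(dead_code)]
fn example_child_stack_pointer(old_pages: u32, stack_size: u32) -> u32 {
    assert!(stack_size % PAGE_SIZE == 0);
    old_pages * PAGE_SIZE + stack_size
}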
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a function"),
}
}
lib.rs | use anyhow::{anyhow, bail, Error};
use std::cmp;
use std::env;
use walrus::ir::Value;
use walrus::{ExportItem, GlobalId, GlobalKind, MemoryId, Module};
use walrus::{FunctionId, InitExpr, ValType};
use wasm_bindgen_wasm_conventions as wasm_conventions;
const PAGE_SIZE: u32 = 1 << 16;
/// Configuration for the transformation pass in this module.
///
/// Created primarily through `new` and then executed through `run`.
pub struct Config {
maximum_memory: u32,
thread_stack_size: u32,
enabled: bool,
}
impl Config {
/// Create a new configuration with default settings.
pub fn new() -> Config {
Config {
maximum_memory: 1 << 30, // 1GB
thread_stack_size: 1 << 20, // 1MB
enabled: env::var("WASM_BINDGEN_THREADS").is_ok(),
}
}
/// Is threaded Wasm enabled?
pub fn is_enabled(&self, module: &Module) -> bool {
if self.enabled {
return true;
}
// Compatibility with older LLVM outputs. Newer LLVM outputs, when
// atomics are enabled, emit a shared memory. That's a good indicator
// that we have work to do. If shared memory isn't enabled, though, then
// this isn't an atomic module so there's nothing to do. We still allow,
// though, an environment variable to force us to go down this path to
// remain compatible with older LLVM outputs.
match wasm_conventions::get_memory(module) {
Ok(memory) => module.memories.get(memory).shared,
Err(_) => false,
}
}
/// Specify the maximum amount of memory the wasm module can ever have.
///
/// We'll be specifying that the memory for this wasm module is shared, and
/// all shared memories must have their maximum limit specified (whereas
/// by default Rust/LLVM/LLD don't specify a maximum).
///
/// The default for this option is 1GB, and this can be used to change
/// the maximum memory we'll be specifying.
///
/// The `max` argument is in units of bytes.
///
/// If the maximum memory is already specified this setting won't have any
/// effect.
pub fn maximum_memory(&mut self, max: u32) -> &mut Config {
self.maximum_memory = max;
self
}
/// Specify the stack size for all threads spawned.
///
/// The stack size is typically set by rustc as an argument to LLD and
/// defaults to 1MB for the main thread. All threads spawned by the
/// main thread, however, need to allocate their own stack!
///
/// This configuration option indicates how large the stack of each child
/// thread will be. This will be allocated as part of the `start` function
/// and will be stored in LLVM's global stack pointer.
pub fn thread_stack_size(&mut self, size: u32) -> &mut Config {
self.thread_stack_size = size;
self
}
/// Execute the transformation on the parsed wasm module specified.
///
/// This function will prepare `Module` to be run on multiple threads,
/// performing steps such as:
///
/// * All data segments are switched to "passive" data segments to ensure
/// they're only initialized once (coming later)
/// * If memory is exported from this module, it is instead switched to
/// being imported (with the same parameters).
/// * The imported memory is required to be `shared`, ensuring it's backed
/// by a `SharedArrayBuffer` on the web.
/// * A `global` for a thread ID is injected.
/// * Four bytes in linear memory are reserved for the counter of thread
/// IDs.
/// * A `start` function is injected (or prepended if one already exists)
/// which initializes memory for the first thread and otherwise allocates
/// thread ids for all threads.
///
/// More and/or less may happen here over time, stay tuned!
pub fn run(&self, module: &mut Module) -> Result<(), Error> {
if !self.is_enabled(module) {
return Ok(());
}
let memory = wasm_conventions::get_memory(module)?;
let stack_pointer = wasm_conventions::get_shadow_stack_pointer(module)
.ok_or_else(|| anyhow!("failed to find shadow stack pointer"))?;
let addr = allocate_static_data(module, memory, 4, 4)?;
let mem = module.memories.get_mut(memory);
assert!(mem.shared);
let prev_max = mem.maximum.unwrap();
assert!(mem.import.is_some());
mem.maximum = Some(cmp::max(self.maximum_memory / PAGE_SIZE, prev_max));
assert!(mem.data_segments.is_empty());
let tls = Tls {
init: delete_synthetic_func(module, "__wasm_init_tls")?,
size: delete_synthetic_global(module, "__tls_size")?,
align: delete_synthetic_global(module, "__tls_align")?,
};
inject_start(
module,
tls,
addr,
stack_pointer,
self.thread_stack_size,
memory,
)?;
Ok(())
}
}
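// Hypothetical sketch (not in the original source): older LLVM outputs without
// a shared memory can still be transformed by setting the environment variable
// that `Config::new` consults before falling back to the shared-memory check
// in `is_enabled`.
#[allow(dead_code)]
fn example_force_enable() -> Config {
    env::set_var("WASM_BINDGEN_THREADS", "1");
    Config::new() // `enabled` is now `true` regardless of the module's memory
}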
fn delete_synthetic_func(module: &mut Module, name: &str) -> Result<FunctionId, Error> {
match delete_synthetic_export(module, name)? {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`{}` must be a function", name),
}
}
fn delete_synthetic_global(module: &mut Module, name: &str) -> Result<u32, Error> {
let id = match delete_synthetic_export(module, name)? {
walrus::ExportItem::Global(g) => g,
_ => bail!("`{}` must be a global", name),
};
let g = match module.globals.get(id).kind {
walrus::GlobalKind::Local(g) => g,
walrus::GlobalKind::Import(_) => bail!("`{}` must not be an imported global", name),
};
match g {
InitExpr::Value(Value::I32(v)) => Ok(v as u32),
_ => bail!("`{}` was not an `i32` constant", name),
}
}
fn delete_synthetic_export(module: &mut Module, name: &str) -> Result<ExportItem, Error> {
let item = module
.exports
.iter()
.find(|e| e.name == name)
.ok_or_else(|| anyhow!("failed to find `{}`", name))?;
let ret = item.item;
let id = item.id();
module.exports.delete(id);
Ok(ret)
}
fn allocate_static_data(
module: &mut Module,
memory: MemoryId,
size: u32,
align: u32,
) -> Result<u32, Error> {
// First up, look for a `__heap_base` export which is injected by LLD as
// part of the linking process. Note that `__heap_base` should in theory be
// *after* the stack and data, which means it's at the very end of the
// address space and should be safe for us to inject 4 bytes of data at.
let heap_base = module
.exports
.iter()
.filter(|e| e.name == "__heap_base")
.filter_map(|e| match e.item {
ExportItem::Global(id) => Some(id),
_ => None,
})
.next();
let heap_base = match heap_base {
Some(idx) => idx,
None => bail!("failed to find `__heap_base` for injecting thread id"),
};
// Now we need to bump up `__heap_base` by 4 bytes as we'd like to reserve
// those 4 bytes for our thread id counter. Do lots of validation here to
// make sure that `__heap_base` is a non-mutable integer, and then do
// some logic:
//
// * We require that `__heap_base` is aligned to 4 as that's what the atomic
// will require anyway.
// * We *may* have to add another page to the minimum for this module. If by
// reserving 4 bytes the heap base now lies on a different page then we
// probably went past our minimum page requirement, so we'll need to
// update our memory limits to add one.
//
// Otherwise here we'll rewrite the `__heap_base` global's initializer to be
// 4 larger, reserving us those 4 bytes for a thread id counter.
let (address, add_a_page) = {
let global = module.globals.get_mut(heap_base);
if global.ty != ValType::I32 {
bail!("the `__heap_base` global doesn't have the type `i32`");
}
if global.mutable {
bail!("the `__heap_base` global is unexpectedly mutable");
}
let offset = match &mut global.kind {
GlobalKind::Local(InitExpr::Value(Value::I32(n))) => n,
_ => bail!("`__heap_base` not a locally defined `i32`"),
};
let address = (*offset as u32 + (align - 1)) & !(align - 1); // align up
let add_a_page = (address + size) / PAGE_SIZE != address / PAGE_SIZE;
*offset = (address + size) as i32;
(address, add_a_page)
};
if add_a_page {
let memory = module.memories.get_mut(memory);
memory.initial += 1;
memory.maximum = memory.maximum.map(|m| cmp::max(m, memory.initial));
}
Ok(address)
}
struct Tls {
init: walrus::FunctionId,
size: u32,
align: u32,
}
fn inject_start(
module: &mut Module,
tls: Tls,
addr: u32,
stack_pointer: GlobalId,
stack_size: u32,
memory: MemoryId,
) -> Result<(), Error> {
use walrus::ir::*;
assert!(stack_size % PAGE_SIZE == 0);
let mut builder = walrus::FunctionBuilder::new(&mut module.types, &[], &[]);
let local = module.locals.add(ValType::I32);
let mut body = builder.func_body();
// Call previous start function if one is available. Currently this is
// always true because LLVM injects a call to `__wasm_init_memory` as the
// start function which, well, initializes memory.
if let Some(prev) = module.start.take() {
body.call(prev);
}
// Perform an if/else based on whether we're the first thread or not. Our
// thread ID will be zero if we're the first thread, otherwise it'll be
// nonzero (assuming we don't overflow...)
body.i32_const(addr as i32)
.i32_const(1)
.atomic_rmw(
memory,
AtomicOp::Add,
AtomicWidth::I32,
MemArg {
align: 4,
offset: 0,
},
)
.if_else(
None,
// If our thread id is nonzero then we're the second or greater thread, so
// we give ourselves a stack via memory.grow and we update our stack
// pointer as the default stack pointer is surely wrong for us.
|body| {
// local0 = grow_memory(stack_size);
body.i32_const((stack_size / PAGE_SIZE) as i32)
.memory_grow(memory)
.local_set(local);
// if local0 == -1 then trap
body.block(None, |body| {
let target = body.id();
body.local_get(local)
.i32_const(-1)
.binop(BinaryOp::I32Ne)
.br_if(target)
.unreachable();
});
// stack_pointer = local0 + stack_size
body.local_get(local)
.i32_const(PAGE_SIZE as i32)
.binop(BinaryOp::I32Mul)
.i32_const(stack_size as i32)
.binop(BinaryOp::I32Add)
.global_set(stack_pointer);
},
// If the thread id is zero then the default stack pointer works for
// us.
|_| {},
);
// Afterwards we need to initialize our thread-local state.
let malloc = find_wbindgen_malloc(module)?;
body.i32_const(tls.size as i32)
.i32_const(tls.align as i32)
.drop() // TODO: need to actually respect alignment
.call(malloc)
.call(tls.init);
// Finish off our newly generated function.
let id = builder.finish(Vec::new(), &mut module.funcs);
//... and finally flag it as the new start function
module.start = Some(id);
Ok(())
}
fn find_wbindgen_malloc(module: &Module) -> Result<FunctionId, Error> {
let e = module
.exports
.iter()
.find(|e| e.name == "__wbindgen_malloc")
.ok_or_else(|| anyhow!("failed to find `__wbindgen_malloc`"))?;
match e.item {
walrus::ExportItem::Function(f) => Ok(f),
_ => bail!("`__wbindgen_malloc` wasn't a function"),
}
}
lib.rs | ::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
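// Quick check (illustrative only) that the column-major matrix above maps
// pixel space onto normalized device coordinates: (0, 0) lands on (-1, -1)
// and (w, h) lands on (1, 1). Powers of two are used so the comparison is
// exact in `f32`.
#[allow(dead_code)]
fn example_default_transform_corners() {
    let (w, h) = (1024.0_f32, 512.0_f32);
    let m = default_transform((w, h));
    let apply = |x: f32, y: f32| (m[0][0] * x + m[3][0], m[1][1] * y + m[3][1]);
    assert_eq!(apply(0.0, 0.0), (-1.0, -1.0));
    assert_eq!(apply(w, h), (1.0, 1.0));
}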
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning caching,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
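    // Illustrative sketch (not from the original source) of the situation the
    // doc comment above describes: two draws in one frame with distinct
    // transforms. `keep_cached` marks the section as used so the second draw
    // does not evict its layout. This assumes `DrawBuilder` exposes a
    // `transform(..)` setter, as its `transform` field suggests.
    //
    //     glyph_brush.queue(&section);
    //     glyph_brush.use_queue().transform(left_eye).draw(&mut encoder, &color)?;
    //     glyph_brush.keep_cached(&section);
    //     glyph_brush.queue(&section);
    //     glyph_brush.use_queue().transform(right_eye).draw(&mut encoder, &color)?;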
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolution changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
    cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer, 1 vertex per glyph, that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
lib.rs | ::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning caching,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
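// Illustrative per-frame pattern (not from the original source) for the
// caching behaviour documented above: sections re-queued every frame keep
// hitting the positioning cache, while a section that is skipped for a frame
// is cleared from the cache by the following draw.
//
//     loop {
//         glyph_brush.queue(&fps_counter); // unchanged section: cache hit
//         if show_help {
//             glyph_brush.queue(&help_overlay); // evicted after a frame without it
//         }
//         glyph_brush.use_queue().draw(&mut encoder, &gfx_color)?;
//     }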
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolution changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
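    // Note (illustrative, not from the original source): the resize loop above
    // can usually be avoided altogether by sizing the cache texture when the
    // brush is built, matching the hint in the warning message, e.g.
    //
    //     GlyphBrushBuilder::using_font(font)
    //         .initial_cache_size((1024, 1024))
    //         .build(factory);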
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer, 1 vertex per glyph, that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
lib.rs | brush::{
ab_glyph, legacy, BuiltInLineBreaker, Extra, FontId, GlyphCruncher, GlyphPositioner,
HorizontalAlign, Layout, LineBreak, LineBreaker, OwnedSection, OwnedText, Section,
SectionGeometry, SectionGlyph, SectionGlyphIter, SectionText, Text, VerticalAlign,
};
use crate::pipe::{glyph_pipe, GlyphVertex, IntoDimensions, RawAndFormat};
use gfx::{
format,
handle::{self, RawDepthStencilView, RawRenderTargetView},
texture,
traits::FactoryExt,
};
use glyph_brush::{ab_glyph::*, BrushAction, BrushError, DefaultSectionHasher};
use log::{log_enabled, warn};
use std::{
borrow::Cow,
error::Error,
fmt,
hash::{BuildHasher, Hash},
};
// Type for the generated glyph cache texture
type TexForm = format::U8Norm;
type TexSurface = <TexForm as format::Formatted>::Surface;
type TexChannel = <TexForm as format::Formatted>::Channel;
type TexFormView = <TexForm as format::Formatted>::View;
type TexSurfaceHandle<R> = handle::Texture<R, TexSurface>;
type TexShaderView<R> = handle::ShaderResourceView<R, TexFormView>;
/// Returns the default 4 dimensional matrix orthographic projection used for drawing.
///
/// # Example
///
/// ```
/// # let (screen_width, screen_height) = (1f32, 2f32);
/// let projection = gfx_glyph::default_transform((screen_width, screen_height));
/// ```
///
/// # Example
///
/// ```no_run
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// let projection = gfx_glyph::default_transform(&gfx_color);
/// ```
#[inline]
pub fn default_transform<D: IntoDimensions>(d: D) -> [[f32; 4]; 4] {
let (w, h) = d.into_dimensions();
[
[2.0 / w, 0.0, 0.0, 0.0],
[0.0, 2.0 / h, 0.0, 0.0],
[0.0, 0.0, 1.0, 0.0],
[-1.0, -1.0, 0.0, 1.0],
]
}
/// Object allowing glyph drawing, containing cache state. Manages glyph positioning caching,
/// glyph draw caching & efficient GPU texture cache updating and re-sizing on demand.
///
/// Build using a [`GlyphBrushBuilder`](struct.GlyphBrushBuilder.html).
///
/// # Example
/// ```no_run
/// # use gfx_glyph::{GlyphBrushBuilder};
/// use gfx_glyph::{Section, Text};
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// # let some_other_section = Section::default();
///
/// let section = Section::default().add_text(Text::new("Hello gfx_glyph"));
///
/// glyph_brush.queue(section);
/// glyph_brush.queue(some_other_section);
///
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
///
/// # Caching behaviour
///
/// Calls to [`GlyphBrush::queue`](#method.queue),
/// [`GlyphBrush::glyph_bounds`](#method.glyph_bounds), [`GlyphBrush::glyphs`](#method.glyphs)
/// calculate the positioned glyphs for a section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: ( | gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
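    // Illustrative sketch (not from the original source): queueing with one of
    // the built-in layouts instead of the section's own, e.g. centre-aligned
    // wrapping via the re-exported `Layout`/`HorizontalAlign` types.
    //
    //     let centred = Layout::default_wrap().h_align(HorizontalAlign::Center);
    //     glyph_brush.queue_custom_layout(&section, &centred);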
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolution changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer with 1 vertex per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let | random_line_split |
|
lib.rs | section.
/// This is cached so future calls to any of the methods for the same section are much
/// cheaper. In the case of [`GlyphBrush::queue`](#method.queue) the calculations will also be
/// used for actual drawing.
///
/// The cache for a section will be **cleared** after a
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw) call when that section has not been used since
/// the previous draw call.
pub struct GlyphBrush<R: gfx::Resources, GF: gfx::Factory<R>, F = FontArc, H = DefaultSectionHasher>
{
font_cache_tex: (
gfx::handle::Texture<R, TexSurface>,
gfx_core::handle::ShaderResourceView<R, f32>,
),
texture_filter_method: texture::FilterMethod,
factory: GF,
program: gfx::handle::Program<R>,
draw_cache: Option<DrawnGlyphBrush<R>>,
glyph_brush: glyph_brush::GlyphBrush<GlyphVertex, Extra, F, H>,
// config
depth_test: gfx::state::Depth,
}
impl<R: gfx::Resources, GF: gfx::Factory<R>, F, H> fmt::Debug for GlyphBrush<R, GF, F, H> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "GlyphBrush")
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
F: Font,
{
/// Adds an additional font to the one(s) initially added on build.
///
/// Returns a new [`FontId`](struct.FontId.html) to reference this font.
pub fn add_font(&mut self, font: F) -> FontId {
self.glyph_brush.add_font(font)
}
}
impl<R, GF, F, H> GlyphCruncher<F, Extra> for GlyphBrush<R, GF, F, H>
where
F: Font,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
#[inline]
fn glyphs_custom_layout<'a, 'b, S, L>(
&'b mut self,
section: S,
custom_layout: &L,
) -> SectionGlyphIter<'b>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyphs_custom_layout(section, custom_layout)
}
#[inline]
fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
#[inline]
fn glyph_bounds_custom_layout<'a, S, L>(
&mut self,
section: S,
custom_layout: &L,
) -> Option<Rect>
where
L: GlyphPositioner + Hash,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush
.glyph_bounds_custom_layout(section, custom_layout)
}
}
impl<R, GF, F, H> GlyphBrush<R, GF, F, H>
where
F: Font + Sync,
R: gfx::Resources,
GF: gfx::Factory<R>,
H: BuildHasher,
{
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue(section)
}
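// Illustrative caller-side sketch (not part of the original source; the
// `Section`/`Text` builder calls are assumed from the `glyph_brush` crate, and
// `brush`, `encoder`, `color_view` are assumed to exist in the caller):
// several `queue` calls accumulate sections, then a single draw flushes them,
// re-using the cached layout for any section that has not changed.
//
// brush.queue(Section::default().add_text(Text::new("fps: 60")));
// brush.queue(Section::default().add_text(Text::new("hello world")));
// brush.use_queue().draw(&mut encoder, &color_view)?;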
/// Returns a [`DrawBuilder`](struct.DrawBuilder.html) allowing the queued glyphs to be drawn.
///
/// Drawing will trim the cache, see [caching behaviour](#caching-behaviour).
/// # Example
///
/// ```no_run
/// # fn main() -> Result<(), String> {
/// # let glyph_brush: gfx_glyph::GlyphBrush<gfx_device_gl::Resources, gfx_device_gl::Factory> = unimplemented!();
/// # let gfx_color: gfx_core::handle::RenderTargetView<gfx_device_gl::Resources, gfx::format::Srgba8> = unimplemented!();
/// # let factory: gfx_device_gl::Factory = unimplemented!();
/// # let gfx_encoder: gfx::Encoder<_, _> = factory.create_command_buffer().into();
/// glyph_brush.use_queue().draw(&mut gfx_encoder, &gfx_color)?;
/// # Ok(()) }
/// ```
#[inline]
pub fn use_queue(&mut self) -> DrawBuilder<'_, F, R, GF, H, ()> {
DrawBuilder {
brush: self,
transform: None,
depth_target: None,
}
}
/// Queues a section/layout to be drawn by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times
/// to queue multiple sections for drawing.
///
/// Used to provide custom `GlyphPositioner` logic, if using built-in
/// [`Layout`](enum.Layout.html) simply use [`queue`](struct.GlyphBrush.html#method.queue)
///
/// Benefits from caching, see [caching behaviour](#caching-behaviour).
#[inline]
pub fn queue_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
G: GlyphPositioner,
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.queue_custom_layout(section, custom_layout)
}
/// Queues pre-positioned glyphs to be processed by the next call of
/// [`.use_queue().draw(..)`](struct.DrawBuilder.html#method.draw). Can be called multiple times.
#[inline]
pub fn queue_pre_positioned(
&mut self,
glyphs: Vec<SectionGlyph>,
extra: Vec<Extra>,
bounds: Rect,
) {
self.glyph_brush.queue_pre_positioned(glyphs, extra, bounds)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached_custom_layout<'a, S, G>(&mut self, section: S, custom_layout: &G)
where
S: Into<Cow<'a, Section<'a>>>,
G: GlyphPositioner,
{
self.glyph_brush
.keep_cached_custom_layout(section, custom_layout)
}
/// Retains the section in the cache as if it had been used in the last draw-frame.
///
/// Should not be necessary unless using multiple draws per frame with distinct transforms,
/// see [caching behaviour](#caching-behaviour).
#[inline]
pub fn keep_cached<'a, S>(&mut self, section: S)
where
S: Into<Cow<'a, Section<'a>>>,
{
self.glyph_brush.keep_cached(section)
}
/// Returns the available fonts.
///
/// The `FontId` corresponds to the index of the font data.
#[inline]
pub fn fonts(&self) -> &[F] {
self.glyph_brush.fonts()
}
/// Draws all queued sections
pub(crate) fn draw<C, CV, DV>(
&mut self,
transform: [[f32; 4]; 4],
encoder: &mut gfx::Encoder<R, C>,
target: &CV,
depth_target: Option<&DV>,
) -> Result<(), String>
where
C: gfx::CommandBuffer<R>,
CV: RawAndFormat<Raw = RawRenderTargetView<R>>,
DV: RawAndFormat<Raw = RawDepthStencilView<R>>,
{
let mut brush_action;
loop {
let tex = self.font_cache_tex.0.clone();
brush_action = self.glyph_brush.process_queued(
|rect, tex_data| {
let offset = [rect.min[0] as u16, rect.min[1] as u16];
let size = [rect.width() as u16, rect.height() as u16];
update_texture(encoder, &tex, offset, size, tex_data);
},
to_vertex,
);
match brush_action {
Ok(_) => break,
Err(BrushError::TextureTooSmall { suggested }) => {
let max_image_dimension =
self.factory.get_capabilities().max_texture_size as u32;
let (new_width, new_height) = if (suggested.0 > max_image_dimension
|| suggested.1 > max_image_dimension)
&& (self.glyph_brush.texture_dimensions().0 < max_image_dimension
|| self.glyph_brush.texture_dimensions().1 < max_image_dimension)
{
(max_image_dimension, max_image_dimension)
} else {
suggested
};
if log_enabled!(log::Level::Warn) {
warn!(
"Increasing glyph texture size {old:?} -> {new:?}. \
Consider building with `.initial_cache_size({new:?})` to avoid \
resizing. Called from:\n{trace}",
old = self.glyph_brush.texture_dimensions(),
new = (new_width, new_height),
trace = outer_backtrace!()
);
}
match create_texture(&mut self.factory, new_width, new_height) {
Ok((new_tex, tex_view)) => {
self.glyph_brush.resize_texture(new_width, new_height);
if let Some(ref mut cache) = self.draw_cache {
cache.pipe_data.font_tex.0 = tex_view.clone();
}
self.font_cache_tex.1 = tex_view;
self.font_cache_tex.0 = new_tex;
}
Err(_) => {
return Err(format!(
"Failed to create {new_width}x{new_height} glyph texture"
));
}
}
}
}
}
// refresh pipe data
// - pipe targets may have changed, or had resolution changes
// - format may have changed
if let Some(mut cache) = self.draw_cache.take() {
if &cache.pipe_data.out != target.as_raw() {
cache.pipe_data.out.clone_from(target.as_raw());
}
if let Some(depth_target) = depth_target {
if cache.pipe_data.out_depth.as_ref() != Some(depth_target.as_raw()) {
cache
.pipe_data
.out_depth
.clone_from(&Some(depth_target.as_raw().clone()));
}
} else {
cache.pipe_data.out_depth.take();
}
if cache.pso.0 != target.format() {
cache.pso = (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
);
}
self.draw_cache = Some(cache);
}
match brush_action.unwrap() {
BrushAction::Draw(verts) => {
let draw_cache = if let Some(mut cache) = self.draw_cache.take() {
if cache.pipe_data.vbuf.len() < verts.len() {
cache.pipe_data.vbuf =
new_vertex_buffer(&mut self.factory, encoder, &verts);
} else {
encoder
.update_buffer(&cache.pipe_data.vbuf, &verts, 0)
.unwrap();
}
cache.slice.instances.as_mut().unwrap().0 = verts.len() as _;
cache
} else {
let vbuf = new_vertex_buffer(&mut self.factory, encoder, &verts);
DrawnGlyphBrush {
pipe_data: {
let sampler = self.factory.create_sampler(texture::SamplerInfo::new(
self.texture_filter_method,
texture::WrapMode::Clamp,
));
glyph_pipe::Data {
vbuf,
font_tex: (self.font_cache_tex.1.clone(), sampler),
transform,
out: target.as_raw().clone(),
out_depth: depth_target.map(|d| d.as_raw().clone()),
}
},
pso: (
target.format(),
self.pso_using(target.format(), depth_target.map(|d| d.format())),
),
slice: gfx::Slice {
instances: Some((verts.len() as _, 0)),
..Self::empty_slice()
},
}
};
self.draw_cache = Some(draw_cache);
}
BrushAction::ReDraw => {}
};
if let Some(&mut DrawnGlyphBrush {
ref pso,
ref slice,
ref mut pipe_data,
..
}) = self.draw_cache.as_mut()
{
pipe_data.transform = transform;
encoder.draw(slice, &pso.1, pipe_data);
}
Ok(())
}
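// Illustrative sketch (builder names assumed, not taken from this file): the
// resize warning above suggests pre-sizing the cache texture at build time so
// the `resize_texture` path is never hit mid-frame, e.g.
//
// let brush = GlyphBrushBuilder::using_font(font)
//     .initial_cache_size((1024, 1024))
//     .build(factory.clone());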
fn pso_using(
&mut self,
color_format: gfx::format::Format,
depth_format: Option<gfx::format::Format>,
) -> gfx::PipelineState<R, glyph_pipe::Meta> {
self.factory
.create_pipeline_from_program(
&self.program,
gfx::Primitive::TriangleStrip,
gfx::state::Rasterizer::new_fill(),
glyph_pipe::Init::new(color_format, depth_format, self.depth_test),
)
.unwrap()
}
fn empty_slice() -> gfx::Slice<R> {
gfx::Slice {
start: 0,
end: 4,
buffer: gfx::IndexBuffer::Auto,
base_vertex: 0,
instances: None,
}
}
}
struct DrawnGlyphBrush<R: gfx::Resources> {
pipe_data: glyph_pipe::Data<R>,
pso: (gfx::format::Format, gfx::PipelineState<R, glyph_pipe::Meta>),
slice: gfx::Slice<R>,
}
/// Allocates a vertex buffer with 1 vertex per glyph that will be updated on text changes
#[inline]
fn new_vertex_buffer<R: gfx::Resources, F: gfx::Factory<R>, C: gfx::CommandBuffer<R>>(
factory: &mut F,
encoder: &mut gfx::Encoder<R, C>,
verts: &[GlyphVertex],
) -> gfx::handle::Buffer<R, GlyphVertex> {
let buf = factory
.create_buffer(
verts.len(),
gfx::buffer::Role::Vertex,
gfx::memory::Usage::Dynamic,
gfx::memory::Bind::empty(),
)
.unwrap();
encoder.update_buffer(&buf, verts, 0).unwrap();
buf
}
#[inline]
fn to_vertex(
glyph_brush::GlyphVertex {
mut tex_coords,
pixel_coords,
bounds,
extra,
}: glyph_brush::GlyphVertex,
) -> GlyphVertex {
let gl_bounds = bounds;
let mut gl_rect = Rect {
min: point(pixel_coords.min.x, pixel_coords.min.y),
max: point(pixel_coords.max.x, pixel_coords.max.y),
};
// handle overlapping bounds, modify tex_coords to preserve texture aspect
if gl_rect.max.x > gl_bounds.max.x {
let old_width = gl_rect.width();
gl_rect.max.x = gl_bounds.max.x;
tex_coords.max.x = tex_coords.min.x + tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.min.x < gl_bounds.min.x {
let old_width = gl_rect.width();
gl_rect.min.x = gl_bounds.min.x;
tex_coords.min.x = tex_coords.max.x - tex_coords.width() * gl_rect.width() / old_width;
}
if gl_rect.max.y > gl_bounds.max.y {
let old_height = gl_rect.height();
gl_rect.max.y = gl_bounds.max.y;
tex_coords.max.y = tex_coords.min.y + tex_coords.height() * gl_rect.height() / old_height;
}
if gl_rect.min.y < gl_bounds.min.y {
let old_height = gl_rect.height();
gl_rect.min.y = gl_bounds.min.y;
tex_coords.min.y = tex_coords.max.y - tex_coords.height() * gl_rect.height() / old_height;
}
GlyphVertex {
left_top: [gl_rect.min.x, gl_rect.max.y, extra.z],
right_bottom: [gl_rect.max.x, gl_rect.min.y],
tex_left_top: [tex_coords.min.x, tex_coords.max.y],
tex_right_bottom: [tex_coords.max.x, tex_coords.min.y],
color: extra.color,
}
}
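// Worked example of the clipping in `to_vertex` (illustrative numbers only):
// if a glyph quad spans x = 0..10 but the section bounds end at x = 7.5, then
// `gl_rect.max.x` is clamped to 7.5 and `tex_coords.max.x` is pulled in to
// `tex_coords.min.x + 0.75 * tex_coords.width()`, so the visible 75% of the
// quad still samples the matching 75% of the cached glyph texture instead of
// squashing the whole glyph into the clipped quad.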
// Creates a gfx texture with the given data
fn create_texture<GF, R>(
factory: &mut GF,
width: u32,
height: u32,
) -> Result<(TexSurfaceHandle<R>, TexShaderView<R>), Box<dyn Error>>
where
R: gfx::Resources,
GF: gfx::Factory<R>,
| {
let kind = texture::Kind::D2(
width as texture::Size,
height as texture::Size,
texture::AaMode::Single,
);
let tex = factory.create_texture(
kind,
1,
gfx::memory::Bind::SHADER_RESOURCE,
gfx::memory::Usage::Dynamic,
Some(<TexChannel as format::ChannelTyped>::get_channel_type()),
)?;
let view =
factory.view_texture_as_shader_resource::<TexForm>(&tex, (0, 0), format::Swizzle::new())?;
Ok((tex, view))
} | identifier_body |
|
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
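// Illustrative caller-side sketch (assumed, not part of the original source):
// passing `None` only queries the mask (set-and-restore internally, hence the
// two syscalls noted above), while `Some(..)` installs a new mask and returns
// the previous one.
//
// let fs = RealFs;
// let current = fs.umask(None)?;          // current process umask, unchanged
// let previous = fs.umask(Some(0o022))?;  // now 0o022, returns the old mask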
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> |
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
})
.collect();
Ok(entries)
}
#[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
}
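// Illustrative sketch (field values assumed) of how `open_options` maps the
// crate's `OpenOptions` onto `std::fs::OpenOptions`: an
// `OpenOptions { write: true, create: true, truncate: true, mode: Some(0o600), .. }`
// becomes `fs::OpenOptions::new()` with `.read(false).write(true).create(true)
// .truncate(true).append(false).create_new(false)` plus, on Unix only,
// `.mode(0o600 & 0o777)`, so the permission bits apply only if the file is created.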
| {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
} | identifier_body |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn | (path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
})
.collect();
Ok(entries)
}
#[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
}
| stat | identifier_name |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
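// Reads the volume serial number of an open handle; it is reported as the `dev`
// field of the resulting FsStat.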
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
}) | #[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
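// Translates the crate-level OpenOptions into std::fs::OpenOptions. The optional mode
// is honoured only on Unix and is masked down to the permission bits (0o777).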
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
} | .collect();
Ok(entries)
}
| random_line_split |
std_fs.rs | // Copyright 2018-2023 the Deno authors. All rights reserved. MIT license.
#![allow(clippy::disallowed_methods)]
use std::fs;
use std::io;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use deno_core::unsync::spawn_blocking;
use deno_io::fs::File;
use deno_io::fs::FsResult;
use deno_io::fs::FsStat;
use deno_io::StdFileResourceInner;
use crate::interface::FsDirEntry;
use crate::interface::FsFileType;
use crate::FileSystem;
use crate::OpenOptions;
#[cfg(not(unix))]
use deno_io::fs::FsError;
#[derive(Debug, Clone)]
pub struct RealFs;
#[async_trait::async_trait(?Send)]
impl FileSystem for RealFs {
fn cwd(&self) -> FsResult<PathBuf> {
std::env::current_dir().map_err(Into::into)
}
fn tmp_dir(&self) -> FsResult<PathBuf> {
Ok(std::env::temp_dir())
}
fn chdir(&self, path: &Path) -> FsResult<()> {
std::env::set_current_dir(path).map_err(Into::into)
}
#[cfg(not(unix))]
fn umask(&self, _mask: Option<u32>) -> FsResult<u32> {
// TODO implement umask for Windows
// see https://github.com/nodejs/node/blob/master/src/node_process_methods.cc
// and https://docs.microsoft.com/fr-fr/cpp/c-runtime-library/reference/umask?view=vs-2019
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn umask(&self, mask: Option<u32>) -> FsResult<u32> {
use nix::sys::stat::mode_t;
use nix::sys::stat::umask;
use nix::sys::stat::Mode;
let r = if let Some(mask) = mask | else {
// If no mask provided, we query the current. Requires two syscalls.
let prev = umask(Mode::from_bits_truncate(0o777));
let _ = umask(prev);
prev
};
#[cfg(target_os = "linux")]
{
Ok(r.bits())
}
#[cfg(any(
target_os = "macos",
target_os = "openbsd",
target_os = "freebsd"
))]
{
Ok(r.bits() as u32)
}
}
fn open_sync(
&self,
path: &Path,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = opts.open(path)?;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
async fn open_async(
&self,
path: PathBuf,
options: OpenOptions,
) -> FsResult<Rc<dyn File>> {
let opts = open_options(options);
let std_file = spawn_blocking(move || opts.open(path)).await??;
Ok(Rc::new(StdFileResourceInner::file(std_file)))
}
fn mkdir_sync(
&self,
path: &Path,
recursive: bool,
mode: u32,
) -> FsResult<()> {
mkdir(path, recursive, mode)
}
async fn mkdir_async(
&self,
path: PathBuf,
recursive: bool,
mode: u32,
) -> FsResult<()> {
spawn_blocking(move || mkdir(&path, recursive, mode)).await?
}
fn chmod_sync(&self, path: &Path, mode: u32) -> FsResult<()> {
chmod(path, mode)
}
async fn chmod_async(&self, path: PathBuf, mode: u32) -> FsResult<()> {
spawn_blocking(move || chmod(&path, mode)).await?
}
fn chown_sync(
&self,
path: &Path,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
chown(path, uid, gid)
}
async fn chown_async(
&self,
path: PathBuf,
uid: Option<u32>,
gid: Option<u32>,
) -> FsResult<()> {
spawn_blocking(move || chown(&path, uid, gid)).await?
}
fn remove_sync(&self, path: &Path, recursive: bool) -> FsResult<()> {
remove(path, recursive)
}
async fn remove_async(&self, path: PathBuf, recursive: bool) -> FsResult<()> {
spawn_blocking(move || remove(&path, recursive)).await?
}
fn copy_file_sync(&self, from: &Path, to: &Path) -> FsResult<()> {
copy_file(from, to)
}
async fn copy_file_async(&self, from: PathBuf, to: PathBuf) -> FsResult<()> {
spawn_blocking(move || copy_file(&from, &to)).await?
}
fn stat_sync(&self, path: &Path) -> FsResult<FsStat> {
stat(path).map(Into::into)
}
async fn stat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || stat(&path)).await?.map(Into::into)
}
fn lstat_sync(&self, path: &Path) -> FsResult<FsStat> {
lstat(path).map(Into::into)
}
async fn lstat_async(&self, path: PathBuf) -> FsResult<FsStat> {
spawn_blocking(move || lstat(&path)).await?.map(Into::into)
}
fn realpath_sync(&self, path: &Path) -> FsResult<PathBuf> {
realpath(path)
}
async fn realpath_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || realpath(&path)).await?
}
fn read_dir_sync(&self, path: &Path) -> FsResult<Vec<FsDirEntry>> {
read_dir(path)
}
async fn read_dir_async(&self, path: PathBuf) -> FsResult<Vec<FsDirEntry>> {
spawn_blocking(move || read_dir(&path)).await?
}
fn rename_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::rename(oldpath, newpath).map_err(Into::into)
}
async fn rename_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::rename(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn link_sync(&self, oldpath: &Path, newpath: &Path) -> FsResult<()> {
fs::hard_link(oldpath, newpath).map_err(Into::into)
}
async fn link_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
) -> FsResult<()> {
spawn_blocking(move || fs::hard_link(oldpath, newpath))
.await?
.map_err(Into::into)
}
fn symlink_sync(
&self,
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
symlink(oldpath, newpath, file_type)
}
async fn symlink_async(
&self,
oldpath: PathBuf,
newpath: PathBuf,
file_type: Option<FsFileType>,
) -> FsResult<()> {
spawn_blocking(move || symlink(&oldpath, &newpath, file_type)).await?
}
fn read_link_sync(&self, path: &Path) -> FsResult<PathBuf> {
fs::read_link(path).map_err(Into::into)
}
async fn read_link_async(&self, path: PathBuf) -> FsResult<PathBuf> {
spawn_blocking(move || fs::read_link(path))
.await?
.map_err(Into::into)
}
fn truncate_sync(&self, path: &Path, len: u64) -> FsResult<()> {
truncate(path, len)
}
async fn truncate_async(&self, path: PathBuf, len: u64) -> FsResult<()> {
spawn_blocking(move || truncate(&path, len)).await?
}
fn utime_sync(
&self,
path: &Path,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
}
async fn utime_async(
&self,
path: PathBuf,
atime_secs: i64,
atime_nanos: u32,
mtime_secs: i64,
mtime_nanos: u32,
) -> FsResult<()> {
let atime = filetime::FileTime::from_unix_time(atime_secs, atime_nanos);
let mtime = filetime::FileTime::from_unix_time(mtime_secs, mtime_nanos);
spawn_blocking(move || {
filetime::set_file_times(path, atime, mtime).map_err(Into::into)
})
.await?
}
fn write_file_sync(
&self,
path: &Path,
options: OpenOptions,
data: &[u8],
) -> FsResult<()> {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(data)?;
Ok(())
}
async fn write_file_async(
&self,
path: PathBuf,
options: OpenOptions,
data: Vec<u8>,
) -> FsResult<()> {
spawn_blocking(move || {
let opts = open_options(options);
let mut file = opts.open(path)?;
#[cfg(unix)]
if let Some(mode) = options.mode {
use std::os::unix::fs::PermissionsExt;
file.set_permissions(fs::Permissions::from_mode(mode))?;
}
file.write_all(&data)?;
Ok(())
})
.await?
}
fn read_file_sync(&self, path: &Path) -> FsResult<Vec<u8>> {
fs::read(path).map_err(Into::into)
}
async fn read_file_async(&self, path: PathBuf) -> FsResult<Vec<u8>> {
spawn_blocking(move || fs::read(path))
.await?
.map_err(Into::into)
}
}
fn mkdir(path: &Path, recursive: bool, mode: u32) -> FsResult<()> {
let mut builder = fs::DirBuilder::new();
builder.recursive(recursive);
#[cfg(unix)]
{
use std::os::unix::fs::DirBuilderExt;
builder.mode(mode);
}
#[cfg(not(unix))]
{
_ = mode;
}
builder.create(path).map_err(Into::into)
}
#[cfg(unix)]
fn chmod(path: &Path, mode: u32) -> FsResult<()> {
use std::os::unix::fs::PermissionsExt;
let permissions = fs::Permissions::from_mode(mode);
fs::set_permissions(path, permissions)?;
Ok(())
}
// TODO: implement chmod for Windows (#4357)
#[cfg(not(unix))]
fn chmod(path: &Path, _mode: u32) -> FsResult<()> {
// Still check file/dir exists on Windows
std::fs::metadata(path)?;
Err(FsError::NotSupported)
}
#[cfg(unix)]
fn chown(path: &Path, uid: Option<u32>, gid: Option<u32>) -> FsResult<()> {
use nix::unistd::chown;
use nix::unistd::Gid;
use nix::unistd::Uid;
let owner = uid.map(Uid::from_raw);
let group = gid.map(Gid::from_raw);
let res = chown(path, owner, group);
if let Err(err) = res {
return Err(io::Error::from_raw_os_error(err as i32).into());
}
Ok(())
}
// TODO: implement chown for Windows
#[cfg(not(unix))]
fn chown(_path: &Path, _uid: Option<u32>, _gid: Option<u32>) -> FsResult<()> {
Err(FsError::NotSupported)
}
fn remove(path: &Path, recursive: bool) -> FsResult<()> {
// TODO: this is racy. This should open fds, and then `unlink` those.
let metadata = fs::symlink_metadata(path)?;
let file_type = metadata.file_type();
let res = if file_type.is_dir() {
if recursive {
fs::remove_dir_all(path)
} else {
fs::remove_dir(path)
}
} else if file_type.is_symlink() {
#[cfg(unix)]
{
fs::remove_file(path)
}
#[cfg(not(unix))]
{
use std::os::windows::prelude::MetadataExt;
use winapi::um::winnt::FILE_ATTRIBUTE_DIRECTORY;
if metadata.file_attributes() & FILE_ATTRIBUTE_DIRECTORY != 0 {
fs::remove_dir(path)
} else {
fs::remove_file(path)
}
}
} else {
fs::remove_file(path)
};
res.map_err(Into::into)
}
fn copy_file(from: &Path, to: &Path) -> FsResult<()> {
#[cfg(target_os = "macos")]
{
use libc::clonefile;
use libc::stat;
use libc::unlink;
use std::ffi::CString;
use std::io::Read;
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::fs::PermissionsExt;
use std::os::unix::prelude::OsStrExt;
let from_str = CString::new(from.as_os_str().as_bytes()).unwrap();
let to_str = CString::new(to.as_os_str().as_bytes()).unwrap();
// SAFETY: `from` and `to` are valid C strings.
// std::fs::copy does open() + fcopyfile() on macOS. We try to use
// clonefile() instead, which is more efficient.
unsafe {
let mut st = std::mem::zeroed();
let ret = stat(from_str.as_ptr(), &mut st);
if ret != 0 {
return Err(io::Error::last_os_error().into());
}
if st.st_size > 128 * 1024 {
// Try unlink. If it fails, we are going to try clonefile() anyway.
let _ = unlink(to_str.as_ptr());
// Matches rust stdlib behavior for io::copy.
// https://github.com/rust-lang/rust/blob/3fdd578d72a24d4efc2fe2ad18eec3b6ba72271e/library/std/src/sys/unix/fs.rs#L1613-L1616
if clonefile(from_str.as_ptr(), to_str.as_ptr(), 0) == 0 {
return Ok(());
}
} else {
// Do a regular copy. fcopyfile() is an overkill for < 128KB
// files.
let mut buf = [0u8; 128 * 1024];
let mut from_file = fs::File::open(from)?;
let perm = from_file.metadata()?.permissions();
let mut to_file = fs::OpenOptions::new()
// create the file with the correct mode right away
.mode(perm.mode())
.write(true)
.create(true)
.truncate(true)
.open(to)?;
let writer_metadata = to_file.metadata()?;
if writer_metadata.is_file() {
// Set the correct file permissions, in case the file already existed.
// Don't set the permissions on already existing non-files like
// pipes/FIFOs or device nodes.
to_file.set_permissions(perm)?;
}
loop {
let nread = from_file.read(&mut buf)?;
if nread == 0 {
break;
}
to_file.write_all(&buf[..nread])?;
}
return Ok(());
}
}
// clonefile() failed, fall back to std::fs::copy().
}
fs::copy(from, to)?;
Ok(())
}
#[cfg(not(windows))]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn stat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
let path = path.canonicalize()?;
stat_extra(&mut fsstat, &path, FILE_FLAG_BACKUP_SEMANTICS)?;
Ok(fsstat)
}
#[cfg(not(windows))]
fn lstat(path: &Path) -> FsResult<FsStat> {
let metadata = fs::symlink_metadata(path)?;
Ok(FsStat::from_std(metadata))
}
#[cfg(windows)]
fn lstat(path: &Path) -> FsResult<FsStat> {
use winapi::um::winbase::FILE_FLAG_BACKUP_SEMANTICS;
use winapi::um::winbase::FILE_FLAG_OPEN_REPARSE_POINT;
let metadata = fs::symlink_metadata(path)?;
let mut fsstat = FsStat::from_std(metadata);
stat_extra(
&mut fsstat,
path,
FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT,
)?;
Ok(fsstat)
}
#[cfg(windows)]
fn stat_extra(
fsstat: &mut FsStat,
path: &Path,
file_flags: winapi::shared::minwindef::DWORD,
) -> FsResult<()> {
use std::os::windows::prelude::OsStrExt;
use winapi::um::fileapi::CreateFileW;
use winapi::um::fileapi::OPEN_EXISTING;
use winapi::um::handleapi::CloseHandle;
use winapi::um::handleapi::INVALID_HANDLE_VALUE;
use winapi::um::winnt::FILE_SHARE_DELETE;
use winapi::um::winnt::FILE_SHARE_READ;
use winapi::um::winnt::FILE_SHARE_WRITE;
unsafe fn get_dev(
handle: winapi::shared::ntdef::HANDLE,
) -> std::io::Result<u64> {
use winapi::shared::minwindef::FALSE;
use winapi::um::fileapi::GetFileInformationByHandle;
use winapi::um::fileapi::BY_HANDLE_FILE_INFORMATION;
let info = {
let mut info =
std::mem::MaybeUninit::<BY_HANDLE_FILE_INFORMATION>::zeroed();
if GetFileInformationByHandle(handle, info.as_mut_ptr()) == FALSE {
return Err(std::io::Error::last_os_error());
}
info.assume_init()
};
Ok(info.dwVolumeSerialNumber as u64)
}
// SAFETY: winapi calls
unsafe {
let mut path: Vec<_> = path.as_os_str().encode_wide().collect();
path.push(0);
let file_handle = CreateFileW(
path.as_ptr(),
0,
FILE_SHARE_READ | FILE_SHARE_DELETE | FILE_SHARE_WRITE,
std::ptr::null_mut(),
OPEN_EXISTING,
file_flags,
std::ptr::null_mut(),
);
if file_handle == INVALID_HANDLE_VALUE {
return Err(std::io::Error::last_os_error().into());
}
let result = get_dev(file_handle);
CloseHandle(file_handle);
fsstat.dev = result?;
Ok(())
}
}
fn realpath(path: &Path) -> FsResult<PathBuf> {
Ok(deno_core::strip_unc_prefix(path.canonicalize()?))
}
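// Reads a directory eagerly. Entries that cannot be read or whose names are not valid
// Unicode are skipped; file-type flags fall back to `false` if the type is unknown.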
fn read_dir(path: &Path) -> FsResult<Vec<FsDirEntry>> {
let entries = fs::read_dir(path)?
.filter_map(|entry| {
let entry = entry.ok()?;
let name = entry.file_name().into_string().ok()?;
let metadata = entry.file_type();
macro_rules! method_or_false {
($method:ident) => {
if let Ok(metadata) = &metadata {
metadata.$method()
} else {
false
}
};
}
Some(FsDirEntry {
name,
is_file: method_or_false!(is_file),
is_directory: method_or_false!(is_dir),
is_symlink: method_or_false!(is_symlink),
})
})
.collect();
Ok(entries)
}
#[cfg(not(windows))]
fn symlink(
oldpath: &Path,
newpath: &Path,
_file_type: Option<FsFileType>,
) -> FsResult<()> {
std::os::unix::fs::symlink(oldpath, newpath)?;
Ok(())
}
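// On Windows the symlink flavour (file vs directory) must be chosen up front. If the
// caller did not specify one, it is inferred from the metadata of an existing target.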
#[cfg(windows)]
fn symlink(
oldpath: &Path,
newpath: &Path,
file_type: Option<FsFileType>,
) -> FsResult<()> {
let file_type = match file_type {
Some(file_type) => file_type,
None => {
let old_meta = fs::metadata(oldpath);
match old_meta {
Ok(metadata) => {
if metadata.is_file() {
FsFileType::File
} else if metadata.is_dir() {
FsFileType::Directory
} else {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows the target must be a file or directory",
)));
}
}
Err(err) if err.kind() == io::ErrorKind::NotFound => {
return Err(FsError::Io(io::Error::new(
io::ErrorKind::InvalidInput,
"On Windows an `options` argument is required if the target does not exist",
)))
}
Err(err) => return Err(err.into()),
}
}
};
match file_type {
FsFileType::File => {
std::os::windows::fs::symlink_file(oldpath, newpath)?;
}
FsFileType::Directory => {
std::os::windows::fs::symlink_dir(oldpath, newpath)?;
}
};
Ok(())
}
fn truncate(path: &Path, len: u64) -> FsResult<()> {
let file = fs::OpenOptions::new().write(true).open(path)?;
file.set_len(len)?;
Ok(())
}
fn open_options(options: OpenOptions) -> fs::OpenOptions {
let mut open_options = fs::OpenOptions::new();
if let Some(mode) = options.mode {
// mode only used if creating the file on Unix
// if not specified, defaults to 0o666
#[cfg(unix)]
{
use std::os::unix::fs::OpenOptionsExt;
open_options.mode(mode & 0o777);
}
#[cfg(not(unix))]
let _ = mode; // avoid unused warning
}
open_options.read(options.read);
open_options.create(options.create);
open_options.write(options.write);
open_options.truncate(options.truncate);
open_options.append(options.append);
open_options.create_new(options.create_new);
open_options
}
| {
// If mask provided, return previous.
umask(Mode::from_bits_truncate(mask as mode_t))
} | conditional_block |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
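// Inspects the transaction logs: no AppDeployed event means the node was only
// enqueued; otherwise the app ids and ports assigned to this node are collected.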
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else | ;
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Defines console arguments for the `register` subcommand
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| {
false
} | conditional_block |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
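// Each optional phase (status check, waiting for eth sync, waiting for tx inclusion)
// registers one extra step so the progress output shows the right total.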
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Defines console arguments for the `register` subcommand
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs |
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
} | identifier_body |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http; | const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
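// NOTE: this assumes an IPv4 dotted-quad address; each component becomes one byte of
// the buffer.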
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Defines console arguments for the `register` subcommand
pub fn subcommand<'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
} | use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private"; | random_line_split |
register.rs | /*
* Copyright 2018 Fluence Labs Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::net::IpAddr;
use failure::{Error, SyncFailure};
use clap::{value_t, App, AppSettings, Arg, ArgMatches, SubCommand};
use derive_getters::Getters;
use hex;
use web3::types::H256;
use crate::command::*;
use crate::config::SetupConfig;
use crate::contract_func::contract::events::app_deployed::parse_log as parse_deployed;
use crate::contract_func::contract::functions::add_node;
use crate::contract_func::{call_contract, get_transaction_logs, wait_sync, wait_tx_included};
use crate::contract_status::{find_by_tendermint_key, status};
use crate::ethereum_params::EthereumParams;
use crate::step_counter::StepCounter;
use crate::types::{NodeAddress, IP_LEN, TENDERMINT_NODE_ID_LEN};
use crate::utils;
use web3::transports::Http;
use web3::types::H160;
const API_PORT: &str = "api_port";
const CAPACITY: &str = "capacity";
const PRIVATE: &str = "private";
const NO_STATUS_CHECK: &str = "no_status_check";
#[derive(Debug, Getters)]
pub struct Register {
node_ip: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
}
#[derive(PartialEq, Debug)]
pub enum Registered {
TransactionSent(H256),
Deployed {
app_ids: Vec<u64>,
ports: Vec<u16>,
tx: H256,
},
Enqueued(H256),
AlreadyRegistered,
}
impl Register {
/// Creates `Register` structure
pub fn new(
node_address: IpAddr,
tendermint_key: H256,
tendermint_node_id: H160,
api_port: u16,
capacity: u16,
private: bool,
no_status_check: bool,
eth: EthereumParams,
) -> Result<Register, Error> {
Ok(Register {
node_ip: node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
})
}
/// Serializes a node IP address and a tendermint key into the hash of node's key address
fn serialize_node_address(&self) -> Result<NodeAddress, Error> {
// serialize tendermint key
let key_str = format!("{:?}", self.tendermint_node_id);
let key_str = key_str.as_str().trim_start_matches("0x");
let key_bytes = hex::decode(key_str.to_owned())?;
let mut key_bytes = key_bytes.as_slice()[0..TENDERMINT_NODE_ID_LEN].to_vec();
// serialize IP address
let ip_str = self.node_ip.to_string();
let split = ip_str.split('.');
let mut addr_bytes: [u8; IP_LEN] = [0; IP_LEN];
for (i, part) in split.enumerate() {
addr_bytes[i] = part.parse()?;
}
let mut addr_vec = addr_bytes.to_vec();
// concatenate tendermint key and IP address
key_bytes.append(&mut addr_vec);
let serialized = hex::encode(key_bytes);
let hash_addr: NodeAddress = serialized.parse()?;
Ok(hash_addr)
}
/// Registers a node in Fluence smart contract
pub fn register(&self, show_progress: bool) -> Result<Registered, Error> {
let (_eloop, transport) = Http::new(self.eth.eth_url.as_str()).map_err(SyncFailure::new)?;
let web3 = &web3::Web3::new(transport);
let publish_to_contract_fn = || -> Result<H256, Error> {
let hash_addr: NodeAddress = self.serialize_node_address()?;
let (call_data, _) = add_node::call(
self.tendermint_key,
hash_addr,
u64::from(self.api_port),
u64::from(self.capacity),
self.private,
);
call_contract(web3, &self.eth, call_data, None)
};
let check_node_registered_fn = || -> Result<bool, Error> {
let contract_status = status::get_status(web3, self.eth.contract_address)?;
Ok(find_by_tendermint_key(&contract_status, self.tendermint_key).is_some())
};
let wait_event_fn = |tx: &H256| -> Result<Registered, Error> {
let logs = get_transaction_logs(self.eth.eth_url.as_str(), tx, parse_deployed)?;
let status = if logs.is_empty() {
Registered::Enqueued(*tx)
} else {
let (app_ids, ports) = logs
.iter()
.filter_map(|e| {
let idx = e.node_i_ds.iter().position(|id| *id == self.tendermint_key);
idx.and_then(|i| e.ports.get(i)).map(|port| {
let port = Into::<u64>::into(*port) as u16;
let app_id = Into::<u64>::into(e.app_id);
(app_id, port)
})
})
.unzip();
Registered::Deployed {
app_ids,
ports,
tx: *tx,
}
};
Ok(status)
};
// sending transaction with the hash of file with code to ethereum
if show_progress {
let mut step_counter = StepCounter::new(1);
if !self.no_status_check {
step_counter.register()
};
if self.eth.wait_eth_sync {
step_counter.register()
};
if self.eth.wait_tx_include {
step_counter.register()
};
if self.eth.wait_eth_sync {
utils::with_progress(
"Waiting while Ethereum node is syncing...",
step_counter.format_next_step().as_str(),
"Ethereum node synced.",
|| wait_sync(web3),
)?;
};
let is_registered = if !self.no_status_check {
utils::with_progress(
"Check if node in smart contract is already registered...",
step_counter.format_next_step().as_str(),
"Smart contract checked.",
|| check_node_registered_fn(),
)?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = utils::with_progress(
"Registering the node in the smart contract...",
step_counter.format_next_step().as_str(),
"Transaction with node registration was sent.",
publish_to_contract_fn,
)?;
if self.eth.wait_tx_include {
utils::print_tx_hash(tx);
utils::with_progress(
"Waiting for the transaction to be included in a block...",
step_counter.format_next_step().as_str(),
"Transaction was included.",
|| {
wait_tx_included(&tx, web3)?;
wait_event_fn(&tx)
},
)
} else {
Ok(Registered::TransactionSent(tx))
}
}
} else {
if self.eth.wait_eth_sync {
wait_sync(web3)?;
}
            let is_registered = if !self.no_status_check {
check_node_registered_fn()?
} else {
false
};
if is_registered {
Ok(Registered::AlreadyRegistered)
} else {
let tx = publish_to_contract_fn()?;
if self.eth.wait_tx_include {
wait_event_fn(&tx)
} else {
Ok(Registered::TransactionSent(tx))
}
}
}
}
}
pub fn parse(args: &ArgMatches, config: SetupConfig) -> Result<Register, Error> {
let tendermint_key: H256 = parse_tendermint_key(args)?;
let tendermint_node_id: H160 = parse_tendermint_node_id(args)?;
let api_port = value_t!(args, API_PORT, u16)?;
let capacity = value_t!(args, CAPACITY, u16)?;
let private: bool = args.is_present(PRIVATE);
let no_status_check: bool = args.is_present(NO_STATUS_CHECK);
let eth = parse_ethereum_args(args, config)?;
let node_address = parse_node_ip(&args)?;
Register::new(
node_address,
tendermint_key,
tendermint_node_id,
api_port,
capacity,
private,
no_status_check,
eth,
)
}
/// Builds the `register` subcommand together with its console arguments
pub fn | <'a, 'b>() -> App<'a, 'b> {
let args = &[
node_ip().display_order(1),
tendermint_key().display_order(2),
base64_tendermint_key().display_order(3),
tendermint_node_id().display_order(4),
Arg::with_name(API_PORT)
.alias(API_PORT)
.long(API_PORT)
.default_value("20096")
.takes_value(true)
.help("Node API port")
.display_order(5),
Arg::with_name(CAPACITY)
.alias(CAPACITY)
.default_value("20196")
.long(CAPACITY)
.takes_value(true)
.help("Maximum number of apps to be run on the node")
.display_order(5),
Arg::with_name(PRIVATE)
.long(PRIVATE)
.short("p")
.takes_value(false)
.help("Marks node as private, used for pinning apps to nodes")
.display_order(6),
Arg::with_name(NO_STATUS_CHECK)
.long(NO_STATUS_CHECK)
.short("N")
.takes_value(false)
.help("Disable checking if a node is already registered. Registration can be faster but will spend some gas if the node is registered.")
.display_order(7),
];
SubCommand::with_name("register")
.about("Register a node in the smart contract")
.args(with_ethereum_args(args).as_slice())
.setting(AppSettings::ArgRequiredElseHelp)
}
#[cfg(test)]
pub mod tests {
use failure::Error;
use ethkey::Secret;
use rand::prelude::*;
use web3::types::*;
use crate::command::EthereumArgs;
use crate::config::SetupConfig;
use crate::credentials::Credentials;
use crate::ethereum_params::EthereumParams;
use super::Register;
pub fn generate_eth_args(credentials: Credentials) -> EthereumArgs {
let account: Address = "4180fc65d613ba7e1a385181a219f1dbfe7bf11d".parse().unwrap();
EthereumArgs::with_acc_creds(account, credentials)
}
pub fn generate_register(credentials: Credentials) -> Register {
let mut rng = rand::thread_rng();
let rnd_num: u64 = rng.gen();
let tendermint_key: H256 = H256::from(rnd_num);
let tendermint_node_id: H160 = H160::from(rnd_num);
let eth = generate_eth_args(credentials);
let config = SetupConfig::default().unwrap();
let eth_params = EthereumParams::generate(eth, config).unwrap();
Register::new(
"127.0.0.1".parse().unwrap(),
tendermint_key,
tendermint_node_id,
25006,
25100,
false,
false,
eth_params,
)
.unwrap()
}
pub fn generate_with<F>(func: F, credentials: Credentials) -> Register
where
F: FnOnce(&mut Register),
{
let mut register = generate_register(credentials);
func(&mut register);
register
}
pub fn generate_with_account(account: Address, credentials: Credentials) -> Register {
generate_with(|r| r.eth.account = account, credentials)
}
#[test]
fn register_success() -> Result<(), Error> {
let register = generate_with_account(
"fa0de43c68bea2167181cd8a83f990d02a049336".parse()?,
Credentials::No,
);
register.register(false)?;
Ok(())
}
#[test]
fn register_out_of_gas() -> Result<(), Error> {
let register = generate_with(|r| r.eth.gas = 1, Credentials::No);
let result = register.register(false);
assert_eq!(result.is_err(), true);
Ok(())
}
#[test]
fn register_success_with_secret() -> Result<(), Error> {
let secret_arr: H256 =
"a349fe22d5c6f8ad3a1ad91ddb65e8946435b52254ce8c330f7ed796e83bfd92".parse()?;
let secret = Secret::from(secret_arr);
let register = generate_with_account(
"dce48d51717ad5eb87fb56ff55ec609cf37b9aad".parse()?,
Credentials::Secret(secret),
);
register.register(false)?;
Ok(())
}
}
| subcommand | identifier_name |
lib.rs | crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (as the crow flies) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
symmetric. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
    2.0 * EARTH_RADIUS * x.sqrt().asin()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
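If you later need to look up the dissimilarity for a particular pair of
observations, the row-major layout built above maps a pair `(row, col)` with
`row < col` to a single index. (The helper below is an editorial sketch added
for illustration; it is not part of this crate's API.)

```
fn condensed_index(n: usize, row: usize, col: usize) -> usize {
    debug_assert!(row < col && col < n);
    // Entries for rows `0..row` come first, then the offset within `row`.
    n * row - (row * (row + 1)) / 2 + (col - row - 1)
}

// With 6 observations, (0, 1) is the first entry and (4, 5) is the last.
assert_eq!(condensed_index(6, 0, 1), 0);
assert_eq!(condensed_index(6, 4, 5), 14);
```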
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().asin()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
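You can also walk the dendrogram programmatically. (The snippet below is an
editorial sketch; it assumes the `Dendrogram::steps` accessor and the public
`Step` fields `cluster1`, `cluster2`, `dissimilarity` and `size`, which
correspond to the 4-tuple described above.)

```
use kodama::{Method, linkage};

// Condensed dissimilarities for 3 observations: pairs (0,1), (0,2), (1,2).
let mut condensed = vec![1.0f64, 4.0, 2.0];
let dend = linkage(&mut condensed, 3, Method::Average);
// Labels `0..3` are the original observations; the cluster formed by step
// `i` is labeled `3 + i`.
for step in dend.steps() {
    println!(
        "{} {} {} {}",
        step.cluster1, step.cluster2, step.dissimilarity, step.size,
    );
}
```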
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the method variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T + t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2;
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
pub fn into | f) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
self.into_method().sqrt(dend);
}
}
impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in contrast to `linkage` where a
/// dendrogram is created and returned.
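///
/// # Example
///
/// A minimal reuse sketch (added for illustration; it relies only on the
/// `LinkageState::new`, `Dendrogram::new` and `Dendrogram::len` APIs shown
/// elsewhere in this crate):
///
/// ```
/// use kodama::{linkage_with, Dendrogram, LinkageState, Method};
///
/// let mut state = LinkageState::new();
/// let mut steps = Dendrogram::new(3);
/// // Condensed dissimilarities for pairs (0,1), (0,2), (1,2).
/// let mut condensed = vec![1.0f64, 4.0, 2.0];
/// linkage_with(&mut state, &mut condensed, 3, Method::Average, &mut steps);
/// assert_eq!(steps.len(), 2);
/// ```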
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough | _method_chain(sel | identifier_name |
lib.rs | crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (as the crow flies) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
symmetric. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
    2.0 * EARTH_RADIUS * x.sqrt().asin()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().asin()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
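Cluster labels in the table above follow a simple convention: labels smaller
than `N` (the number of observations) refer to the original observations,
while the cluster created by step `i` receives the label `N + i`. (The helper
below is an editorial sketch, not part of this crate's API.)

```
// Describes a cluster label from a dendrogram over `observations` inputs.
fn describe(label: usize, observations: usize) -> String {
    if label < observations {
        format!("observation {}", label)
    } else {
        format!("cluster formed at step {}", label - observations)
    }
}

// In the dendrogram above, N = 6.
assert_eq!(describe(2, 6), "observation 2");
assert_eq!(describe(9, 6), "cluster formed at step 3");
```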
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
}
}
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error {
io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the method variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T + t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2;
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
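    ///
    /// # Example
    ///
    /// (A small sketch added for illustration.)
    ///
    /// ```
    /// use kodama::{Method, MethodChain};
    ///
    /// assert_eq!(Method::Ward.into_method_chain(), Some(MethodChain::Ward));
    /// assert_eq!(Method::Median.into_method_chain(), None);
    /// ```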
pub fn into_method_chain(self) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
| impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in contrast to `linkage` where a
/// dendrogram is created and returned.
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough room | self.into_method().sqrt(dend);
}
}
| identifier_body |
lib.rs | this crate are as follows:
* [`linkage`](fn.linkage.html) performs hierarchical clustering on a pairwise
dissimilarity matrix.
* [`Method`](enum.Method.html) determines the linkage criteria.
* [`Dendrogram`](struct.Dendrogram.html) is a representation of a "stepwise"
dendrogram, which serves as the output of hierarchical clustering.
# Usage
Add this to your `Cargo.toml`:
```text
[dependencies]
kodama = "0.3"
```
and this to your crate root:
```
extern crate kodama;
```
# Example
Showing an example is tricky, because it's hard to motivate the use of
hierarchical clustering on small data sets, and especially hard without
domain specific details that suggest a hierarchical clustering may actually
be useful.
Instead of solving the hard problem of motivating a real use case, let's take
a look at a toy use case: a hierarchical clustering of a small number of
geographic points. We'll measure the distance (as the crow flies) between
these points using latitude/longitude coordinates with the
[Haversine formula](https://en.wikipedia.org/wiki/Haversine_formula).
We'll use a small collection of municipalities from central Massachusetts in
our example. Here's the data:
```text
Index Municipality Latitude Longitude
0 Fitchburg 42.5833333 -71.8027778
1 Framingham 42.2791667 -71.4166667
2 Marlborough 42.3458333 -71.5527778
3 Northbridge 42.1513889 -71.6500000
4 Southborough 42.3055556 -71.5250000
5 Westborough 42.2694444 -71.6166667
```
Each municipality in our data represents a single observation, and we'd like to
create a hierarchical clustering of them using [`linkage`](fn.linkage.html).
The input to `linkage` is a *condensed pairwise dissimilarity matrix*. This
matrix stores the dissimilarity between all pairs of observations. The
"condensed" aspect of it means that it only stores the upper triangle (not
including the diagonal) of the matrix. We can do this because hierarchical
clustering requires that our dissimilarities between observations are
symmetric. That is, the dissimilarity between `A` and `B` is the same as the
dissimilarity between `B` and `A`. This is certainly true in our case with the
Haversine formula.
So let's compute all of the pairwise dissimilarities and create our condensed
pairwise matrix:
```
// See: https://en.wikipedia.org/wiki/Haversine_formula
fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
const EARTH_RADIUS: f64 = 3958.756; // miles
let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
let delta_lat = lat2 - lat1;
let delta_lon = lon2 - lon1;
let x =
(delta_lat / 2.0).sin().powi(2)
+ lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
    2.0 * EARTH_RADIUS * x.sqrt().asin()
}
// From our data set. Each coordinate pair corresponds to a single observation.
let coordinates = vec![
(42.5833333, -71.8027778),
(42.2791667, -71.4166667),
(42.3458333, -71.5527778),
(42.1513889, -71.6500000),
(42.3055556, -71.5250000),
(42.2694444, -71.6166667),
];
// Build our condensed matrix by computing the dissimilarity between all
// possible coordinate pairs.
let mut condensed = vec![];
for row in 0..coordinates.len() - 1 {
for col in row + 1..coordinates.len() {
condensed.push(haversine(coordinates[row], coordinates[col]));
}
}
// The length of a condensed dissimilarity matrix is always equal to
// `N-choose-2`, where `N` is the number of observations.
assert_eq!(condensed.len(), (coordinates.len() * (coordinates.len() - 1)) / 2);
```
Now that we have our condensed dissimilarity matrix, all we need to do is
choose our *linkage criterion*. The linkage criterion refers to the formula
that is used during hierarchical clustering to compute the dissimilarity
between newly formed clusters and all other clusters. This crate provides
several choices, and the choice one makes depends both on the problem you're
trying to solve and your performance requirements. For example, "single"
linkage corresponds to using the minimum dissimilarity between all pairs of
observations between two clusters as the dissimilarity between those two
clusters. It turns out that doing single linkage hierarchical clustering has
a rough isomorphism to computing the minimum spanning tree, which means the
implementation can be quite fast (`O(n^2)`, to be precise). However, other
linkage criteria require more general purpose algorithms with higher constant
factors or even worse time complexity. For example, using median linkage has
worst case `O(n^3)` complexity, although it is often `n^2` in practice.
In this case, we'll choose average linkage (which is `O(n^2)`). With that
decision made, we can finally run linkage:
```
# fn haversine((lat1, lon1): (f64, f64), (lat2, lon2): (f64, f64)) -> f64 {
# const EARTH_RADIUS: f64 = 3958.756; // miles
#
# let (lat1, lon1) = (lat1.to_radians(), lon1.to_radians());
# let (lat2, lon2) = (lat2.to_radians(), lon2.to_radians());
#
# let delta_lat = lat2 - lat1;
# let delta_lon = lon2 - lon1;
# let x =
# (delta_lat / 2.0).sin().powi(2)
# + lat1.cos() * lat2.cos() * (delta_lon / 2.0).sin().powi(2);
# 2.0 * EARTH_RADIUS * x.sqrt().asin()
# }
# let coordinates = vec![
# (42.5833333, -71.8027778),
# (42.2791667, -71.4166667),
# (42.3458333, -71.5527778),
# (42.1513889, -71.6500000),
# (42.3055556, -71.5250000),
# (42.2694444, -71.6166667),
# ];
# let mut condensed = vec![];
# for row in 0..coordinates.len() - 1 {
# for col in row + 1..coordinates.len() {
# condensed.push(haversine(coordinates[row], coordinates[col]));
# }
# }
use kodama::{Method, linkage};
let dend = linkage(&mut condensed, coordinates.len(), Method::Average);
// The dendrogram always has `N - 1` steps, where each step corresponds to a
// newly formed cluster by merging two previous clusters. The last step creates
// a cluster that contains all observations.
assert_eq!(dend.len(), coordinates.len() - 1);
```
The output of `linkage` is a stepwise
[`Dendrogram`](struct.Dendrogram.html).
Each step corresponds to a merge between two previous clusters. Each step is
represented by a 4-tuple: a pair of cluster labels, the dissimilarity between
the two clusters that have been merged and the total number of observations
in the newly formed cluster. Here's what our dendrogram looks like:
```text
cluster1 cluster2 dissimilarity size
2 4 3.1237967760688776 2
5 6 5.757158112027513 3
1 7 8.1392602685723 4
3 8 12.483148228609206 5
0 9 25.589444117482433 6
```
Another way to look at a dendrogram is to visualize it (the following image was
created with matplotlib):

If you're familiar with the central Massachusetts region, then this dendrogram
is probably incredibly boring. But if you're not, then this visualization
immediately tells you which municipalities are closest to each other. For
example, you can tell right away that Fitchburg is quite far from any other
municipality!
# Testing
The testing in this crate is made up of unit tests on internal data structures
and quickcheck properties that check the consistency between the various
clustering algorithms. That is, quickcheck is used to test that, given the
same inputs, the `mst`, `nnchain`, `generic` and `primitive` implementations
all return the same output.
There are some caveats to this testing strategy:
1. Only the `generic` and `primitive` implementations support all linkage
criteria, which means some linkage criteria have worse test coverage.
2. Principally, this testing strategy assumes that at least one of the
implementations is correct.
3. The various implementations do not specify how ties are handled, which
occurs whenever the same dissimilarity value appears two or more times for
distinct pairs of observations. That means there are multiple correct
dendrograms depending on the input. This case is not tested, and instead,
all input matrices are forced to contain distinct dissimilarity values.
4. The output of both Müllner's and SciPy's implementations of hierarchical
clustering has been hand-checked with the output of this crate. It would
be better to test this automatically, but the scaffolding has not been
built.
Obviously, this is not ideal and there is a lot of room for improvement!
*/
#![deny(missing_docs)]
use std::error;
use std::fmt;
use std::io;
use std::result;
use std::str::FromStr;
pub use crate::chain::{nnchain, nnchain_with};
pub use crate::dendrogram::{Dendrogram, Step};
pub use crate::float::Float;
pub use crate::generic::{generic, generic_with};
pub use crate::primitive::{primitive, primitive_with};
pub use crate::spanning::{mst, mst_with};
use crate::active::Active;
use crate::queue::LinkageHeap;
use crate::union::LinkageUnionFind;
mod active;
mod chain;
mod condensed;
mod dendrogram;
mod float;
mod generic;
mod method;
mod primitive;
mod queue;
mod spanning;
#[cfg(test)]
mod test;
mod union;
/// A type alias for `Result<T, Error>`.
pub type Result<T> = result::Result<T, Error>;
/// An error.
#[derive(Clone, Debug)]
pub enum Error {
/// This error occurs when attempting to parse a method string that
/// doesn't correspond to a valid method.
InvalidMethod(String),
}
impl error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Error::InvalidMethod(ref name) => {
write!(f, "unrecognized method name: '{}'", name)
}
}
} | io::Error::new(io::ErrorKind::Other, err)
}
}
/// A method for computing the dissimilarities between clusters.
///
/// The method selected dictates how the dissimilarities are computed whenever
/// a new cluster is formed. In particular, when clusters `a` and `b` are
/// merged into a new cluster `ab`, then the pairwise dissimilarity between
/// `ab` and every other cluster is computed using one of the methods variants
/// in this type.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Method {
/// Assigns the minimum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// min(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Single,
/// Assigns the maximum dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// max(d[ab, x] for ab in AB for x in X)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively.
Complete,
/// Assigns the average dissimilarity between all pairs of observations.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sum(d[ab, x] for ab in AB for x in X) / (|AB| * |X|)
/// ```
///
/// where `ab` and `x` correspond to all observations in `AB` and `X`,
/// respectively, and `|AB|` and `|X|` correspond to the total number of
/// observations in `AB` and `X`, respectively.
Average,
/// Assigns the weighted dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// 0.5 * (d(A, X) + d(B, X))
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Weighted,
/// Assigns the Ward dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = d(A, X)^2 * (|A| + |X|);
/// let t2 = d(B, X)^2 * (|B| + |X|);
/// let t3 = d(A, B)^2 * |X|;
/// let T = |A| + |B| + |X|;
/// sqrt(t1/T + t2/T - t3/T)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Ward,
/// Assigns the centroid dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// let t1 = |A| * d(A, X)^2 + |B| * d(B, X)^2;
/// let t2 = |A| * |B| * d(A, B)^2;
/// let size = |A| + |B|;
/// sqrt(t1/size - t2/size^2)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Centroid,
/// Assigns the median dissimilarity between clusters.
///
/// Specifically, if `AB` is a newly merged cluster and `X` is every other
/// cluster, then the pairwise dissimilarity between `AB` and `X` is
/// computed by
///
/// ```text
/// sqrt(d(A, X)^2/2 + d(B, X)^2/2 - d(A, B)^2/4)
/// ```
///
/// where `A` and `B` correspond to the clusters that merged to create
/// `AB`.
Median,
}
impl Method {
/// Convert this linkage method into a nearest neighbor chain method.
///
/// More specifically, if this method is a method that the `nnchain`
/// algorithm can compute, then this returns the corresponding
/// `MethodChain` value. Otherwise, this returns `None`.
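///
/// A quick illustrative check of the mapping:
///
/// ```
/// use kodama::{Method, MethodChain};
///
/// assert_eq!(Method::Average.into_method_chain(), Some(MethodChain::Average));
/// assert_eq!(Method::Median.into_method_chain(), None);
/// ```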
pub fn into_method_chain(self) -> Option<MethodChain> {
match self {
Method::Single => Some(MethodChain::Single),
Method::Complete => Some(MethodChain::Complete),
Method::Average => Some(MethodChain::Average),
Method::Weighted => Some(MethodChain::Weighted),
Method::Ward => Some(MethodChain::Ward),
Method::Centroid | Method::Median => None,
}
}
/// Returns true if and only if the dendrogram should be sorted before
/// generating cluster labels.
fn requires_sorting(&self) -> bool {
match *self {
Method::Centroid | Method::Median => false,
_ => true,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
if self.on_squares() {
for x in condensed_matrix.iter_mut() {
*x = *x * *x;
}
}
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
if self.on_squares() {
for step in dend.steps_mut() {
step.dissimilarity = step.dissimilarity.sqrt();
}
}
}
/// Return true if and only if this method computes dissimilarities on
/// squares.
fn on_squares(&self) -> bool {
match *self {
Method::Ward | Method::Centroid | Method::Median => true,
_ => false,
}
}
}
impl FromStr for Method {
type Err = Error;
fn from_str(s: &str) -> Result<Method> {
match s {
"single" => Ok(Method::Single),
"complete" => Ok(Method::Complete),
"average" => Ok(Method::Average),
"weighted" => Ok(Method::Weighted),
"centroid" => Ok(Method::Centroid),
"median" => Ok(Method::Median),
"ward" => Ok(Method::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// A method for computing dissimilarities between clusters in the `nnchain`
/// linkage algorithm.
///
/// The nearest-neighbor chain algorithm,
/// or [`nnchain`](fn.nnchain.html),
/// performs hierarchical clustering using a specialized algorithm that can
/// only compute linkage for methods that do not produce inversions in the
/// final dendrogram. As a result, the `nnchain` algorithm cannot be used
/// with the `Median` or `Centroid` methods. Therefore, `MethodChain`
/// identifies the subset of methods that can be used with `nnchain`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MethodChain {
/// See [`Method::Single`](enum.Method.html#variant.Single).
Single,
/// See [`Method::Complete`](enum.Method.html#variant.Complete).
Complete,
/// See [`Method::Average`](enum.Method.html#variant.Average).
Average,
/// See [`Method::Weighted`](enum.Method.html#variant.Weighted).
Weighted,
/// See [`Method::Ward`](enum.Method.html#variant.Ward).
Ward,
}
impl MethodChain {
/// Convert this `nnchain` linkage method into a general purpose
/// linkage method.
pub fn into_method(self) -> Method {
match self {
MethodChain::Single => Method::Single,
MethodChain::Complete => Method::Complete,
MethodChain::Average => Method::Average,
MethodChain::Weighted => Method::Weighted,
MethodChain::Ward => Method::Ward,
}
}
/// Square the given matrix if and only if this method must compute
/// dissimilarities between clusters on the squares of dissimilarities.
fn square<T: Float>(&self, condensed_matrix: &mut [T]) {
self.into_method().square(condensed_matrix);
}
/// Take the square-root of each step-wise dissimilarity in the given
/// dendrogram if this method operates on squares.
fn sqrt<T: Float>(&self, dend: &mut Dendrogram<T>) {
self.into_method().sqrt(dend);
}
}
impl FromStr for MethodChain {
type Err = Error;
fn from_str(s: &str) -> Result<MethodChain> {
match s {
"single" => Ok(MethodChain::Single),
"complete" => Ok(MethodChain::Complete),
"average" => Ok(MethodChain::Average),
"weighted" => Ok(MethodChain::Weighted),
"ward" => Ok(MethodChain::Ward),
_ => Err(Error::InvalidMethod(s.to_string())),
}
}
}
/// Return a hierarchical clustering of observations given their pairwise
/// dissimilarities.
///
/// The pairwise dissimilarities must be provided as a *condensed pairwise
/// dissimilarity matrix*, where only the values in the upper triangle are
/// explicitly represented, not including the diagonal. As a result, the given
/// matrix should have length `observations-choose-2` and only have values
/// defined for pairs of `(a, b)` where `a < b`.
///
/// `observations` is the total number of observations that are being
/// clustered. Every pair of observations must have a finite non-NaN
/// dissimilarity.
///
/// The return value is a
/// [`Dendrogram`](struct.Dendrogram.html),
/// which encodes the hierarchical clustering as a sequence of
/// `observations - 1` steps, where each step corresponds to the creation of
/// a cluster by merging exactly two previous clusters. The very last cluster
/// created contains all observations.
pub fn linkage<T: Float>(
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
) -> Dendrogram<T> {
let matrix = condensed_dissimilarity_matrix;
let mut state = LinkageState::new();
let mut steps = Dendrogram::new(observations);
linkage_with(&mut state, matrix, observations, method, &mut steps);
steps
}
/// Like [`linkage`](fn.linkage.html), but amortizes allocation.
///
/// The `linkage` function is more ergonomic to use, but also potentially more
/// costly. Therefore, `linkage_with` exposes two key points for amortizing
/// allocation.
///
/// Firstly, [`LinkageState`](struct.LinkageState.html) corresponds to internal
/// mutable scratch space used by the clustering algorithms. It can be
/// reused in subsequent calls to `linkage_with` (or any of the other `with`
/// clustering functions).
///
/// Secondly, the caller must provide a
/// [`Dendrogram`](struct.Dendrogram.html)
/// that is mutated in place. This is in contrast to `linkage`, where a
/// dendrogram is created and returned.
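///
/// A minimal usage sketch (illustrative only; it mirrors how `linkage` itself
/// drives this function, and assumes `Dendrogram::new` and `LinkageState::new`
/// are the intended way to construct the two reusable values):
///
/// ```
/// use kodama::{linkage_with, Dendrogram, LinkageState, Method};
///
/// let observations = 3;
/// let mut condensed = vec![1.0f64, 2.0, 3.0];
/// let mut state = LinkageState::new();
/// let mut steps = Dendrogram::new(observations);
/// linkage_with(&mut state, &mut condensed, observations, Method::Average, &mut steps);
/// assert_eq!(steps.len(), observations - 1);
/// ```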
pub fn linkage_with<T: Float>(
state: &mut LinkageState<T>,
condensed_dissimilarity_matrix: &mut [T],
observations: usize,
method: Method,
steps: &mut Dendrogram<T>,
) {
let matrix = condensed_dissimilarity_matrix;
if let Method::Single = method {
mst_with(state, matrix, observations, steps);
} else if let Some(method) = method.into_method_chain() {
nnchain_with(state, matrix, observations, method, steps);
} else {
generic_with(state, matrix, observations, method, steps);
}
}
/// Mutable scratch space used by the linkage algorithms.
///
/// `LinkageState` is an opaque representation of mutable scratch space used
/// by the linkage algorithms. It is provided only for callers who wish to
/// amortize allocation using the `with` variants of the clustering functions.
/// This may be useful when your requirements call for rapidly running
/// hierarchical clustering on small dissimilarity matrices.
///
/// The memory used by `LinkageState` is proportional to the number of
/// observations being clustered.
///
/// The `T` type parameter refers to the type of dissimilarity used in the
/// pairwise matrix. In practice, `T` is a floating point type.
#[derive(Debug, Default)]
pub struct LinkageState<T> {
/// Maps a cluster index to the size of that cluster.
///
/// This mapping changes as clustering progresses. Namely, if `a` and `b`
/// are clusters with `a < b` and they are merged, then `a` is no longer a
/// valid cluster index and `b` now corresponds to the new cluster formed
/// by merging `a` and `b`.
sizes: Vec<usize>,
/// All active observations in the dissimilarity matrix.
///
/// When two clusters are merged, one of them is inactivated while the
/// other morphs to represent the merged cluster. This provides efficient
/// iteration over all active clusters.
active: Active,
/// A map from observation index to the minimal edge connecting another
/// observation that is not yet in the minimum spanning tree.
///
/// This is only used in the MST algorithm.
min_dists: Vec<T>,
/// A union-find set for merging clusters.
///
/// This is used for assigning labels to the dendrogram.
set: LinkageUnionFind,
/// A nearest-neighbor chain.
///
/// This is only used in the NN-chain algorithm.
chain: Vec<usize>,
/// A priority queue containing nearest-neighbor dissimilarities.
///
/// This is only used in the generic algorithm.
queue: LinkageHeap<T>,
/// A nearest neighbor candidate for each cluster.
///
/// This is only used in the generic algorithm.
nearest: Vec<usize>,
}
impl<T: Float> LinkageState<T> {
/// Create a new mutable scratch space for use in the `with` variants of
/// the clustering functions.
///
/// The clustering functions will automatically resize the scratch space
/// as needed based on the number of observations being clustered.
pub fn new() -> LinkageState<T> {
LinkageState {
sizes: vec![],
active: Active::new(),
min_dists: vec![],
set: LinkageUnionFind::new(),
chain: vec![],
queue: LinkageHeap::new(),
nearest: vec![],
}
}
/// Clear the scratch space and allocate enough room for | }
impl From<Error> for io::Error {
fn from(err: Error) -> io::Error { | random_line_split |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // vertex slots
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // index pairs used to build the contraction pairs
trash : Vec<usize>, // vertex slots available for reuse
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// Get the indices of the three vertices
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// Link the vertices to this triangle face
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// Compute the face's Kp quadric
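// Standard quadric error metric construction: for this face's supporting plane
// n·v + d = 0, with unit normal n and d = -n·vertex1, the quadric is
// Kp = p * p^T where p = (n.x, n.y, n.z, d). For a homogeneous point
// v = (x, y, z, 1), v^T * Kp * v is the squared distance from v to the plane,
// which is what the `from_fn` call below builds entry by entry.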
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // normal vector N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// Add one spare vertex slot used as scratch space
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// Compute each vertex's Qv matrix
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// Create a pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO: the threshold should be configurable at runtime
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// Update the vertex info and the PairInfo heap
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO: walk all faces, building the pair list and the PairInfo heap
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// Simplification loop
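// Heap entries are handled lazily: each pop yields the currently cheapest
// candidate pair, and stale entries are cleaned up afterwards. DELETE drops
// entries whose pair has already been invalidated, while RENEW recomputes the
// cost of a pair whose endpoint vertices changed after the entry was pushed.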
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// Check whether the pair at the top of the heap is still valid, or needs updating
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// Update the top of the heap
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// Recompute the pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
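// Collapse the pair `id`: retire its two endpoint vertices, place a new vertex
// at the pair's optimal contraction point, destroy faces that contained both
// endpoints (they become degenerate), and re-point the surviving faces and
// pairs at the new vertex slot.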
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // mark this pair as invalid
// Fetch the two old endpoint vertices
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// Recycle the old vertex indices
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// Grab the vertex slot that will hold the new vertex
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// Update the affected faces
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // point this face entry at the new vertex slot
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// Update the affected pairs
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() { | continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
} | if *idx == id { | random_line_split |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| identifier_name |
||
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
} else {
assert!( idx1 != idx2);
self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = | new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point(); | identifier_body |
mesh.rs | use super::vertex::Vertex;
use super::face::Face;
use super::pair::{ Pair, PairInfo };
use std::collections::BinaryHeap;
use nalgebra::base::Matrix4;
use std::path::Path;
use ordered_float::OrderedFloat;
use std::io::LineWriter;
use std::fs::File;
use std::io::prelude::*;
use std::io::BufReader;
use std::io::BufRead;
use std::str::FromStr;
use std::collections::HashSet;
use std::collections::VecDeque;
use std::sync::Arc;
use std::cell::RefCell;
#[derive(Default)]
pub struct Mesh {
vertices : Vec<Arc<RefCell<Vertex>>>, // 顶点实体
faces : Vec<Face>,
pairs : Vec<Pair>,
heap : BinaryHeap<PairInfo>,
pre_pairs : VecDeque<HashSet<usize>>, // 用于建立pair的数对
trash : Vec<usize>, // 可供使用的顶点实体集合
total : u32,
}
impl Mesh{
pub fn load(&mut self, file_path : &str) {
let f = File::open(file_path).unwrap();
let file = BufReader::new(&f);
let mut face_index = 0;
for (_, line) in file.lines().enumerate() {
let l = line.unwrap();
let args : Vec<&str> = l.split(' ').collect();
match args[0] {
"v" => {
assert!( args.len() == 4);
self.vertices.push(Arc::new(RefCell::new(Vertex::new(
f64::from_str(args[1]).unwrap(),
f64::from_str(args[2]).unwrap(),
f64::from_str(args[3]).unwrap()
))));
self.pre_pairs.push_back(HashSet::new());
}
"f" => {
// 取得三个顶点的编号
let idx1 = usize::from_str(args[1]).unwrap() - 1;
let idx2 = usize::from_str(args[2]).unwrap() - 1;
let idx3 = usize::from_str(args[3]).unwrap() - 1;
if idx1 < idx2 {
self.pre_pairs[idx1].insert(idx2);
| self.pre_pairs[idx2].insert(idx1);
}
if idx1 < idx3 {
self.pre_pairs[idx1].insert(idx3);
} else {
assert!( idx1 != idx3);
self.pre_pairs[idx3].insert(idx1);
}
if idx2 < idx3 {
self.pre_pairs[idx2].insert(idx3);
} else {
assert!( idx2 != idx3);
self.pre_pairs[idx3].insert(idx2);
}
let mut vertex1 = self.vertices[idx1].borrow_mut();
let mut vertex2 = self.vertices[idx2].borrow_mut();
let mut vertex3 = self.vertices[idx3].borrow_mut();
// 为顶点和三角形面片建立连接
vertex1.add_face(face_index, 0);
vertex2.add_face(face_index, 1);
vertex3.add_face(face_index, 2);
face_index += 1;
// 计算三角形面片的Kp
let vec21 = vertex2.coord - vertex1.coord;
let vec31 = vertex3.coord - vertex1.coord;
let n = vec21.cross(&vec31).normalize(); // 法向量N
let d = n.dot(&vertex1.coord) * -1.0;
let k_p = Matrix4::from_fn(|r, c| {
let src1 = if r > 2 { d } else { n[r] };
let src2 = if c > 2 { d } else { n[c] };
src1 * src2
});
self.faces.push(Face::new(idx1, idx2, idx3, k_p));
}
_ => {
println!("other");
}
}
}
// 多出一个空闲顶点用于中转
self.vertices.push(Arc::new(RefCell::new(Vertex::new(0.0f64, 0.0f64, 0.0f64))));
println!("init vertices {}, faces {}", self.vertices.len(), self.faces.len());
}
pub fn cal_q_v(&mut self) {
// 计算顶点的Qv矩阵
for v in self.vertices.iter() {
let mut vertex = v.borrow_mut();
let mut q_v = Matrix4::zeros();
for (idx, _) in vertex.faces.iter() {
q_v += self.faces[*idx].k_p;
}
vertex.q_v = q_v;
}
}
fn add_pair(&mut self, id1 : usize, id2 : usize) {
assert!( id1 < id2);
// 创建一个pair
let mut vertex1 = self.vertices[id1].borrow_mut();
let mut vertex2 = self.vertices[id2].borrow_mut();
// TODO 阈值应当运行时指定
let dist = Vertex::distance(&vertex1.coord, &vertex2.coord);
if dist > 10.0 {
return;
}
let mut pair = Pair::new(id1, id2);
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
let idx = self.pairs.len();
let value = pair.get_value();
self.pairs.push(pair);
// 更新顶点信息,pairinfo信息
let pair_info = PairInfo { id : idx, value : OrderedFloat::from(value) };
self.heap.push(pair_info);
vertex1.add_pair(idx.clone(), 1);
vertex2.add_pair(idx.clone(), 2);
}
pub fn init_pairs(&mut self) {
// TODO 遍历所有面片,建立pair的列表以及pairinfo的堆
let len = self.pre_pairs.len();
for v_idx in 0..len {
let v_set = self.pre_pairs.pop_front().unwrap();
for idx in v_set.iter() {
assert!( v_idx < *idx );
self.add_pair( v_idx, idx.clone());
}
}
println!("number of pairs : {}", self.pairs.len());
}
pub fn run(&mut self, scale : f32) {
// 化简过程
self.total = self.faces.len() as u32;
let target = ( self.total as f32 * scale ) as u32;
println!("total is {}, target is {}", self.total, target);
while self.total > target {
if let Some(vectim) = self.heap.pop() {
self.del_pair(vectim.id);
} else { // empty
println!("to break!!!");
break;
}
loop {
// 检查堆头的pair是否为有效的pair,或者是否需要更新
let mut flag = HeadType::NORMAL;
if let Some(head) = self.heap.peek() {
let temp_pair = self.pairs.get(head.id).unwrap();
if !temp_pair.valid() {
flag = HeadType::DELETE;
}
if temp_pair.valid() && temp_pair.access() {
flag = HeadType::RENEW;
}
} else {
break;
}
// 更新堆头
match flag {
HeadType::DELETE => {
self.heap.pop();
continue;
}
HeadType::RENEW => {
let mut to_renew = self.heap.pop().unwrap();
let mut pair = self.pairs.get_mut(to_renew.id).unwrap();
let (first, second) = pair.get_vertex();
// 创建一个pair
let vertex1 = self.vertices[first].borrow();
let vertex2 = self.vertices[second].borrow();
pair.q_matrix = vertex1.q_v + vertex2.q_v;
pair.cal_best_point(&vertex1.coord, &vertex2.coord);
pair.cal_shrink_value();
pair.clear_access();
to_renew.value = OrderedFloat::from(pair.get_value());
self.heap.push(to_renew);
continue;
}
HeadType::NORMAL => {
break;
}
}
}
}
let mut real_idx : usize = 1;
for step_idx in 0..self.vertices.len() {
let v = self.vertices[step_idx].borrow();
if !v.valid() {
continue;
}
for (f_idx, pos) in v.faces.iter() {
self.faces[*f_idx].indices[*pos] = real_idx;
}
real_idx += 1;
}
let mut face_num = 0;
for f in self.faces.iter() {
if f.valid() {
face_num += 1;
}
}
println!("new face num {}", face_num);
}
fn del_pair(&mut self, id : usize) {
assert!( self.pairs[id].valid() );
self.pairs[id].destroy(); // 将该pair置为无效
// 获取旧顶点
let (first, second) = self.pairs[id].get_vertex();
assert!( first != second );
let first_ptr = self.vertices[first].clone();
let second_ptr = self.vertices[second].clone();
let mut vertex1 = first_ptr.borrow_mut();
let mut vertex2 = second_ptr.borrow_mut();
// 回收旧顶点编号
self.trash.push(first);
self.trash.push(second);
let new_pos = self.trash.pop().unwrap();
// 获取用于存放新顶点的顶点实体
let temp_pos = self.vertices.len() - 1;
assert!( temp_pos != second && temp_pos != first);
let temp_ptr = self.vertices[temp_pos].clone();
let mut new_v = temp_ptr.borrow_mut();
new_v.renew_state();
assert!( new_v.faces.is_empty() );
assert!( new_v.pairs.is_empty() );
let best_point = self.pairs[id].get_best_point();
new_v.coord.x = best_point.x;
new_v.coord.y = best_point.y;
new_v.coord.z = best_point.z;
new_v.q_v = vertex1.q_v + vertex2.q_v;
// 更新相关的面片
for (idx, pos) in vertex1.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
let both_in = self.faces[*idx].both_in(pos, &second);
if both_in {
self.faces[*idx].destroy();
self.total -= 1;
continue;
}
self.faces[*idx].indices[*pos] = new_pos; // 更新面片的对应顶点坐标
new_v.add_face(idx.clone(), pos.clone());
}
}
for (idx, pos) in vertex2.faces.iter() {
let valid = self.faces[*idx].valid();
if valid {
self.faces[*idx].indices[*pos] = new_pos;
new_v.add_face(idx.clone(), pos.clone());
}
}
// 更新相关的pair
for (idx, pos) in vertex1.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
for (idx, pos) in vertex2.pairs.iter() {
if *idx == id {
continue;
}
if self.pairs[*idx].set_vertex(pos, new_pos) {
new_v.pairs.push((idx.clone(), pos.clone()));
} else {
self.pairs[*idx].destroy();
}
}
vertex1.destroy();
vertex2.destroy();
self.vertices.swap(new_pos, temp_pos);
}
pub fn save(&mut self, path : &Path) {
let file = match File::create(&path) {
Err(why) => panic!("couldn't create {}", why.to_string()),
Ok(file) => file,
};
let mut file_writer = LineWriter::new(file);
for v_ptr in self.vertices.iter() {
let v = v_ptr.borrow();
if v.valid() {
file_writer.write_all(&v.get_string().into_bytes()).unwrap();
}
}
for f in self.faces.iter() {
if f.valid() {
file_writer.write_all(&f.get_string().into_bytes()).unwrap();
}
}
file_writer.flush().unwrap();
}
}
#[derive(Debug)]
enum HeadType {
DELETE,
RENEW,
NORMAL
}
| } else {
assert!( idx1 != idx2);
| conditional_block |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor};
use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term;
use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed; instead they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
fn get_colour_index(c: i16) -> usize {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
}
fn | (w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
}
| new_size_info | identifier_name |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor}; | use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed; instead they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[i..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
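/// Look up the colour pair index registered for this pancurses colour,
/// falling back to 0 (white on black) when there is no match.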
fn get_colour_index(c: i16) -> usize {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
}
fn new_size_info(w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
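/// Draw a border and the current contents of the terminal grid into the curses
/// window, then move the curses cursor to mirror the terminal cursor. Colour
/// specs and indexed colours aren't supported and produce a `RenderError`.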
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
} | use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term; | random_line_split |
main.rs | // Copyright 2020 Sean Kelleher. All rights reserved.
// Use of this source code is governed by a MIT
// licence that can be found in the LICENCE file.
use std::convert::TryInto;
use std::io::ErrorKind;
use std::io::Read;
use std::io::Write;
extern crate alacritty;
extern crate pancurses;
use alacritty::ansi::{Color, NamedColor, Processor};
use alacritty::cli::Options;
use alacritty::config::Config;
use alacritty::index::{Point, Line, Column};
use alacritty::Term;
use alacritty::term::SizeInfo;
use alacritty::tty;
use pancurses::colorpair::ColorPair;
use pancurses::Input;
use pancurses::ToChtype;
use pancurses::Window;
const OS_IO_ERROR: i32 = 5;
fn main() {
let win = pancurses::initscr();
// Characters are not rendered when they're typed; instead, they're sent to
// the underlying terminal, which decides whether to echo them or not (by
// writing new characters from `ptyf`, below). An example scenario of when
// this comes in handy is in the case of typing backspace. Using `noecho`
// prevents `^?` being briefly echoed before the cursor in between the time
// that the backspace key was pressed and the time when the new rendering of
// the terminal state is received and output.
pancurses::noecho();
pancurses::start_color();
for i in 0..COLOUR_INDEXES.len()-1 {
pancurses::init_pair(i as i16, COLOUR_INDEXES[i], pancurses::COLOR_BLACK);
}
// We put the window input into non-blocking mode so that `win.getch()`
// returns `None` immediately if there is no input. This allows us to read
// from the PTY and the window in the same thread. Note that this
// results in a busy loop, which should ideally be replaced by blocking
// reads on separate threads for efficiency.
win.nodelay(true);
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
let conf = Config::default();
// `pty` provides methods for manipulating the PTY.
let pty = tty::new(&conf, &Options::default(), &&size, None);
// `ptyf` is a `File` interface to the server end of the PTY client/server
// pair.
let mut ptyf = pty.reader();
// `parser` reads and parses the data read from `pty`, and updates the state
// of the terminal "display" that is maintained in `term`.
let mut parser = Processor::new();
let mut term = Term::new(&conf, size);
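// Border characters the user can cycle through with `^D` (handled in the input loop below).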
let border_chars = ['*', '+', '-'];
let mut cur_border_char = 0;
let mut exit_reason: Option<String> = None;
let mut buf = [0u8; 0x1000];
// We would ideally avoid using labels for loop termination but we use one
// here for simplicity.
'evt_loop: loop {
match ptyf.read(&mut buf[..]) {
Ok(0) => {
// End-of-file.
break 'evt_loop;
},
Ok(n) => {
for byte in &buf[..n] {
parser.advance(&mut term, *byte, &mut ptyf);
}
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
},
Err(e) => {
let k = e.kind();
if k == ErrorKind::Other && e.raw_os_error() == Some(OS_IO_ERROR) {
// We interpret an `OS_IO_ERROR` as the PTY process having
// terminated, as it corresponds with this during
// experimentation.
break 'evt_loop;
}
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
if let Some(input) = win.getch() {
match input {
Input::Character(c) => {
let utf8_len = c.len_utf8();
let mut bytes = Vec::with_capacity(utf8_len);
unsafe {
bytes.set_len(utf8_len);
c.encode_utf8(&mut bytes[..]);
}
if utf8_len == 1 && bytes[0] == 4 {
// We use `^D` as a trigger to change the border style.
cur_border_char = (cur_border_char + 1) % border_chars.len();
let result = render_term_to_win(&term, &win, border_chars[cur_border_char]);
if let Err(err) = result {
let colour_type =
match err {
RenderError::ColourSpecFound => "specification",
RenderError::ColourIndexFound => "index",
};
exit_reason = Some(format!(
"encountered a colour {}, which isn't currently supported",
colour_type,
));
break 'evt_loop;
}
} else {
let mut i = 0;
while i < utf8_len {
match ptyf.write(&bytes[i..]) {
Ok(0) => {
exit_reason = Some(format!("PTY is unable to accept bytes"));
break 'evt_loop;
},
Ok(n) => {
i += n;
},
Err(e) => {
let k = e.kind();
if k != ErrorKind::Interrupted && k != ErrorKind::WouldBlock {
exit_reason = Some(format!(
"couldn't read from PTY (error kind: {:?}, os error: {:?}): {}",
e.kind(),
e.raw_os_error(),
e,
));
break 'evt_loop;
};
},
}
}
}
},
Input::KeyResize => {
let (y, x) = win.get_max_yx();
let size = new_size_info(x - 2, y - 2);
term.resize(&size);
pty.resize(&&size);
},
_ => {
exit_reason = Some(format!("unhandled input: {:?}", input));
break 'evt_loop;
},
}
}
}
pancurses::endwin();
if let Some(s) = exit_reason {
println!("process exited: {}", s);
}
}
const COLOUR_INDEXES: [i16; 8] = [
pancurses::COLOR_WHITE,
pancurses::COLOR_RED,
pancurses::COLOR_GREEN,
pancurses::COLOR_BLUE,
pancurses::COLOR_CYAN,
pancurses::COLOR_MAGENTA,
pancurses::COLOR_YELLOW,
pancurses::COLOR_BLACK,
];
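/// Look up the colour pair index registered for this pancurses colour,
/// falling back to 0 (white on black) when there is no match.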
fn get_colour_index(c: i16) -> usize |
fn new_size_info(w: i32, h: i32) -> SizeInfo {
SizeInfo {
width: w as f32,
height: h as f32,
cell_width: 1.0,
cell_height: 1.0,
padding_x: 0.0,
padding_y: 0.0,
}
}
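/// Draw a border and the current contents of the terminal grid into the curses
/// window, then move the curses cursor to mirror the terminal cursor. Colour
/// specs and indexed colours aren't supported and produce a `RenderError`.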
fn render_term_to_win(term: &Term, win: &Window, border_char: char) -> RenderResult {
win.clear();
let (y, x) = win.get_max_yx();
for i in 0..y {
win.mvaddch(i, 0, border_char);
win.mvaddch(i, x-1, border_char);
}
for i in 0..x {
win.mvaddch(0, i, border_char);
win.mvaddch(y-1, i, border_char);
}
let grid = term.grid();
let mut line = Line(0);
while line < grid.num_lines() {
let mut col = Column(0);
while col < grid.num_cols() {
let cell = grid[line][col];
match cell.fg {
Color::Named(name) => {
let c = match name {
NamedColor::Background => pancurses::COLOR_BLACK,
NamedColor::Black => pancurses::COLOR_BLACK,
NamedColor::Blue => pancurses::COLOR_BLUE,
NamedColor::BrightBlack => pancurses::COLOR_BLACK,
NamedColor::BrightBlue => pancurses::COLOR_BLUE,
NamedColor::BrightCyan => pancurses::COLOR_CYAN,
NamedColor::BrightGreen => pancurses::COLOR_GREEN,
NamedColor::BrightMagenta => pancurses::COLOR_MAGENTA,
NamedColor::BrightRed => pancurses::COLOR_RED,
NamedColor::BrightWhite => pancurses::COLOR_WHITE,
NamedColor::BrightYellow => pancurses::COLOR_YELLOW,
NamedColor::Cursor => pancurses::COLOR_BLACK,
NamedColor::CursorText => pancurses::COLOR_WHITE,
NamedColor::Cyan => pancurses::COLOR_CYAN,
NamedColor::DimBlack => pancurses::COLOR_BLACK,
NamedColor::DimBlue => pancurses::COLOR_BLUE,
NamedColor::DimCyan => pancurses::COLOR_CYAN,
NamedColor::DimGreen => pancurses::COLOR_GREEN,
NamedColor::DimMagenta => pancurses::COLOR_MAGENTA,
NamedColor::DimRed => pancurses::COLOR_RED,
NamedColor::DimWhite => pancurses::COLOR_WHITE,
NamedColor::DimYellow => pancurses::COLOR_YELLOW,
NamedColor::Foreground => pancurses::COLOR_WHITE,
NamedColor::Green => pancurses::COLOR_GREEN,
NamedColor::Magenta => pancurses::COLOR_MAGENTA,
NamedColor::Red => pancurses::COLOR_RED,
NamedColor::White => pancurses::COLOR_WHITE,
NamedColor::Yellow => pancurses::COLOR_YELLOW,
};
win.attrset(ColorPair(get_colour_index(c) as u8));
win.mvaddch(
(line.0 as i32) + 1,
(col.0 as i32) + 1,
cell.c.to_chtype(),
);
},
Color::Spec(_) => {
return Err(RenderError::ColourSpecFound);
},
Color::Indexed(_) => {
return Err(RenderError::ColourIndexFound);
},
};
col += 1;
}
line += 1;
}
let Point{line: Line(row), col: Column(col)} = term.cursor().point;
win.mv(
((row + 1) as usize).try_into().unwrap(),
((col + 1) as usize).try_into().unwrap(),
);
win.refresh();
Ok(())
}
type RenderResult = Result<(), RenderError>;
enum RenderError {
// These colour types aren't currently supported.
ColourSpecFound,
ColourIndexFound,
}
| {
for i in 1..COLOUR_INDEXES.len()-1 {
if c == COLOUR_INDEXES[i] {
return i
}
}
0
} | identifier_body |
options.rs | "$ ",
env!("CARGO_BIN_NAME"),
" old/ new/\n\n",
"If you have a file with conflict markers, you can pass it as a single argument. Difftastic will diff the two conflicting file states.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" file_with_conflicts.js\n\n",
"Difftastic can also be invoked with 7 arguments in the format that GIT_EXTERNAL_DIFF expects.\n\n",
"See the full manual at: https://difftastic.wilfred.me.uk/")
)
.arg(
Arg::new("dump-syntax")
.long("dump-syntax")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the difftastic syntax tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("dump-ts")
.long("dump-ts")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the tree-sitter parse tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("context")
.long("context")
.takes_value(true)
.value_name("LINES")
.long_help("The number of contextual lines to show around changed lines.")
.default_value("3")
.env("DFT_CONTEXT")
.validator(|s| s.parse::<u32>())
.required(false),
)
.arg(
Arg::new("width")
.long("width")
.takes_value(true)
.value_name("COLUMNS")
.long_help("Use this many columns when calculating line wrapping. If not specified, difftastic will detect the terminal width.")
.env("DFT_WIDTH")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("tab-width")
.long("tab-width")
.takes_value(true)
.value_name("NUM_SPACES")
.long_help("Treat a tab as this many spaces.")
.env("DFT_TAB_WIDTH")
.default_value(formatcp!("{}", DEFAULT_TAB_WIDTH))
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("display").long("display")
.possible_values(["side-by-side", "side-by-side-show-both", "inline", "json"])
.default_value("side-by-side")
.value_name("MODE")
.env("DFT_DISPLAY")
.help("Display mode for showing results.
side-by-side: Display the before file and the after file in two separate columns, with line numbers aligned according to unchanged content. If a change is exclusively additions or exclusively removals, use a single column.
side-by-side-show-both: The same as side-by-side, but always uses two columns.
inline: A single column display, closer to traditional diff display.
json: Output the results as a machine-readable JSON array with an element per file.")
)
.arg(
Arg::new("color").long("color")
.possible_values(["always", "auto", "never"])
.default_value("auto")
.env("DFT_COLOR")
.value_name("WHEN")
.help("When to use color output.")
)
.arg(
Arg::new("background").long("background")
.value_name("BACKGROUND")
.env("DFT_BACKGROUND")
.possible_values(["dark", "light"])
.default_value("dark")
.help("Set the background brightness. Difftastic will prefer brighter colours on dark backgrounds.")
)
.arg(
Arg::new("syntax-highlight").long("syntax-highlight")
.value_name("on/off")
.env("DFT_SYNTAX_HIGHLIGHT")
.possible_values(["on", "off"])
.default_value("on")
.help("Enable or disable syntax highlighting.")
)
.arg(
Arg::new("exit-code").long("exit-code")
.env("DFT_EXIT_CODE")
.help("Set the exit code to 1 if there are syntactic changes in any files. For files where there is no detected language (e.g. unsupported language or binary files), sets the exit code if there are any byte changes.")
)
.arg(
Arg::new("check-only").long("check-only")
.env("DFT_CHECK_ONLY")
.help("Report whether there are any changes, but don't calculate them. Much faster.")
)
.arg(
Arg::new("ignore-comments").long("ignore-comments")
.env("DFT_IGNORE_COMMENTS")
.help("Don't consider comments when diffing.")
)
.arg(
Arg::new("skip-unchanged").long("skip-unchanged")
.help("Don't display anything if a file is unchanged.")
)
.arg(
Arg::new("missing-as-empty").long("missing-as-empty")
.help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
)
.arg(
Arg::new("override").long("override")
.value_name("GLOB:NAME")
.help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
.env("DFT_OVERRIDE")
.multiple_occurrences(true)
)
.arg(
Arg::new("list-languages").long("list-languages")
.help("Print the all the languages supported by difftastic, along with their extensions.")
)
.arg(
Arg::new("byte-limit").long("byte-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if either input file exceeds this size.")
.default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
.env("DFT_BYTE_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("graph-limit").long("graph-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the structural graph exceed this number of nodes in memory.")
.default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
.env("DFT_GRAPH_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("parse-error-limit").long("parse-error-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the number of parse errors exceeds this value.")
.default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
.env("DFT_PARSE_ERROR_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("paths")
.value_name("PATHS")
.multiple_values(true)
.hide(true)
.allow_invalid_utf8(true),
)
.arg_required_else_help(true)
}
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
Inline,
SideBySide,
SideBySideShowBoth,
Json,
}
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
NamedPath(std::path::PathBuf),
Stdin,
DevNull,
}
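/// Canonicalise `path`, falling back to the path unchanged if canonicalisation
/// fails (for example when the file doesn't exist).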
fn try_canonicalize(path: &Path) -> PathBuf |
fn relative_to_current(path: &Path) -> PathBuf {
if let Ok(current_path) = std::env::current_dir() {
let path = try_canonicalize(path);
let current_path = try_canonicalize(&current_path);
if let Ok(rel_path) = path.strip_prefix(current_path) {
return rel_path.into();
}
}
path.into()
}
impl FileArgument {
/// Return a `FileArgument` representing this command line
/// argument.
pub fn from_cli_argument(arg: &OsStr) -> Self {
if arg == "/dev/null" {
FileArgument::DevNull
} else if arg == "-" {
FileArgument::Stdin
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
/// Return a `FileArgument` that always represents a path that
/// exists, with the exception of `/dev/null`, which is turned into [FileArgument::DevNull].
pub fn from_path_argument(arg: &OsStr) -> Self {
// For new and deleted files, Git passes `/dev/null` as the reference file.
if arg == "/dev/null" {
FileArgument::DevNull
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
pub fn display(&self) -> String {
match self {
FileArgument::NamedPath(path) => relative_to_current(path).display().to_string(),
FileArgument::Stdin => "(stdin)".to_string(),
FileArgument::DevNull => "/dev/null".to_string(),
}
}
}
pub enum Mode {
Diff {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
/// The path where we can read the LHS file. This is often a
/// temporary file generated by source control.
lhs_path: FileArgument,
/// The path where we can read the RHS file. This is often a
/// temporary file generated by source control.
rhs_path: FileArgument,
/// The path that we show to the user.
display_path: String,
/// If this file has been renamed, the name it had previously.
old_path: Option<String>,
},
DiffFromConflicts {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
path: FileArgument,
/// The path that we show to the user.
display_path: String,
},
ListLanguages {
use_color: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpTreeSitter {
path: String,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpSyntax {
path: String,
ignore_comments: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
}
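/// Return the longest common suffix of the two paths (for example `old/src/main.rs`
/// and `new/src/main.rs` share `src/main.rs`), or `None` when even the final
/// components differ.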
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
let lhs_rev_components = lhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let rhs_rev_components = rhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let mut common_components = vec![];
for (lhs_component, rhs_component) in lhs_rev_components.iter().zip(rhs_rev_components.iter()) {
if lhs_component == rhs_component {
common_components.push(lhs_component.to_string_lossy());
} else {
break;
}
}
if common_components.is_empty() {
None
} else {
common_components.reverse();
Some(common_components.join(&std::path::MAIN_SEPARATOR.to_string()))
}
}
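/// Pick the path shown to the user: the common suffix of both inputs when there is
/// one, otherwise whichever side names a real path, with `/dev/null` and stdin as
/// fallbacks.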
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
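/// Parse `GLOB:LANG_NAME` override arguments. Invalid globs or unknown language
/// names print an error and exit with `EXIT_BAD_ARGUMENTS`; adjacent entries for
/// the same language are merged into a single entry.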
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
let mut res: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
let mut invalid_syntax = false;
for raw_override in raw_overrides {
if let Some((glob_str, lang_name)) = raw_override.rsplit_once(':') {
match glob::Pattern::new(glob_str) {
Ok(pattern) => {
if let Some(language_override) = language_override_from_name(lang_name) {
res.push((language_override, vec![pattern]));
} else {
eprintln!("No such language '{}'", lang_name);
eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
invalid_syntax = true;
}
}
Err(e) => {
eprintln!("Invalid glob syntax '{}'", glob_str);
eprintln!("Glob parsing error: {}", e.msg);
invalid_syntax = true;
}
}
} else {
eprintln!("Invalid language override syntax '{}'", raw_override);
eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
invalid_syntax = true;
}
}
if invalid_syntax {
std::process::exit(EXIT_BAD_ARGUMENTS);
}
res.into_iter()
.coalesce(
|(prev_lang, mut prev_globs), (current_lang, current_globs)| {
if prev_lang == current_lang {
prev_globs.extend(current_globs);
Ok((prev_lang, prev_globs))
} else {
Err(((prev_lang, prev_globs), (current_lang, current_globs)))
}
},
)
.collect()
}
/// Parse CLI arguments passed to the binary.
pub fn parse_args() -> Mode {
let matches = app().get_matches();
let color_output = match matches.value_of("color").expect("color has a default") {
"always" => ColorOutput::Always,
"never" => ColorOutput::Never,
"auto" => ColorOutput::Auto,
_ => {
unreachable!("clap has already validated color")
}
};
let use_color = should_use_color(color_output);
let ignore_comments = matches.is_present("ignore-comments");
let mut raw_overrides: Vec<String> = vec![];
if let Some(overrides) = matches.values_of("override") {
raw_overrides = overrides.map(|s| s.into()).collect();
}
for i in 1..=9 {
if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
raw_overrides.push(value);
}
}
let language_overrides = parse_overrides_or_die(&raw_overrides);
if matches.is_present("list-languages") {
return Mode::ListLanguages {
use_color,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-syntax") {
return Mode::DumpSyntax {
path: path.to_string(),
ignore_comments,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-ts") {
return Mode::DumpTreeSitter {
path: path.to_string(),
language_overrides,
};
}
let display_width = if let Some(arg_width) = matches.value_of("width") {
arg_width
.parse::<usize>()
.expect("Already validated by clap")
} else {
detect_display_width()
};
let display_mode = match matches.value_of("display").expect("display has a default") {
"side-by-side" => DisplayMode::SideBySide,
"side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
"inline" => DisplayMode::Inline,
"json" => {
if env::var(format!("DFT_UNSTABLE")).is_err() {
eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
DisplayMode::Json
}
_ => {
unreachable!("clap has already validated display")
}
};
let background_color = match matches
.value_of("background")
.expect("Always present as we've given clap a default")
{
"dark" => BackgroundColor::Dark,
"light" => BackgroundColor::Light,
_ => unreachable!("clap has already validated the values"),
};
let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");
let graph_limit = matches
.value_of("graph-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let byte_limit = matches
.value_of("byte-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
| {
path.canonicalize().unwrap_or_else(|_| path.into())
} | identifier_body |
options.rs | . Difftastic will diff the two conflicting file states.\n\n",
"$ ",
env!("CARGO_BIN_NAME"),
" file_with_conflicts.js\n\n",
"Difftastic can also be invoked with 7 arguments in the format that GIT_EXTERNAL_DIFF expects.\n\n",
"See the full manual at: https://difftastic.wilfred.me.uk/")
)
.arg(
Arg::new("dump-syntax")
.long("dump-syntax")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the difftastic syntax tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("dump-ts")
.long("dump-ts")
.takes_value(true)
.value_name("PATH")
.long_help(
"Parse a single file with tree-sitter and display the tree-sitter parse tree.",
).help_heading("DEBUG OPTIONS"),
)
.arg(
Arg::new("context")
.long("context")
.takes_value(true)
.value_name("LINES")
.long_help("The number of contextual lines to show around changed lines.")
.default_value("3")
.env("DFT_CONTEXT")
.validator(|s| s.parse::<u32>())
.required(false),
)
.arg(
Arg::new("width")
.long("width")
.takes_value(true)
.value_name("COLUMNS")
.long_help("Use this many columns when calculating line wrapping. If not specified, difftastic will detect the terminal width.")
.env("DFT_WIDTH")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("tab-width")
.long("tab-width")
.takes_value(true)
.value_name("NUM_SPACES")
.long_help("Treat a tab as this many spaces.")
.env("DFT_TAB_WIDTH")
.default_value(formatcp!("{}", DEFAULT_TAB_WIDTH))
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("display").long("display")
.possible_values(["side-by-side", "side-by-side-show-both", "inline", "json"])
.default_value("side-by-side")
.value_name("MODE")
.env("DFT_DISPLAY")
.help("Display mode for showing results.
side-by-side: Display the before file and the after file in two separate columns, with line numbers aligned according to unchanged content. If a change is exclusively additions or exclusively removals, use a single column.
side-by-side-show-both: The same as side-by-side, but always uses two columns.
inline: A single column display, closer to traditional diff display.
json: Output the results as a machine-readable JSON array with an element per file.")
)
.arg(
Arg::new("color").long("color")
.possible_values(["always", "auto", "never"])
.default_value("auto")
.env("DFT_COLOR")
.value_name("WHEN")
.help("When to use color output.")
)
.arg(
Arg::new("background").long("background")
.value_name("BACKGROUND")
.env("DFT_BACKGROUND")
.possible_values(["dark", "light"])
.default_value("dark")
.help("Set the background brightness. Difftastic will prefer brighter colours on dark backgrounds.")
)
.arg(
Arg::new("syntax-highlight").long("syntax-highlight")
.value_name("on/off")
.env("DFT_SYNTAX_HIGHLIGHT")
.possible_values(["on", "off"])
.default_value("on")
.help("Enable or disable syntax highlighting.")
)
.arg(
Arg::new("exit-code").long("exit-code")
.env("DFT_EXIT_CODE")
.help("Set the exit code to 1 if there are syntactic changes in any files. For files where there is no detected language (e.g. unsupported language or binary files), sets the exit code if there are any byte changes.")
)
.arg(
Arg::new("check-only").long("check-only")
.env("DFT_CHECK_ONLY")
.help("Report whether there are any changes, but don't calculate them. Much faster.")
)
.arg(
Arg::new("ignore-comments").long("ignore-comments")
.env("DFT_IGNORE_COMMENTS")
.help("Don't consider comments when diffing.")
)
.arg(
Arg::new("skip-unchanged").long("skip-unchanged")
.help("Don't display anything if a file is unchanged.")
)
.arg(
Arg::new("missing-as-empty").long("missing-as-empty")
.help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
)
.arg(
Arg::new("override").long("override")
.value_name("GLOB:NAME")
.help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
.env("DFT_OVERRIDE")
.multiple_occurrences(true)
)
.arg(
Arg::new("list-languages").long("list-languages")
.help("Print the all the languages supported by difftastic, along with their extensions.")
)
.arg(
Arg::new("byte-limit").long("byte-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if either input file exceeds this size.")
.default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
.env("DFT_BYTE_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("graph-limit").long("graph-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the structural graph exceed this number of nodes in memory.")
.default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
.env("DFT_GRAPH_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("parse-error-limit").long("parse-error-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the number of parse errors exceeds this value.")
.default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
.env("DFT_PARSE_ERROR_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("paths")
.value_name("PATHS")
.multiple_values(true)
.hide(true)
.allow_invalid_utf8(true),
)
.arg_required_else_help(true)
}
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
Inline,
SideBySide,
SideBySideShowBoth,
Json,
}
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
NamedPath(std::path::PathBuf),
Stdin,
DevNull,
}
fn try_canonicalize(path: &Path) -> PathBuf {
path.canonicalize().unwrap_or_else(|_| path.into())
}
fn relative_to_current(path: &Path) -> PathBuf {
if let Ok(current_path) = std::env::current_dir() {
let path = try_canonicalize(path);
let current_path = try_canonicalize(&current_path);
if let Ok(rel_path) = path.strip_prefix(current_path) {
return rel_path.into();
}
}
path.into()
}
impl FileArgument {
/// Return a `FileArgument` representing this command line
/// argument.
pub fn from_cli_argument(arg: &OsStr) -> Self {
if arg == "/dev/null" {
FileArgument::DevNull
} else if arg == "-" {
FileArgument::Stdin
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
/// Return a `FileArgument` that always represents a path that
/// exists, with the exception of `/dev/null`, which is turned into [FileArgument::DevNull].
pub fn from_path_argument(arg: &OsStr) -> Self {
// For new and deleted files, Git passes `/dev/null` as the reference file.
if arg == "/dev/null" {
FileArgument::DevNull
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
pub fn display(&self) -> String {
match self {
FileArgument::NamedPath(path) => relative_to_current(path).display().to_string(),
FileArgument::Stdin => "(stdin)".to_string(),
FileArgument::DevNull => "/dev/null".to_string(),
}
}
}
pub enum Mode {
Diff {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
/// The path where we can read the LHS file. This is often a
/// temporary file generated by source control.
lhs_path: FileArgument,
/// The path where we can read the RHS file. This is often a
/// temporary file generated by source control.
rhs_path: FileArgument,
/// The path that we show to the user.
display_path: String,
/// If this file has been renamed, the name it had previously.
old_path: Option<String>,
},
DiffFromConflicts {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
path: FileArgument,
/// The path that we show to the user.
display_path: String,
},
ListLanguages {
use_color: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpTreeSitter {
path: String,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpSyntax {
path: String,
ignore_comments: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
}
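/// Return the longest common suffix of the two paths (for example `old/src/main.rs`
/// and `new/src/main.rs` share `src/main.rs`), or `None` when even the final
/// components differ.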
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
let lhs_rev_components = lhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let rhs_rev_components = rhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let mut common_components = vec![];
for (lhs_component, rhs_component) in lhs_rev_components.iter().zip(rhs_rev_components.iter()) {
if lhs_component == rhs_component {
common_components.push(lhs_component.to_string_lossy());
} else {
break;
}
}
if common_components.is_empty() {
None
} else {
common_components.reverse();
Some(common_components.join(&std::path::MAIN_SEPARATOR.to_string()))
}
}
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
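/// Parse `GLOB:LANG_NAME` override arguments. Invalid globs or unknown language
/// names print an error and exit with `EXIT_BAD_ARGUMENTS`; adjacent entries for
/// the same language are merged into a single entry.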
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
let mut res: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
let mut invalid_syntax = false;
for raw_override in raw_overrides {
if let Some((glob_str, lang_name)) = raw_override.rsplit_once(':') {
match glob::Pattern::new(glob_str) {
Ok(pattern) => {
if let Some(language_override) = language_override_from_name(lang_name) {
res.push((language_override, vec![pattern]));
} else {
eprintln!("No such language '{}'", lang_name);
eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
invalid_syntax = true;
}
}
Err(e) => {
eprintln!("Invalid glob syntax '{}'", glob_str);
eprintln!("Glob parsing error: {}", e.msg);
invalid_syntax = true;
}
}
} else {
eprintln!("Invalid language override syntax '{}'", raw_override);
eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
invalid_syntax = true;
}
}
if invalid_syntax {
std::process::exit(EXIT_BAD_ARGUMENTS);
}
res.into_iter()
.coalesce(
|(prev_lang, mut prev_globs), (current_lang, current_globs)| {
if prev_lang == current_lang {
prev_globs.extend(current_globs);
Ok((prev_lang, prev_globs))
} else {
Err(((prev_lang, prev_globs), (current_lang, current_globs)))
}
},
)
.collect()
}
/// Parse CLI arguments passed to the binary.
pub fn parse_args() -> Mode {
let matches = app().get_matches();
let color_output = match matches.value_of("color").expect("color has a default") {
"always" => ColorOutput::Always,
"never" => ColorOutput::Never,
"auto" => ColorOutput::Auto,
_ => {
unreachable!("clap has already validated color")
}
};
let use_color = should_use_color(color_output);
let ignore_comments = matches.is_present("ignore-comments");
let mut raw_overrides: Vec<String> = vec![];
if let Some(overrides) = matches.values_of("override") {
raw_overrides = overrides.map(|s| s.into()).collect();
}
for i in 1..=9 {
if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
raw_overrides.push(value);
}
}
let language_overrides = parse_overrides_or_die(&raw_overrides);
if matches.is_present("list-languages") {
return Mode::ListLanguages {
use_color,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-syntax") {
return Mode::DumpSyntax {
path: path.to_string(),
ignore_comments,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-ts") {
return Mode::DumpTreeSitter {
path: path.to_string(),
language_overrides,
};
}
let display_width = if let Some(arg_width) = matches.value_of("width") {
arg_width
.parse::<usize>()
.expect("Already validated by clap")
} else {
detect_display_width()
};
let display_mode = match matches.value_of("display").expect("display has a default") {
"side-by-side" => DisplayMode::SideBySide,
"side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
"inline" => DisplayMode::Inline,
"json" => {
if env::var(format!("DFT_UNSTABLE")).is_err() {
eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
DisplayMode::Json
}
_ => {
unreachable!("clap has already validated display")
}
};
let background_color = match matches
.value_of("background")
.expect("Always present as we've given clap a default")
{
"dark" => BackgroundColor::Dark,
"light" => BackgroundColor::Light,
_ => unreachable!("clap has already validated the values"),
};
let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");
let graph_limit = matches
.value_of("graph-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let byte_limit = matches
.value_of("byte-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let parse_error_limit = matches | .value_of("parse-error-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap"); | random_line_split |
|
options.rs | new("missing-as-empty").long("missing-as-empty")
.help("Treat paths that don't exist as equivalent to an empty file. Only applies when diffing files, not directories.")
)
.arg(
Arg::new("override").long("override")
.value_name("GLOB:NAME")
.help(concat!("Associate this glob pattern with this language, overriding normal language detection. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='*.c:C++' old.c new.c
See --list-languages for the list of language names. Language names are matched case insensitively. Overrides may also specify the language \"text\" to treat a file as plain text.
This argument may be given more than once. For example:
$ ", env!("CARGO_BIN_NAME"), " --override='CustomFile:json' --override='*.c:text' old.c new.c
To configure multiple overrides using environment variables, difftastic also accepts DFT_OVERRIDE_1 up to DFT_OVERRIDE_9.
$ export DFT_OVERRIDE='CustomFile:json'
$ export DFT_OVERRIDE_1='*.c:text'
$ export DFT_OVERRIDE_2='*.js:javascript jsx'
When multiple overrides are specified, the first matching override wins."))
.env("DFT_OVERRIDE")
.multiple_occurrences(true)
)
.arg(
Arg::new("list-languages").long("list-languages")
.help("Print the all the languages supported by difftastic, along with their extensions.")
)
.arg(
Arg::new("byte-limit").long("byte-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if either input file exceeds this size.")
.default_value(formatcp!("{}", DEFAULT_BYTE_LIMIT))
.env("DFT_BYTE_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("graph-limit").long("graph-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the structural graph exceed this number of nodes in memory.")
.default_value(formatcp!("{}", DEFAULT_GRAPH_LIMIT))
.env("DFT_GRAPH_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("parse-error-limit").long("parse-error-limit")
.takes_value(true)
.value_name("LIMIT")
.help("Use a text diff if the number of parse errors exceeds this value.")
.default_value(formatcp!("{}", DEFAULT_PARSE_ERROR_LIMIT))
.env("DFT_PARSE_ERROR_LIMIT")
.validator(|s| s.parse::<usize>())
.required(false),
)
.arg(
Arg::new("paths")
.value_name("PATHS")
.multiple_values(true)
.hide(true)
.allow_invalid_utf8(true),
)
.arg_required_else_help(true)
}
#[derive(Debug, Copy, Clone)]
pub enum DisplayMode {
Inline,
SideBySide,
SideBySideShowBoth,
Json,
}
#[derive(Eq, PartialEq, Debug)]
pub enum FileArgument {
NamedPath(std::path::PathBuf),
Stdin,
DevNull,
}
fn try_canonicalize(path: &Path) -> PathBuf {
path.canonicalize().unwrap_or_else(|_| path.into())
}
fn relative_to_current(path: &Path) -> PathBuf {
if let Ok(current_path) = std::env::current_dir() {
let path = try_canonicalize(path);
let current_path = try_canonicalize(&current_path);
if let Ok(rel_path) = path.strip_prefix(current_path) {
return rel_path.into();
}
}
path.into()
}
impl FileArgument {
/// Return a `FileArgument` representing this command line
/// argument.
pub fn from_cli_argument(arg: &OsStr) -> Self {
if arg == "/dev/null" {
FileArgument::DevNull
} else if arg == "-" {
FileArgument::Stdin
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
/// Return a `FileArgument` that always represents a path that
/// exists, with the exception of `/dev/null`, which is turned into [FileArgument::DevNull].
pub fn from_path_argument(arg: &OsStr) -> Self {
// For new and deleted files, Git passes `/dev/null` as the reference file.
if arg == "/dev/null" {
FileArgument::DevNull
} else {
FileArgument::NamedPath(PathBuf::from(arg))
}
}
pub fn display(&self) -> String {
match self {
FileArgument::NamedPath(path) => relative_to_current(path).display().to_string(),
FileArgument::Stdin => "(stdin)".to_string(),
FileArgument::DevNull => "/dev/null".to_string(),
}
}
}
pub enum Mode {
Diff {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
/// The path where we can read the LHS file. This is often a
/// temporary file generated by source control.
lhs_path: FileArgument,
/// The path where we can read the RHS file. This is often a
/// temporary file generated by source control.
rhs_path: FileArgument,
/// The path that we show to the user.
display_path: String,
/// If this file has been renamed, the name it had previously.
old_path: Option<String>,
},
DiffFromConflicts {
diff_options: DiffOptions,
display_options: DisplayOptions,
set_exit_code: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
path: FileArgument,
/// The path that we show to the user.
display_path: String,
},
ListLanguages {
use_color: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpTreeSitter {
path: String,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
DumpSyntax {
path: String,
ignore_comments: bool,
language_overrides: Vec<(LanguageOverride, Vec<glob::Pattern>)>,
},
}
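/// Return the longest common suffix of the two paths (for example `old/src/main.rs`
/// and `new/src/main.rs` share `src/main.rs`), or `None` when even the final
/// components differ.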
fn common_path_suffix(lhs_path: &Path, rhs_path: &Path) -> Option<String> {
let lhs_rev_components = lhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let rhs_rev_components = rhs_path
.components()
.map(|c| c.as_os_str())
.rev()
.collect::<Vec<_>>();
let mut common_components = vec![];
for (lhs_component, rhs_component) in lhs_rev_components.iter().zip(rhs_rev_components.iter()) {
if lhs_component == rhs_component {
common_components.push(lhs_component.to_string_lossy());
} else {
break;
}
}
if common_components.is_empty() {
None
} else {
common_components.reverse();
Some(common_components.join(&std::path::MAIN_SEPARATOR.to_string()))
}
}
fn build_display_path(lhs_path: &FileArgument, rhs_path: &FileArgument) -> String {
match (lhs_path, rhs_path) {
(FileArgument::NamedPath(lhs), FileArgument::NamedPath(rhs)) => {
match common_path_suffix(lhs, rhs) {
Some(common_suffix) => common_suffix,
None => rhs.display().to_string(),
}
}
(FileArgument::NamedPath(p), _) | (_, FileArgument::NamedPath(p)) => {
p.display().to_string()
}
(FileArgument::DevNull, _) | (_, FileArgument::DevNull) => "/dev/null".into(),
(FileArgument::Stdin, FileArgument::Stdin) => "-".into(),
}
}
fn parse_overrides_or_die(raw_overrides: &[String]) -> Vec<(LanguageOverride, Vec<glob::Pattern>)> {
let mut res: Vec<(LanguageOverride, Vec<glob::Pattern>)> = vec![];
let mut invalid_syntax = false;
for raw_override in raw_overrides {
if let Some((glob_str, lang_name)) = raw_override.rsplit_once(':') {
match glob::Pattern::new(glob_str) {
Ok(pattern) => {
if let Some(language_override) = language_override_from_name(lang_name) {
res.push((language_override, vec![pattern]));
} else {
eprintln!("No such language '{}'", lang_name);
eprintln!("See --list-languages for the names of all languages available. Language overrides are case insensitive.");
invalid_syntax = true;
}
}
Err(e) => {
eprintln!("Invalid glob syntax '{}'", glob_str);
eprintln!("Glob parsing error: {}", e.msg);
invalid_syntax = true;
}
}
} else {
eprintln!("Invalid language override syntax '{}'", raw_override);
eprintln!("Language overrides are in the format 'GLOB:LANG_NAME', e.g. '*.js:JSON'.");
invalid_syntax = true;
}
}
if invalid_syntax {
std::process::exit(EXIT_BAD_ARGUMENTS);
}
res.into_iter()
.coalesce(
|(prev_lang, mut prev_globs), (current_lang, current_globs)| {
if prev_lang == current_lang {
prev_globs.extend(current_globs);
Ok((prev_lang, prev_globs))
} else {
Err(((prev_lang, prev_globs), (current_lang, current_globs)))
}
},
)
.collect()
}
/// Parse CLI arguments passed to the binary.
pub fn parse_args() -> Mode {
let matches = app().get_matches();
let color_output = match matches.value_of("color").expect("color has a default") {
"always" => ColorOutput::Always,
"never" => ColorOutput::Never,
"auto" => ColorOutput::Auto,
_ => {
unreachable!("clap has already validated color")
}
};
let use_color = should_use_color(color_output);
let ignore_comments = matches.is_present("ignore-comments");
let mut raw_overrides: Vec<String> = vec![];
if let Some(overrides) = matches.values_of("override") {
raw_overrides = overrides.map(|s| s.into()).collect();
}
for i in 1..=9 {
if let Ok(value) = env::var(format!("DFT_OVERRIDE_{}", i)) {
raw_overrides.push(value);
}
}
let language_overrides = parse_overrides_or_die(&raw_overrides);
if matches.is_present("list-languages") {
return Mode::ListLanguages {
use_color,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-syntax") {
return Mode::DumpSyntax {
path: path.to_string(),
ignore_comments,
language_overrides,
};
}
if let Some(path) = matches.value_of("dump-ts") {
return Mode::DumpTreeSitter {
path: path.to_string(),
language_overrides,
};
}
let display_width = if let Some(arg_width) = matches.value_of("width") {
arg_width
.parse::<usize>()
.expect("Already validated by clap")
} else {
detect_display_width()
};
let display_mode = match matches.value_of("display").expect("display has a default") {
"side-by-side" => DisplayMode::SideBySide,
"side-by-side-show-both" => DisplayMode::SideBySideShowBoth,
"inline" => DisplayMode::Inline,
"json" => {
if env::var(format!("DFT_UNSTABLE")).is_err() {
eprintln!("JSON output is an unstable feature and its format may change in future. To enable JSON output, set the environment variable DFT_UNSTABLE=yes.");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
DisplayMode::Json
}
_ => {
unreachable!("clap has already validated display")
}
};
let background_color = match matches
.value_of("background")
.expect("Always present as we've given clap a default")
{
"dark" => BackgroundColor::Dark,
"light" => BackgroundColor::Light,
_ => unreachable!("clap has already validated the values"),
};
let syntax_highlight = matches.value_of("syntax-highlight") == Some("on");
let graph_limit = matches
.value_of("graph-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let byte_limit = matches
.value_of("byte-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let parse_error_limit = matches
.value_of("parse-error-limit")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let tab_width = matches
.value_of("tab-width")
.expect("Always present as we've given clap a default")
.parse::<usize>()
.expect("Value already validated by clap");
let num_context_lines = matches
.value_of("context")
.expect("Always present as we've given clap a default")
.parse::<u32>()
.expect("Value already validated by clap");
let print_unchanged = !matches.is_present("skip-unchanged");
let set_exit_code = matches.is_present("exit-code");
let check_only = matches.is_present("check-only");
let diff_options = DiffOptions {
graph_limit,
byte_limit,
parse_error_limit,
check_only,
ignore_comments,
};
let args: Vec<_> = matches.values_of_os("paths").unwrap_or_default().collect();
info!("CLI arguments: {:?}", args);
// TODO: document these different ways of calling difftastic.
let (display_path, lhs_path, rhs_path, old_path, in_vcs) = match &args[..] {
[lhs_path, rhs_path] => {
let lhs_arg = FileArgument::from_cli_argument(lhs_path);
let rhs_arg = FileArgument::from_cli_argument(rhs_path);
let display_path = build_display_path(&lhs_arg, &rhs_arg);
(display_path, lhs_arg, rhs_arg, None, false)
}
[display_path, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode] => {
// https://git-scm.com/docs/git#Documentation/git.txt-codeGITEXTERNALDIFFcode
(
display_path.to_string_lossy().to_string(),
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
None,
true,
)
}
[old_name, lhs_tmp_file, _lhs_hash, _lhs_mode, rhs_tmp_file, _rhs_hash, _rhs_mode, new_name, _similarity] =>
{
// Rename file.
// TODO: where does git document these 9 arguments?
let old_name = old_name.to_string_lossy().to_string();
let new_name = new_name.to_string_lossy().to_string();
let renamed = format!("Renamed from {} to {}", old_name, new_name);
(
new_name,
FileArgument::from_path_argument(lhs_tmp_file),
FileArgument::from_path_argument(rhs_tmp_file),
Some(renamed),
true,
)
}
[path] => {
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs: true,
};
let display_path = path.to_string_lossy().to_string();
let path = FileArgument::from_path_argument(path);
return Mode::DiffFromConflicts {
display_path,
path,
diff_options,
display_options,
set_exit_code,
language_overrides,
};
}
_ => {
if !args.is_empty() {
eprintln!(
"error: Difftastic does not support being called with {} argument{}.\n",
args.len(),
if args.len() == 1 { "" } else { "s" }
);
}
eprintln!("USAGE:\n\n {}\n", USAGE);
eprintln!("For more information try --help");
std::process::exit(EXIT_BAD_ARGUMENTS);
}
};
let display_options = DisplayOptions {
background_color,
use_color,
print_unchanged,
tab_width,
display_mode,
display_width,
num_context_lines,
syntax_highlight,
in_vcs,
};
Mode::Diff {
diff_options,
display_options,
set_exit_code,
language_overrides,
lhs_path,
rhs_path,
display_path,
old_path,
}
}
/// Choose the display width: try to autodetect, or fall back to a
/// sensible default.
fn detect_display_width() -> usize {
if let Ok((columns, _rows)) = crossterm::terminal::size() {
return columns.into();
}
80
}
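/// Decide whether to emit colour: `always` and `never` are taken literally, while
/// `auto` enables colour when stdout is a TTY or git's pager is in use.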
pub fn should_use_color(color_output: ColorOutput) -> bool {
match color_output {
ColorOutput::Always => true,
ColorOutput::Auto => {
// Always enable colour if stdout is a TTY or if the git pager is active.
// TODO: consider following the env parsing logic in git_config_bool
// in config.c.
std::io::stdout().is_tty() || env::var("GIT_PAGER_IN_USE").is_ok()
}
ColorOutput::Never => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_app() {
app().debug_assert();
}
#[test]
fn | test_detect_display_width | identifier_name |
|
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to the minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
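// Hedged sketch (not part of the upstream tests): illustrates the intended
// buffering behaviour of `S3Buffer`: `read_current_block` only yields a
// `BufferPart` once at least `block_size` bytes have been written, and
// `reset` drains whatever is left. It assumes `BufferPart`'s fields stay
// visible from this module, as they are in the code above.
#[cfg(test)]
mod s3_buffer_example {
    use super::*;

    #[test]
    fn yields_blocks_only_when_full() {
        let mut buffer = S3Buffer::new(4);
        buffer.write(vec![1, 2, 3]);
        // below block_size: nothing to upload yet
        assert!(buffer.read_current_block().is_none());
        buffer.write(vec![4, 5]);
        // now at least block_size bytes are buffered
        let part = buffer.read_current_block().expect("a full block");
        assert_eq!(vec![1, 2, 3, 4, 5], part.data);
        // everything was taken, so the final part is empty
        assert!(buffer.reset().is_empty());
    }
}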
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res | else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
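// Hedged additions (not in the upstream test suite): the first test spells out
// the arithmetic behind the 5_242_980 default (5 * 1024 * 1024 bytes plus the
// 100-byte margin noted next to MORE_THEN_FIVEMBS); the second sketches how
// explicit fields, including `path_style_access` for path-style backends such
// as minio, would be deserialized. The bucket name is purely illustrative.
#[test]
fn buffer_size_minimum_arithmetic() -> Result<()> {
    assert_eq!(5 * 1024 * 1024 + 100, MORE_THEN_FIVEMBS);
    let res = Config::new(&literal!({}))?;
    assert_eq!(MORE_THEN_FIVEMBS, res.buffer_size);
    Ok(())
}
#[test]
fn config_explicit_fields() -> Result<()> {
    let config = literal!({
        "bucket": "snot",
        "path_style_access": false
    });
    let res = Config::new(&config)?;
    assert_eq!(Some("snot".to_string()), res.bucket);
    assert!(!res.path_style_access);
    Ok(())
}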
}
| {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and should not be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} | conditional_block |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)] | pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to the minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and should not be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
} | random_line_split |
|
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to the minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn | (
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and should not be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId {
&self.object_id
}
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
}
| build_cfg | identifier_name |
streamer.rs | // Copyright 2022, The Tremor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::connectors::{
impls::s3::auth,
prelude::*,
utils::object_storage::{
BufferPart, ConsistentSink, Mode, ObjectId, ObjectStorageBuffer, ObjectStorageCommon,
ObjectStorageSinkImpl, ObjectStorageUpload, YoloSink,
},
};
use aws_sdk_s3::{
types::{CompletedMultipartUpload, CompletedPart},
Client as S3Client,
};
use tremor_common::time::nanotime;
use tremor_pipeline::{EventId, OpMeta};
pub(crate) const CONNECTOR_TYPE: &str = "s3_streamer";
const MORE_THEN_FIVEMBS: usize = 5 * 1024 * 1024 + 100; // Some extra bytes to keep aws happy.
#[derive(Deserialize, Debug, Clone)]
#[serde(deny_unknown_fields)]
pub(crate) struct Config {
aws_region: Option<String>,
url: Option<Url<HttpsDefaults>>,
/// optional default bucket
bucket: Option<String>,
#[serde(default = "Default::default")]
mode: Mode,
#[serde(default = "Config::fivembs")]
buffer_size: usize,
/// Enable path-style access
/// So e.g. creating a bucket is done using:
///
/// PUT http://<host>:<port>/<bucket>
///
/// instead of
///
/// PUT http://<bucket>.<host>:<port>/
///
/// Set this to `true` for accessing s3 compatible backends
/// that do only support path style access, like e.g. minio.
/// Defaults to `true` for backward compatibility.
#[serde(default = "default_true")]
path_style_access: bool,
}
// Defaults for the config.
impl Config {
fn fivembs() -> usize {
MORE_THEN_FIVEMBS
}
fn normalize(&mut self, alias: &Alias) {
if self.buffer_size < MORE_THEN_FIVEMBS {
warn!("[Connector::{alias}] Setting `buffer_size` up to the minimum of 5MB.");
self.buffer_size = MORE_THEN_FIVEMBS;
}
}
}
impl ConfigImpl for Config {}
#[derive(Debug, Default)]
pub(crate) struct Builder {}
#[async_trait::async_trait]
impl ConnectorBuilder for Builder {
fn connector_type(&self) -> ConnectorType {
ConnectorType::from(CONNECTOR_TYPE)
}
async fn build_cfg(
&self,
id: &Alias,
_: &ConnectorConfig,
config: &Value,
_kill_switch: &KillSwitch,
) -> Result<Box<dyn Connector>> {
let mut config = Config::new(config)?;
config.normalize(id);
Ok(Box::new(S3Connector { config }))
}
}
struct S3Connector {
config: Config,
}
#[async_trait::async_trait]
impl Connector for S3Connector {
/// Stream the events to the bucket
async fn create_sink(
&mut self,
ctx: SinkContext,
builder: SinkManagerBuilder,
) -> Result<Option<SinkAddr>> {
match self.config.mode {
Mode::Yolo => {
let sink_impl = S3ObjectStorageSinkImpl::yolo(self.config.clone());
let sink: YoloSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
YoloSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
Mode::Consistent => {
let sink_impl =
S3ObjectStorageSinkImpl::consistent(self.config.clone(), builder.reply_tx());
let sink: ConsistentSink<S3ObjectStorageSinkImpl, S3Upload, S3Buffer> =
ConsistentSink::new(sink_impl);
Ok(Some(builder.spawn(sink, ctx)))
}
}
}
fn codec_requirements(&self) -> CodecReq {
CodecReq::Required
}
}
// TODO: Maybe: https://docs.rs/object_store/latest/object_store/ is the better abstraction?
pub(super) struct S3ObjectStorageSinkImpl {
config: Config,
client: Option<S3Client>,
reply_tx: Option<ReplySender>,
}
impl ObjectStorageCommon for S3ObjectStorageSinkImpl {
fn default_bucket(&self) -> Option<&String> {
self.config.bucket.as_ref()
}
fn connector_type(&self) -> &str {
CONNECTOR_TYPE
}
}
impl S3ObjectStorageSinkImpl {
pub(crate) fn yolo(config: Config) -> Self {
Self {
config,
client: None,
reply_tx: None,
}
}
pub(crate) fn consistent(config: Config, reply_tx: ReplySender) -> Self {
Self {
config,
client: None,
reply_tx: Some(reply_tx),
}
}
fn get_client(&self) -> Result<&S3Client> {
self.client
.as_ref()
.ok_or_else(|| ErrorKind::S3Error("no s3 client available".to_string()).into())
}
}
pub(crate) struct S3Buffer {
block_size: usize,
data: Vec<u8>,
cursor: usize,
}
impl ObjectStorageBuffer for S3Buffer {
fn new(size: usize) -> Self {
Self {
block_size: size,
data: Vec::with_capacity(size * 2),
cursor: 0,
}
}
fn write(&mut self, mut data: Vec<u8>) {
self.data.append(&mut data);
}
fn read_current_block(&mut self) -> Option<BufferPart> {
if self.data.len() >= self.block_size {
let data = self.data.clone();
self.cursor += data.len();
self.data.clear();
Some(BufferPart {
data,
start: self.cursor,
})
} else {
None
}
}
fn mark_done_until(&mut self, _idx: usize) -> Result<()> {
// no-op
Ok(())
}
fn reset(&mut self) -> BufferPart {
let data = self.data.clone(); // we only clone up to len, not up to capacity
let start = self.cursor;
self.data.clear();
self.cursor = 0;
BufferPart { data, start }
}
}
#[async_trait::async_trait]
impl ObjectStorageSinkImpl<S3Upload> for S3ObjectStorageSinkImpl {
fn buffer_size(&self) -> usize {
self.config.buffer_size
}
async fn connect(&mut self, _ctx: &SinkContext) -> Result<()> {
self.client = Some(
auth::get_client(
self.config.aws_region.clone(),
self.config.url.as_ref(),
self.config.path_style_access,
)
.await?,
);
Ok(())
}
async fn bucket_exists(&mut self, bucket: &str) -> Result<bool> {
self.get_client()?
.head_bucket()
.bucket(bucket)
.send()
.await
.map_err(|e| {
let msg = format!("Failed to access Bucket `{bucket}`: {e}");
Error::from(ErrorKind::S3Error(msg))
})?;
Ok(true)
}
async fn start_upload(
&mut self,
object_id: &ObjectId,
event: &Event,
_ctx: &SinkContext,
) -> Result<S3Upload> {
let resp = self
.get_client()?
.create_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.send()
.await?;
//let upload = CurrentUpload::new(resp.)
let upload_id = resp.upload_id.ok_or_else(|| {
ErrorKind::S3Error(format!(
"Failed to start upload for s3://{}: upload id not found in response.",
&object_id
))
})?;
let upload = S3Upload::new(object_id.clone(), upload_id, event);
Ok(upload)
}
async fn upload_data(
&mut self,
data: BufferPart,
upload: &mut S3Upload,
ctx: &SinkContext,
) -> Result<usize> {
let end = data.end();
upload.part_number += 1; // the upload part number needs to be >= 1, so we increment before uploading
debug!(
"{ctx} Uploading part {} for {}",
upload.part_number,
upload.object_id(),
);
// Upload the part
let resp = self
.get_client()?
.upload_part()
.body(data.data.into())
.part_number(upload.part_number)
.upload_id(upload.upload_id.clone())
.bucket(upload.object_id().bucket())
.key(upload.object_id().name())
.send()
.await?;
let mut completed = CompletedPart::builder().part_number(upload.part_number);
if let Some(e_tag) = resp.e_tag.as_ref() {
completed = completed.e_tag(e_tag);
}
debug!(
"{ctx} part {} uploaded for {}.",
upload.part_number,
upload.object_id()
);
// Insert into the list of completed parts
upload.parts.push(completed.build());
Ok(end)
}
async fn finish_upload(
&mut self,
mut upload: S3Upload,
final_part: BufferPart,
ctx: &SinkContext,
) -> Result<()> {
debug_assert!(
!upload.failed,
"finish may only be called for non-failed uploads"
);
// Upload the last part if any.
if !final_part.is_empty() {
self.upload_data(final_part, &mut upload, ctx).await?;
}
let S3Upload {
object_id,
event_id,
op_meta,
transactional,
upload_id,
parts,
..
} = upload;
debug!("{ctx} Finishing upload {upload_id} for {object_id}");
let res = self
.get_client()?
.complete_multipart_upload()
.bucket(object_id.bucket())
.upload_id(&upload_id)
.key(object_id.name())
.multipart_upload(
CompletedMultipartUpload::builder()
.set_parts(Some(parts))
.build(),
)
.send()
.await;
// send an ack for all the accumulated events in the finished upload
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), transactional) {
let cf_data = ContraflowData::new(event_id, nanotime(), op_meta);
let reply = if let Ok(out) = &res {
if let Some(location) = out.location() {
debug!("{ctx} Finished upload {upload_id} for {location}");
} else {
debug!("{ctx} Finished upload {upload_id} for {object_id}");
}
// the duration of handling in the sink is a little bit meaningless here
// as a) the actual duration from the first event to the actual finishing of the upload
// is horribly long, and should not be considered the actual event handling time
// b) It will vary a lot e.g. when an actual upload call is made
AsyncSinkReply::Ack(cf_data, 0)
} else {
AsyncSinkReply::Fail(cf_data)
};
ctx.swallow_err(
reply_tx.send(reply),
&format!("Error sending ack/fail for upload {upload_id} to {object_id}"),
);
}
res?;
Ok(())
}
async fn fail_upload(&mut self, upload: S3Upload, ctx: &SinkContext) -> Result<()> {
let S3Upload {
object_id,
upload_id,
event_id,
op_meta,
..
} = upload;
if let (Some(reply_tx), true) = (self.reply_tx.as_ref(), upload.transactional) {
ctx.swallow_err(
reply_tx.send(AsyncSinkReply::Fail(ContraflowData::new(
event_id,
nanotime(),
op_meta,
))),
&format!("Error sending fail for upload {upload_id} for {object_id}"),
);
}
ctx.swallow_err(
self.get_client()?
.abort_multipart_upload()
.bucket(object_id.bucket())
.key(object_id.name())
.upload_id(&upload_id)
.send()
.await,
&format!("Error aborting multipart upload {upload_id} for {object_id}"),
);
Ok(())
}
}
pub(crate) struct S3Upload {
object_id: ObjectId,
/// tracking the ids for all accumulated events
event_id: EventId,
/// tracking the traversed operators for each accumulated event for correct sink-reply handling
op_meta: OpMeta,
/// tracking the transactional status of the accumulated events
/// if any one of them is transactional, we send an ack for all
transactional: bool,
/// bookkeeping for multipart uploads.
upload_id: String,
part_number: i32,
parts: Vec<CompletedPart>,
/// whether this upload is marked as failed
failed: bool,
}
impl S3Upload {
fn new(object_id: ObjectId, upload_id: String, event: &Event) -> Self {
Self {
object_id,
event_id: event.id.clone(),
op_meta: event.op_meta.clone(),
transactional: event.transactional,
upload_id,
part_number: 0,
parts: Vec::with_capacity(8),
failed: false,
}
}
}
impl ObjectStorageUpload for S3Upload {
fn object_id(&self) -> &ObjectId |
fn is_failed(&self) -> bool {
self.failed
}
fn mark_as_failed(&mut self) {
self.failed = true;
}
fn track(&mut self, event: &Event) {
self.event_id.track(&event.id);
if !event.op_meta.is_empty() {
self.op_meta.merge(event.op_meta.clone());
}
self.transactional |= event.transactional;
}
}
#[cfg(test)]
mod tests {
use super::*;
use tremor_value::literal;
#[test]
fn config_defaults() -> Result<()> {
let config = literal!({});
let res = Config::new(&config)?;
assert!(res.aws_region.is_none());
assert!(res.url.is_none());
assert!(res.bucket.is_none());
assert_eq!(Mode::Yolo, res.mode);
assert_eq!(5_242_980, res.buffer_size);
Ok(())
}
}
| {
&self.object_id
} | identifier_body |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if the client requested end of stream,
// we must send at least one frame with the end-stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) | else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// Stream is removed from map, need to continue to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some of frames should not be in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge seeing a SETTINGS frame from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
}
CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
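// Hedged illustration (not part of the connection code above): a standalone
// re-statement of the chunking arithmetic used by `write_part_data`. With an
// assumed peer `max_frame_size` of 16_384 bytes, a 40_000-byte payload is cut
// into three DATA-frame slices, and only the last slice would carry the
// EndStream flag when the caller asked to end the stream.
#[cfg(test)]
mod write_part_data_chunking_example {
    #[test]
    fn chunk_boundaries() {
        let max_frame_size = 16_384usize;
        let len = 40_000usize;
        let mut chunks = Vec::new();
        let mut pos = 0;
        while pos < len {
            // mirrors the `cmp::min` split in `write_part_data`
            let end = std::cmp::min(len, pos + max_frame_size);
            chunks.push((pos, end));
            pos = end;
        }
        assert_eq!(
            vec![(0_usize, 16_384_usize), (16_384, 32_768), (32_768, 40_000)],
            chunks
        );
    }
}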
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // send when user provided handler completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
}
| {
debug!("sending frame {:?}", frame);
} | conditional_block |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) | let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// Stream is removed from map, need to continue to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some of frames should not be in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge seeing a SETTINGS frame from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
}
CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // sent when the user-provided handler has completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
}
| {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) {
debug!("sending frame {:?}", frame);
} else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
| identifier_body |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) {
debug!("sending frame {:?}", frame);
} else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
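// Split the payload into DATA frames of at most `max_frame_size` bytes;
// only the final frame carries the END_STREAM flag when requested.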
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn write_part(&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// The stream was removed from the map; move on to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some frames should not be queued in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge a SETTINGS frame received from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
} | self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // sent when the user-provided handler has completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
} | CommonToWriteMessage::StreamEnqueue(stream_id, part) => { | random_line_split |
conn_write.rs | use std::cmp;
use std::fmt;
use std::task::Poll;
use bytes::Bytes;
use futures::task::Context;
use tls_api::AsyncSocket;
use crate::common::conn::Conn;
use crate::common::conn::ConnStateSnapshot;
use crate::common::conn_read::ConnReadSideCustom;
use crate::common::pump_stream_to_write_loop::PumpStreamToWrite;
use crate::common::stream::HttpStreamCommand;
use crate::common::stream::HttpStreamCommon;
use crate::common::stream::HttpStreamData;
use crate::common::types::Types;
use crate::common::window_size::StreamOutWindowReceiver;
use crate::data_or_headers::DataOrHeaders;
use crate::data_or_headers_with_flag::DataOrHeadersWithFlag;
use crate::solicit::end_stream::EndStream;
use crate::solicit::frame::DataFlag;
use crate::solicit::frame::DataFrame;
use crate::solicit::frame::Flags;
use crate::solicit::frame::GoawayFrame;
use crate::solicit::frame::HeadersFlag;
use crate::solicit::frame::HeadersMultiFrame;
use crate::solicit::frame::HttpFrame;
use crate::solicit::frame::RstStreamFrame;
use crate::solicit::frame::SettingsFrame;
use crate::solicit::stream_id::StreamId;
use crate::DataOrTrailers;
use crate::Error;
use crate::ErrorCode;
use crate::Headers;
use crate::death::channel::ErrorAwareDrop;
use crate::death::error_holder::ConnDiedType;
use crate::death::oneshot_no_content_drop::DeathAwareOneshotNoContentDropSender;
use crate::solicit_async::TryStreamBox;
pub(crate) trait ConnWriteSideCustom {
type Types: Types;
fn process_message(
&mut self,
message: <Self::Types as Types>::ToWriteMessage,
) -> crate::Result<()>;
}
impl<T, I> Conn<T, I>
where
T: Types,
Self: ConnReadSideCustom<Types = T>,
Self: ConnWriteSideCustom<Types = T>,
HttpStreamCommon<T>: HttpStreamData<Types = T>,
I: AsyncSocket,
{
fn write_part_data(&mut self, stream_id: StreamId, data: Bytes, end_stream: EndStream) {
let max_frame_size = self.peer_settings.max_frame_size as usize;
// if client requested end of stream,
// we must send at least one frame with end stream flag
if end_stream == EndStream::Yes && data.len() == 0 {
let mut frame = DataFrame::with_data(stream_id, Bytes::new());
frame.set_flag(DataFlag::EndStream);
if log_enabled!(log::Level::Trace) {
debug!("sending frame {:?}", frame);
} else {
debug!("sending frame {:?}", frame.debug_no_data());
}
self.queued_write.queue_not_goaway(frame);
return;
}
let mut pos = 0;
while pos < data.len() {
let end = cmp::min(data.len(), pos + max_frame_size);
let end_stream_in_frame = if end == data.len() && end_stream == EndStream::Yes {
EndStream::Yes
} else {
EndStream::No
};
let mut frame = DataFrame::with_data(stream_id, data.slice(pos..end));
if end_stream_in_frame == EndStream::Yes {
frame.set_flag(DataFlag::EndStream);
}
self.queued_write.queue_not_goaway(frame);
pos = end;
}
}
fn write_part_headers(&mut self, stream_id: StreamId, headers: Headers, end_stream: EndStream) {
let mut flags = Flags::new(0);
if end_stream == EndStream::Yes {
flags.set(HeadersFlag::EndStream);
}
self.queued_write.queue_not_goaway(HeadersMultiFrame {
flags,
stream_id,
headers,
stream_dep: None,
padding_len: 0,
encoder: &mut self.encoder,
max_frame_size: self.peer_settings.max_frame_size,
});
}
fn write_part_rst(&mut self, stream_id: StreamId, error_code: ErrorCode) {
let frame = RstStreamFrame::new(stream_id, error_code);
self.queued_write.queue_not_goaway(frame);
}
fn | (&mut self, stream_id: StreamId, part: HttpStreamCommand) {
match part {
HttpStreamCommand::Data(data, end_stream) => {
self.write_part_data(stream_id, data, end_stream);
}
HttpStreamCommand::Headers(headers, end_stream) => {
self.write_part_headers(stream_id, headers, end_stream);
}
HttpStreamCommand::Rst(error_code) => {
self.write_part_rst(stream_id, error_code);
}
}
}
fn has_write_buffer_capacity(&self) -> bool {
self.queued_write.queued_bytes_len() < 0x8000
}
fn pop_outg_for_stream(
&mut self,
stream_id: StreamId,
) -> Option<(StreamId, HttpStreamCommand, bool)> {
let stream = self.streams.get_mut(stream_id).unwrap();
if let (Some(command), stream) = stream.pop_outg_maybe_remove(&mut self.out_window_size) {
return Some((stream_id, command, stream.is_some()));
}
None
}
pub fn buffer_outg_conn(&mut self) -> crate::Result<bool> {
let mut updated = false;
// shortcut
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
let writable_stream_ids = self.streams.writable_stream_ids();
for &stream_id in &writable_stream_ids {
loop {
if !self.has_write_buffer_capacity() {
return Ok(updated);
}
if let Some((stream_id, part, cont)) = self.pop_outg_for_stream(stream_id) {
self.write_part(stream_id, part);
updated = true;
// The stream was removed from the map; move on to the next stream
if !cont {
break;
}
} else {
break;
}
}
}
Ok(updated)
}
pub fn send_frame_and_notify<F: Into<HttpFrame>>(&mut self, frame: F) {
// TODO: some frames should not be queued in front of GOAWAY
self.queued_write.queue_not_goaway(frame.into());
}
/// Sends a SETTINGS frame with the ACK flag set to acknowledge a SETTINGS frame received from the peer.
pub fn send_ack_settings(&mut self) -> crate::Result<()> {
let settings = SettingsFrame::new_ack();
self.send_frame_and_notify(settings);
Ok(())
}
fn process_stream_end(
&mut self,
stream_id: StreamId,
error_code: ErrorCode,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.close_outgoing(error_code);
}
Ok(())
}
fn process_stream_enqueue(
&mut self,
stream_id: StreamId,
part: DataOrHeadersWithFlag,
) -> crate::Result<()> {
let stream = self.streams.get_mut(stream_id);
if let Some(mut stream) = stream {
stream.push_back_part(part);
} else {
if let DataOrHeaders::Data(data) = part.content {
self.pump_out_window_size.increase(data.len());
}
}
Ok(())
}
fn process_stream_pull(
&mut self,
stream_id: StreamId,
stream: TryStreamBox<DataOrTrailers>,
out_window: StreamOutWindowReceiver,
) -> crate::Result<()> {
// TODO: spawn in handler
self.loop_handle.spawn(
PumpStreamToWrite::<T> {
to_write_tx: self.to_write_tx.clone(),
stream_id,
out_window,
stream,
}
.run(),
);
Ok(())
}
pub fn process_common_message(&mut self, common: CommonToWriteMessage) -> crate::Result<()> {
match common {
CommonToWriteMessage::StreamEnd(stream_id, error_code) => {
self.process_stream_end(stream_id, error_code)
}
CommonToWriteMessage::StreamEnqueue(stream_id, part) => {
self.process_stream_enqueue(stream_id, part)
}
CommonToWriteMessage::Pull(stream_id, stream, out_window_receiver) => {
self.process_stream_pull(stream_id, stream, out_window_receiver)
}
CommonToWriteMessage::IncreaseInWindow(stream_id, increase) => {
self.increase_in_window(stream_id, increase)
}
CommonToWriteMessage::DumpState(sender) => self.process_dump_state(sender),
}
}
pub fn send_goaway(&mut self, error_code: ErrorCode) -> crate::Result<()> {
debug!("requesting to send GOAWAY with code {:?}", error_code);
let frame = GoawayFrame::new(self.last_peer_stream_id, error_code);
self.queued_write.queue_goaway(frame);
Ok(())
}
pub fn poll_flush(&mut self, cx: &mut Context<'_>) -> crate::Result<()> {
self.buffer_outg_conn()?;
loop {
match self.queued_write.poll(cx) {
Poll::Pending => return Ok(()),
Poll::Ready(Err(e)) => return Err(e),
Poll::Ready(Ok(())) => {}
}
let updated = self.buffer_outg_conn()?;
if !updated {
return Ok(());
}
}
}
}
// Message sent to write loop.
// Processed while write loop is not handling network I/O.
pub(crate) enum CommonToWriteMessage {
IncreaseInWindow(StreamId, u32),
StreamEnqueue(StreamId, DataOrHeadersWithFlag),
StreamEnd(StreamId, ErrorCode), // sent when the user-provided handler has completed the stream
Pull(
StreamId,
TryStreamBox<DataOrTrailers>,
StreamOutWindowReceiver,
),
DumpState(DeathAwareOneshotNoContentDropSender<ConnStateSnapshot, ConnDiedType>),
}
impl ErrorAwareDrop for CommonToWriteMessage {
fn drop_with_error(self, error: Error) {
let _ = error;
match self {
CommonToWriteMessage::IncreaseInWindow(_, _) => {}
CommonToWriteMessage::StreamEnqueue(_, _) => {}
CommonToWriteMessage::StreamEnd(_, _) => {}
CommonToWriteMessage::Pull(_, _, _) => {}
CommonToWriteMessage::DumpState(_) => {}
}
}
}
impl fmt::Debug for CommonToWriteMessage {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("CommonToWriteMessage").field(&"...").finish()
}
}
| write_part | identifier_name |
stream.rs | use crate::{JsonRpcClient, Middleware, PinBoxFut, Provider, ProviderError};
use ethers_core::types::{Transaction, TxHash, U256};
use futures_core::stream::Stream;
use futures_core::Future;
use futures_util::stream::FuturesUnordered;
use futures_util::{stream, FutureExt, StreamExt};
use pin_project::pin_project;
use serde::{de::DeserializeOwned, Serialize};
use std::collections::VecDeque;
use std::{
fmt::Debug,
pin::Pin,
task::{Context, Poll},
time::Duration,
vec::IntoIter,
};
#[cfg(not(target_arch = "wasm32"))]
use futures_timer::Delay;
#[cfg(target_arch = "wasm32")]
use wasm_timer::Delay;
// https://github.com/tomusdrw/rust-web3/blob/befcb2fb8f3ca0a43e3081f68886fa327e64c8e6/src/api/eth_filter.rs#L20
pub fn interval(duration: Duration) -> impl Stream<Item = ()> + Send + Unpin {
stream::unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop)
}
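// Usage sketch (not part of the original source): `interval` yields `()` roughly every `duration`,
// so a poll loop can simply await the next tick:
//
//     let mut ticks = interval(Duration::from_millis(500));
//     ticks.next().await; // resolves after ~500ms, then again on each subsequent call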
/// The default polling interval for filters and pending transactions
pub const DEFAULT_POLL_INTERVAL: Duration = Duration::from_millis(7000);
enum FilterWatcherState<'a, R> {
WaitForInterval,
GetFilterChanges(PinBoxFut<'a, Vec<R>>),
NextItem(IntoIter<R>),
}
#[must_use = "filters do nothing unless you stream them"]
#[pin_project]
/// Streams data from an installed filter via `eth_getFilterChanges`
pub struct FilterWatcher<'a, P, R> {
/// The filter's installed id on the ethereum node
pub id: U256,
provider: &'a Provider<P>,
// The polling interval
interval: Box<dyn Stream<Item = ()> + Send + Unpin>,
state: FilterWatcherState<'a, R>,
}
impl<'a, P, R> FilterWatcher<'a, P, R>
where
P: JsonRpcClient,
R: Send + Sync + DeserializeOwned,
{
/// Creates a new watcher with the provided factory and filter id.
pub fn new<T: Into<U256>>(id: T, provider: &'a Provider<P>) -> Self {
Self {
id: id.into(),
interval: Box::new(interval(DEFAULT_POLL_INTERVAL)),
state: FilterWatcherState::WaitForInterval,
provider,
}
}
/// Sets the stream's polling interval
pub fn interval(mut self, duration: Duration) -> Self {
self.interval = Box::new(interval(duration));
self
}
/// Alias for Box::pin, must be called in order to pin the stream and be able
/// to call `next` on it.
pub fn stream(self) -> Pin<Box<Self>> {
Box::pin(self)
}
}
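// Usage sketch (not part of the original source; assumes a filter-creating method such as
// `Provider::watch_blocks`, which returns a `FilterWatcher`, and an error-propagating context):
//
//     let mut stream = provider.watch_blocks().await?.interval(Duration::from_secs(2)).stream();
//     while let Some(block_hash) = stream.next().await {
//         println!("new block: {:?}", block_hash);
//     }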
// Pattern for flattening the returned Vec of filter changes taken from
// https://github.com/tomusdrw/rust-web3/blob/f043b222744580bf4be043da757ab0b300c3b2da/src/api/eth_filter.rs#L50-L67
impl<'a, P, R> Stream for FilterWatcher<'a, P, R>
where
P: JsonRpcClient,
R: Serialize + Send + Sync + DeserializeOwned + Debug + 'a,
{
type Item = R;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.project();
let id = *this.id;
*this.state = match this.state {
FilterWatcherState::WaitForInterval => {
// Wait the polling period
let _ready = futures_util::ready!(this.interval.poll_next_unpin(cx));
// create a new instance of the future
cx.waker().wake_by_ref();
let fut = Box::pin(this.provider.get_filter_changes(id));
FilterWatcherState::GetFilterChanges(fut)
}
FilterWatcherState::GetFilterChanges(fut) => {
// NOTE: If the provider returns an error, this will return an empty
// vector. Should we make this return a Result instead? Ideally if we're
// in a streamed loop we wouldn't want the loop to terminate if an error
// is encountered (since it might be a temporary error).
let items: Vec<R> = futures_util::ready!(fut.as_mut().poll(cx)).unwrap_or_default();
cx.waker().wake_by_ref();
FilterWatcherState::NextItem(items.into_iter())
}
// Consume 1 element from the vector. If more elements are in the vector,
// the next call will immediately go to this branch instead of trying to get
// filter changes again. Once the whole vector is consumed, it will poll again
// for new logs
FilterWatcherState::NextItem(iter) => {
cx.waker().wake_by_ref();
match iter.next() {
Some(item) => return Poll::Ready(Some(item)),
None => FilterWatcherState::WaitForInterval,
}
}
};
Poll::Pending
}
}
impl<'a, P> FilterWatcher<'a, P, TxHash>
where
P: JsonRpcClient,
{
/// Returns a stream that resolves each transaction hash yielded by this stream into the full `Transaction`.
///
/// This internally calls `Provider::get_transaction` for every new transaction hash.
/// No more than `n` futures will be buffered at any point in time, and fewer than `n` may be
/// buffered depending on the state of each future.
pub fn transactions_unordered(self, n: usize) -> TransactionStream<'a, P, Self> {
TransactionStream::new(self.provider, self, n)
}
}
/// Errors `TransactionStream` can throw
#[derive(Debug, thiserror::Error)]
pub enum GetTransactionError {
#[error("Failed to get transaction `{0}`: {1}")]
ProviderError(TxHash, ProviderError),
/// `get_transaction` resulted in a `None`
#[error("Transaction `{0}` not found")]
NotFound(TxHash),
}
impl From<GetTransactionError> for ProviderError {
fn from(err: GetTransactionError) -> Self {
match err {
GetTransactionError::ProviderError(_, err) => err,
err @ GetTransactionError::NotFound(_) => ProviderError::CustomError(err.to_string()),
}
}
}
type TransactionFut<'a> = Pin<Box<dyn Future<Output = TransactionResult> + 'a>>;
type TransactionResult = Result<Transaction, GetTransactionError>;
/// Drains a stream of transaction hashes and yields the corresponding full `Transaction`s.
#[must_use = "streams do nothing unless polled"]
pub struct TransactionStream<'a, P, St> {
/// Currently running futures pending completion.
pending: FuturesUnordered<TransactionFut<'a>>,
/// Temporarily buffered transaction hashes that are started as soon as a running future finishes.
buffered: VecDeque<TxHash>,
/// The provider that gets the transaction
provider: &'a Provider<P>,
/// A stream of transaction hashes.
stream: St,
/// max allowed futures to execute at once.
max_concurrent: usize,
}
impl<'a, P: JsonRpcClient, St> TransactionStream<'a, P, St> {
/// Create a new `TransactionStream` instance
pub fn new(provider: &'a Provider<P>, stream: St, max_concurrent: usize) -> Self {
Self {
pending: Default::default(),
buffered: Default::default(),
provider,
stream,
max_concurrent,
}
}
/// Push a future into the set
fn push_tx(&mut self, tx: TxHash) {
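// Fetch the full transaction: `Ok(None)` maps to `GetTransactionError::NotFound`,
// an RPC failure to `GetTransactionError::ProviderError`.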
let fut = self
.provider
.get_transaction(tx)
.then(move |res| match res {
Ok(Some(tx)) => futures_util::future::ok(tx),
Ok(None) => futures_util::future::err(GetTransactionError::NotFound(tx)),
Err(err) => futures_util::future::err(GetTransactionError::ProviderError(tx, err)),
});
self.pending.push(Box::pin(fut));
}
}
impl<'a, P, St> Stream for TransactionStream<'a, P, St>
where
P: JsonRpcClient,
St: Stream<Item = TxHash> + Unpin + 'a,
{
type Item = TransactionResult;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
// drain buffered transactions first
while this.pending.len() < this.max_concurrent {
if let Some(tx) = this.buffered.pop_front() {
this.push_tx(tx);
} else {
break;
}
}
let mut stream_done = false;
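// Pull hashes from the underlying stream, starting them immediately while there is spare
// capacity and buffering the rest for later.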
loop {
match Stream::poll_next(Pin::new(&mut this.stream), cx) {
Poll::Ready(Some(tx)) => {
if this.pending.len() < this.max_concurrent {
this.push_tx(tx);
} else {
this.buffered.push_back(tx);
}
}
Poll::Ready(None) => {
stream_done = true;
break;
}
_ => break,
}
}
// poll running futures
if let tx @ Poll::Ready(Some(_)) = this.pending.poll_next_unpin(cx) {
return tx;
}
if stream_done && this.pending.is_empty() {
// all done
return Poll::Ready(None);
}
Poll::Pending
}
}
#[cfg(test)]
#[cfg(not(target_arch = "wasm32"))]
mod tests {
use super::*;
use crate::{Http, Ws};
use ethers_core::{
types::{TransactionReceipt, TransactionRequest},
utils::{Ganache, Geth},
};
use futures_util::{FutureExt, StreamExt};
use std::collections::HashSet;
use std::convert::TryFrom;
#[tokio::test]
async fn can_stream_pending_transactions() {
let num_txs = 5;
let geth = Geth::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(geth.endpoint())
.unwrap()
.interval(Duration::from_millis(1000));
let ws = Ws::connect(geth.ws_endpoint()).await.unwrap();
let ws_provider = Provider::new(ws);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let mut sending = futures_util::future::join_all(
std::iter::repeat(tx.clone()).take(num_txs).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
.unwrap()
}),
)
.fuse();
let mut watch_tx_stream = provider
.watch_pending_transactions()
.await
.unwrap()
.transactions_unordered(num_txs)
.fuse();
let mut sub_tx_stream = ws_provider
.subscribe_pending_txs()
.await
.unwrap()
.transactions_unordered(2)
.fuse();
let mut sent: Option<Vec<TransactionReceipt>> = None;
let mut watch_received: Vec<Transaction> = Vec::with_capacity(num_txs);
let mut sub_received: Vec<Transaction> = Vec::with_capacity(num_txs);
loop {
futures_util::select! {
txs = sending => {
sent = Some(txs)
},
tx = watch_tx_stream.next() => watch_received.push(tx.unwrap().unwrap()),
tx = sub_tx_stream.next() => sub_received.push(tx.unwrap().unwrap()),
}; | if let Some(ref sent) = sent {
assert_eq!(sent.len(), watch_received.len());
let sent_txs = sent
.iter()
.map(|tx| tx.transaction_hash)
.collect::<HashSet<_>>();
assert_eq!(sent_txs, watch_received.iter().map(|tx| tx.hash).collect());
assert_eq!(sent_txs, sub_received.iter().map(|tx| tx.hash).collect());
break;
}
}
}
}
#[tokio::test]
async fn can_stream_transactions() {
let ganache = Ganache::new().block_time(2u64).spawn();
let provider = Provider::<Http>::try_from(ganache.endpoint())
.unwrap()
.with_sender(ganache.addresses()[0]);
let accounts = provider.get_accounts().await.unwrap();
let tx = TransactionRequest::new()
.from(accounts[0])
.to(accounts[0])
.value(1e18 as u64);
let txs =
futures_util::future::join_all(std::iter::repeat(tx.clone()).take(3).map(|tx| async {
provider
.send_transaction(tx, None)
.await
.unwrap()
.await
.unwrap()
}))
.await;
let stream = TransactionStream::new(
&provider,
stream::iter(txs.iter().cloned().map(|tx| tx.unwrap().transaction_hash)),
10,
);
let res = stream
.collect::<Vec<_>>()
.await
.into_iter()
.collect::<Result<Vec<_>, _>>()
.unwrap();
assert_eq!(res.len(), txs.len());
assert_eq!(
res.into_iter().map(|tx| tx.hash).collect::<HashSet<_>>(),
txs.into_iter()
.map(|tx| tx.unwrap().transaction_hash)
.collect()
);
}
} | if watch_received.len() == num_txs && sub_received.len() == num_txs { | random_line_split |