// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! FunctionalDependencies keeps track of functional dependencies
//! inside DFSchema.
use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result};
use sqlparser::ast::TableConstraint;
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
/// This object defines a constraint on a table.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum Constraint {
/// Columns with the given indices form a composite primary key (they are
/// jointly unique and not nullable):
PrimaryKey(Vec<usize>),
/// Columns with the given indices form a composite unique key:
Unique(Vec<usize>),
}
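
// Illustrative example (table and column names are hypothetical): for a table
// `t(a, b, c)` declared with `PRIMARY KEY (a)` and `UNIQUE (b, c)`, the
// constraints are represented positionally as:
//
//     Constraint::PrimaryKey(vec![0])   // column `a`
//     Constraint::Unique(vec![1, 2])    // columns `b` and `c`
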
/// This object encapsulates a list of functional constraints:
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Constraints {
inner: Vec<Constraint>,
}
impl Constraints {
/// Create empty constraints
pub fn empty() -> Self {
Constraints::new(vec![])
}
    // This method is private. Outside callers can either create empty
    // constraints using the `Constraints::empty` API, or create constraints
    // from table constraints using the `Constraints::new_from_table_constraints`
    // API.
fn new(constraints: Vec<Constraint>) -> Self {
Self { inner: constraints }
}
/// Convert each `TableConstraint` to corresponding `Constraint`
pub fn new_from_table_constraints(
constraints: &[TableConstraint],
df_schema: &DFSchemaRef,
) -> Result<Self> {
let constraints = constraints
.iter()
.map(|c: &TableConstraint| match c {
TableConstraint::Unique {
columns,
is_primary,
..
} => {
// Get primary key and/or unique indices in the schema:
let indices = columns
.iter()
.map(|pk| {
let idx = df_schema
.fields()
.iter()
.position(|item| {
item.qualified_name() == pk.value.clone()
})
.ok_or_else(|| {
DataFusionError::Execution(
"Primary key doesn't exist".to_string(),
)
})?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
                TableConstraint::ForeignKey { .. } => Err(DataFusionError::Plan(
                    "Foreign key constraints are not currently supported".to_string(),
                )),
                TableConstraint::Check { .. } => Err(DataFusionError::Plan(
                    "Check constraints are not currently supported".to_string(),
                )),
                TableConstraint::Index { .. } => Err(DataFusionError::Plan(
                    "Indexes are not currently supported".to_string(),
                )),
                TableConstraint::FulltextOrSpatial { .. } => Err(DataFusionError::Plan(
                    "Indexes are not currently supported".to_string(),
                )),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
    /// Check whether the constraint set is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
        if !pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
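
// With the constraints above, the `Display` implementation renders the derived
// `Debug` form of each variant joined by commas, e.g.
// ` constraints=[PrimaryKey([0]), Unique([1, 2])]`; an empty constraint set
// renders nothing.
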
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
    // Column indices of dependent column(s):
    pub target_indices: Vec<usize>,
    /// Flag indicating whether one of the `source_indices` can receive NULL values.
    /// For a data source, if the constraint in question is `Constraint::Unique`,
    /// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
    /// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
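
// Worked example: in a hypothetical table `employee(id, name, dept)` where
// `id` is a primary key, the dependence `[0] -> [0, 1, 2]` holds with
// `nullable = false`; any two rows that agree on `id` must agree on every
// other column.
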
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
    /// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
    /// Sets the dependency mode of this functional dependence to `mode`.
    pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
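
// A dependence is typically constructed in the least restrictive `Multi` mode
// and then tightened via the builder, e.g.:
//
//     let dep = FunctionalDependence::new(vec![0], vec![0, 1, 2], false)
//         .with_mode(Dependency::Single);
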
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
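
    // For instance, a `Constraint::PrimaryKey(vec![0])` over a 3-field schema
    // becomes the dependence `[0] -> [0, 1, 2]` with `nullable = false` and
    // mode `Dependency::Single`; a `Constraint::Unique` differs only in
    // being nullable.
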
    /// Sets the dependency mode of every functional dependence to `mode`.
    pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
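
    // Continuing the doc example: with `proj_indices = [2, 5, 8]` and
    // `n_out = 3`, a `Multi`-mode dependence `[5] -> [5, 8]` projects to
    // `[1] -> [1, 2]`, while a `Single`-mode dependence whose sources survive
    // the projection would instead target the whole output, `[0, 1, 2]`.
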
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join_type == JoinType::Right {
// Downgrade the left side, since it may have additional NULL values:
left_func_dependencies.downgrade_dependencies();
}
// Combine left and right functional dependencies:
left_func_dependencies.extend(right_func_dependencies);
left_func_dependencies
}
JoinType::LeftSemi | JoinType::LeftAnti => {
// These joins preserve functional dependencies of the left side:
left_func_dependencies
}
JoinType::RightSemi | JoinType::RightAnti => {
// These joins preserve functional dependencies of the right side:
right_func_dependencies
}
JoinType::Full => {
// All of the functional dependencies are lost in a FULL join:
FunctionalDependencies::empty()
}
}
}
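
    // Example: a LEFT JOIN between a left side with primary-key dependence
    // `[0] -> [0, 1]` (nullable = false) and a right side with `[0] -> [0]`
    // (nullable = false), using `left_cols_len = 2`, offsets the right
    // dependence to `[2] -> [2]`, switches both sides to `Dependency::Multi`,
    // and downgrades the right side to `nullable = true`; a FULL join would
    // discard both.
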
/// This function downgrades a functional dependency when nullability becomes
/// a possibility:
/// - If the dependency in question is UNIQUE (i.e. nullable), a new null value
/// invalidates the dependency.
/// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new
/// null value turns it into UNIQUE mode.
fn downgrade_dependencies(&mut self) {
// Delete nullable dependencies, since they are no longer valid:
        self.deps.retain(|item| !item.nullable);
self.deps.iter_mut().for_each(|item| item.nullable = true);
}
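
    // Example: given one nullable (UNIQUE-like) and one non-nullable
    // (PRIMARY KEY-like) dependence, downgrading deletes the former and keeps
    // the latter with its `nullable` flag flipped to `true`.
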
    /// This function ensures that functional dependencies involving uniquely
    /// occurring determinant keys cover their entire table in terms of
    /// dependent columns.
pub fn extend_target_indices(&mut self, n_out: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
mode,
target_indices,
..
}| {
// If unique, cover the whole table:
if *mode == Dependency::Single {
*target_indices = (0..n_out).collect::<Vec<_>>();
}
},
)
}
}
/// Calculates functional dependencies for aggregate output when there is a GROUP BY expression.
pub fn aggregate_functional_dependencies(
aggr_input_schema: &DFSchema,
group_by_expr_names: &[String],
aggr_schema: &DFSchema,
) -> FunctionalDependencies {
let mut aggregate_func_dependencies = vec![];
let aggr_input_fields = aggr_input_schema.fields();
let aggr_fields = aggr_schema.fields();
// Association covers the whole table:
let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>();
// Get functional dependencies of the schema:
let func_dependencies = aggr_input_schema.functional_dependencies();
for FunctionalDependence {
source_indices,
nullable,
mode,
..
} in &func_dependencies.deps
{
// Keep source indices in a `HashSet` to prevent duplicate entries:
let mut new_source_indices = HashSet::new();
let source_field_names = source_indices
.iter()
.map(|&idx| aggr_input_fields[idx].qualified_name())
.collect::<Vec<_>>();
for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() {
// When one of the input determinant expressions matches with
// the GROUP BY expression, add the index of the GROUP BY
// expression as a new determinant key:
if source_field_names.contains(group_by_expr_name) {
new_source_indices.insert(idx);
}
}
// All of the composite indices occur in the GROUP BY expression:
if new_source_indices.len() == source_indices.len() {
aggregate_func_dependencies.push(
FunctionalDependence::new(
new_source_indices.into_iter().collect(),
target_indices.clone(),
*nullable,
)
                // Input uniqueness is preserved when the GROUP BY expressions
                // match the input functional dependence determinants:
.with_mode(*mode),
);
}
}
// If we have a single GROUP BY key, we can guarantee uniqueness after
// aggregation:
if group_by_expr_names.len() == 1 {
// If `source_indices` contain 0, delete this functional dependency
// as it will be added anyway with mode `Dependency::Single`:
if let Some(idx) = aggregate_func_dependencies
.iter()
.position(|item| item.source_indices.contains(&0))
{
// Delete the functional dependency that contains zeroth idx:
aggregate_func_dependencies.remove(idx);
}
// Add a new functional dependency associated with the whole table:
aggregate_func_dependencies.push(
            // Use the nullable property of the GROUP BY expression:
FunctionalDependence::new(
vec![0],
target_indices,
aggr_fields[0].is_nullable(),
)
.with_mode(Dependency::Single),
);
}
FunctionalDependencies::new(aggregate_func_dependencies)
}
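
// Example: if the aggregate input carries the primary-key dependence
// `[0] -> all columns` and the single GROUP BY expression is that key column,
// the output gains the dependence `[0] -> all output columns` with mode
// `Dependency::Single`, since each group key appears exactly once after
// aggregation.
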
/// Returns the target indices of functional dependencies whose determinant
/// keys are all present among the GROUP BY expressions.
pub fn get_target_functional_dependencies(
schema: &DFSchema,
group_by_expr_names: &[String],
) -> Option<Vec<usize>> {
let mut combined_target_indices = HashSet::new();
let dependencies = schema.functional_dependencies();
let field_names = schema
.fields()
.iter()
.map(|item| item.qualified_name())
.collect::<Vec<_>>();
for FunctionalDependence {
source_indices,
target_indices,
..
} in &dependencies.deps
{
let source_key_names = source_indices
.iter()
.map(|id_key_idx| field_names[*id_key_idx].clone())
.collect::<Vec<_>>();
// If the GROUP BY expression contains a determinant key, we can use
// the associated fields after aggregation even if they are not part
// of the GROUP BY expression.
if source_key_names
.iter()
.all(|source_key_name| group_by_expr_names.contains(source_key_name))
{
combined_target_indices.extend(target_indices.iter());
}
}
    // NOTE: The tail of this function was truncated in the source; the
    // following is a reconstruction inferred from the `Option<Vec<usize>>`
    // signature: return the (sorted) combined target indices when at least
    // one determinant key was fully covered, and `None` otherwise.
    if combined_target_indices.is_empty() {
        None
    } else {
        let mut result = combined_target_indices.into_iter().collect::<Vec<_>>();
        result.sort();
        Some(result)
    }
}
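
// NOTE: The two index-manipulation helpers used above were also lost to
// truncation. The following are minimal reconstructions that match the call
// sites; treat them as a sketch rather than the authoritative upstream
// implementations.

/// Adds `offset` to each element of `in_data`.
fn add_offset_to_vec<T: Copy + std::ops::Add<Output = T>>(
    in_data: &[T],
    offset: T,
) -> Vec<T> {
    in_data.iter().map(|&item| item + offset).collect()
}

/// Maps each entry in `entries` to its position within `proj_indices`,
/// dropping entries that do not appear in the projection.
fn update_elements_with_matching_indices(
    entries: &[usize],
    proj_indices: &[usize],
) -> Vec<usize> {
    entries
        .iter()
        .filter_map(|val| proj_indices.iter().position(|proj_idx| proj_idx == val))
        .collect()
}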
functional_dependencies.rs |
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! FunctionalDependencies keeps track of functional dependencies
//! inside DFSchema.
use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result};
use sqlparser::ast::TableConstraint;
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
/// This object defines a constraint on a table.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum Constraint {
/// Columns with the given indices form a composite primary key (they are
/// jointly unique and not nullable):
PrimaryKey(Vec<usize>),
/// Columns with the given indices form a composite unique key:
Unique(Vec<usize>),
}
/// This object encapsulates a list of functional constraints:
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Constraints {
inner: Vec<Constraint>,
}
impl Constraints {
/// Create empty constraints
pub fn empty() -> Self {
Constraints::new(vec![])
}
// This method is private.
// Outside callers can either create empty constraint using `Constraints::empty` API.
// or create constraint from table constraints using `Constraints::new_from_table_constraints` API.
fn new(constraints: Vec<Constraint>) -> Self {
Self { inner: constraints }
}
/// Convert each `TableConstraint` to corresponding `Constraint`
pub fn new_from_table_constraints(
constraints: &[TableConstraint],
df_schema: &DFSchemaRef,
) -> Result<Self> | DataFusionError::Execution(
"Primary key doesn't exist".to_string(),
)
})?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey {.. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check {.. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if!pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join_type == JoinType::Right {
// Downgrade the left side, since it may have additional NULL values:
left_func_dependencies.downgrade_dependencies();
}
// Combine left and right functional dependencies:
left_func_dependencies.extend(right_func_dependencies);
left_func_dependencies
}
JoinType::LeftSemi | JoinType::LeftAnti => {
// These joins preserve functional dependencies of the left side:
left_func_dependencies
}
JoinType::RightSemi | JoinType::RightAnti => {
// These joins preserve functional dependencies of the right side:
right_func_dependencies
}
JoinType::Full => {
// All of the functional dependencies are lost in a FULL join:
FunctionalDependencies::empty()
}
}
}
/// This function downgrades a functional dependency when nullability becomes
/// a possibility:
/// - If the dependency in question is UNIQUE (i.e. nullable), a new null value
/// invalidates the dependency.
/// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new
/// null value turns it into UNIQUE mode.
fn downgrade_dependencies(&mut self) {
// Delete nullable dependencies, since they are no longer valid:
self.deps.retain(|item|!item.nullable);
self.deps.iter_mut().for_each(|item| item.nullable = true);
}
/// This function ensures that functional dependencies involving uniquely
/// occuring determinant keys cover their entire table in terms of
/// dependent columns.
pub fn extend_target_indices(&mut self, n_out: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
mode,
target_indices,
..
}| {
// If unique, cover the whole table:
if *mode == Dependency::Single {
*target_indices = (0..n_out).collect::<Vec<_>>();
}
},
)
}
}
/// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression.
pub fn aggregate_functional_dependencies(
aggr_input_schema: &DFSchema,
group_by_expr_names: &[String],
aggr_schema: &DFSchema,
) -> FunctionalDependencies {
let mut aggregate_func_dependencies = vec![];
let aggr_input_fields = aggr_input_schema.fields();
let aggr_fields = aggr_schema.fields();
// Association covers the whole table:
let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>();
// Get functional dependencies of the schema:
let func_dependencies = aggr_input_schema.functional_dependencies();
for FunctionalDependence {
source_indices,
nullable,
mode,
..
} in &func_dependencies.deps
{
// Keep source indices in a `HashSet` to prevent duplicate entries:
let mut new_source_indices = HashSet::new();
let source_field_names = source_indices
.iter()
.map(|&idx| aggr_input_fields[idx].qualified_name())
.collect::<Vec<_>>();
for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() {
// When one of the input determinant expressions matches with
// the GROUP BY expression, add the index of the GROUP BY
// expression as a new determinant key:
if source_field_names.contains(group_by_expr_name) {
new_source_indices.insert(idx);
}
}
// All of the composite indices occur in the GROUP BY expression:
if new_source_indices.len() == source_indices.len() {
aggregate_func_dependencies.push(
FunctionalDependence::new(
new_source_indices.into_iter().collect(),
target_indices.clone(),
*nullable,
)
// input uniqueness stays the same when GROUP BY matches with input functional dependence determinants
.with_mode(*mode),
);
}
}
// If we have a single GROUP BY key, we can guarantee uniqueness after
// aggregation:
if group_by_expr_names.len() == 1 {
// If `source_indices` contain 0, delete this functional dependency
// as it will be added anyway with mode `Dependency::Single`:
if let Some(idx) = aggregate_func_dependencies
.iter()
.position(|item| item.source_indices.contains(&0))
{
// Delete the functional dependency that contains zeroth idx:
aggregate_func_dependencies.remove(idx);
}
// Add a new functional dependency associated with the whole table:
aggregate_func_dependencies.push(
// Use nullable property of the group by expression
FunctionalDependence::new(
vec![0],
target_indices,
aggr_fields[0].is_nullable(),
)
.with_mode(Dependency::Single),
);
}
FunctionalDependencies::new(aggregate_func_dependencies)
}
/// Returns target indices, for the determinant keys that are inside
/// group by expressions.
pub fn get_target_functional_dependencies(
schema: &DFSchema,
group_by_expr_names: &[String],
) -> Option<Vec<usize>> {
let mut combined_target_indices = HashSet::new();
let dependencies = schema.functional_dependencies();
let field_names = schema
.fields()
.iter()
.map(|item| item.qualified_name())
.collect::<Vec<_>>();
for FunctionalDependence {
source_indices,
target_indices,
..
} in &dependencies.deps
{
let source_key_names = source_indices
.iter()
.map(|id_key_idx| field_names[*id_key_idx].clone())
.collect::<Vec<_>>();
// If the GROUP BY expression contains a determinant key, we can use
// the associated fields after aggregation even if they are not part
// of the GROUP BY expression.
if source_key_names
.iter()
.all(|source_key_name| group_by_expr_names.contains(source_key_name))
{
combined_target_indices.extend(target_indices.iter());
}
}
| {
let constraints = constraints
.iter()
.map(|c: &TableConstraint| match c {
TableConstraint::Unique {
columns,
is_primary,
..
} => {
// Get primary key and/or unique indices in the schema:
let indices = columns
.iter()
.map(|pk| {
let idx = df_schema
.fields()
.iter()
.position(|item| {
item.qualified_name() == pk.value.clone()
})
.ok_or_else(|| { | identifier_body |
functional_dependencies.rs |
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! FunctionalDependencies keeps track of functional dependencies
//! inside DFSchema.
use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result};
use sqlparser::ast::TableConstraint;
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
/// This object defines a constraint on a table.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum Constraint {
/// Columns with the given indices form a composite primary key (they are
/// jointly unique and not nullable):
PrimaryKey(Vec<usize>),
/// Columns with the given indices form a composite unique key:
Unique(Vec<usize>),
}
/// This object encapsulates a list of functional constraints:
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Constraints {
inner: Vec<Constraint>,
}
impl Constraints {
/// Create empty constraints
pub fn empty() -> Self {
Constraints::new(vec![])
}
// This method is private.
// Outside callers can either create empty constraint using `Constraints::empty` API.
// or create constraint from table constraints using `Constraints::new_from_table_constraints` API.
fn new(constraints: Vec<Constraint>) -> Self {
Self { inner: constraints }
}
/// Convert each `TableConstraint` to corresponding `Constraint`
pub fn new_from_table_constraints(
constraints: &[TableConstraint],
df_schema: &DFSchemaRef,
) -> Result<Self> {
let constraints = constraints
.iter()
.map(|c: &TableConstraint| match c {
TableConstraint::Unique {
columns,
is_primary,
..
} => {
// Get primary key and/or unique indices in the schema:
let indices = columns
.iter()
.map(|pk| {
let idx = df_schema
.fields()
.iter()
.position(|item| {
item.qualified_name() == pk.value.clone()
})
.ok_or_else(|| {
DataFusionError::Execution(
"Primary key doesn't exist".to_string(),
)
})?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary {
Constraint::PrimaryKey(indices)
} else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey {.. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check {.. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if!pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn | (dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join_type == JoinType::Right {
// Downgrade the left side, since it may have additional NULL values:
left_func_dependencies.downgrade_dependencies();
}
// Combine left and right functional dependencies:
left_func_dependencies.extend(right_func_dependencies);
left_func_dependencies
}
JoinType::LeftSemi | JoinType::LeftAnti => {
// These joins preserve functional dependencies of the left side:
left_func_dependencies
}
JoinType::RightSemi | JoinType::RightAnti => {
// These joins preserve functional dependencies of the right side:
right_func_dependencies
}
JoinType::Full => {
// All of the functional dependencies are lost in a FULL join:
FunctionalDependencies::empty()
}
}
}
/// This function downgrades a functional dependency when nullability becomes
/// a possibility:
/// - If the dependency in question is UNIQUE (i.e. nullable), a new null value
/// invalidates the dependency.
/// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new
/// null value turns it into UNIQUE mode.
fn downgrade_dependencies(&mut self) {
// Delete nullable dependencies, since they are no longer valid:
self.deps.retain(|item|!item.nullable);
self.deps.iter_mut().for_each(|item| item.nullable = true);
}
/// This function ensures that functional dependencies involving uniquely
/// occuring determinant keys cover their entire table in terms of
/// dependent columns.
pub fn extend_target_indices(&mut self, n_out: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
mode,
target_indices,
..
}| {
// If unique, cover the whole table:
if *mode == Dependency::Single {
*target_indices = (0..n_out).collect::<Vec<_>>();
}
},
)
}
}
/// Calculates functional dependencies for aggregate output, when there is a GROUP BY expression.
pub fn aggregate_functional_dependencies(
aggr_input_schema: &DFSchema,
group_by_expr_names: &[String],
aggr_schema: &DFSchema,
) -> FunctionalDependencies {
let mut aggregate_func_dependencies = vec![];
let aggr_input_fields = aggr_input_schema.fields();
let aggr_fields = aggr_schema.fields();
// Association covers the whole table:
let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>();
// Get functional dependencies of the schema:
let func_dependencies = aggr_input_schema.functional_dependencies();
for FunctionalDependence {
source_indices,
nullable,
mode,
..
} in &func_dependencies.deps
{
// Keep source indices in a `HashSet` to prevent duplicate entries:
let mut new_source_indices = HashSet::new();
let source_field_names = source_indices
.iter()
.map(|&idx| aggr_input_fields[idx].qualified_name())
.collect::<Vec<_>>();
for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() {
// When one of the input determinant expressions matches with
// the GROUP BY expression, add the index of the GROUP BY
// expression as a new determinant key:
if source_field_names.contains(group_by_expr_name) {
new_source_indices.insert(idx);
}
}
// All of the composite indices occur in the GROUP BY expression:
if new_source_indices.len() == source_indices.len() {
aggregate_func_dependencies.push(
FunctionalDependence::new(
new_source_indices.into_iter().collect(),
target_indices.clone(),
*nullable,
)
// input uniqueness stays the same when GROUP BY matches with input functional dependence determinants
.with_mode(*mode),
);
}
}
// If we have a single GROUP BY key, we can guarantee uniqueness after
// aggregation:
if group_by_expr_names.len() == 1 {
// If `source_indices` contain 0, delete this functional dependency
// as it will be added anyway with mode `Dependency::Single`:
if let Some(idx) = aggregate_func_dependencies
.iter()
.position(|item| item.source_indices.contains(&0))
{
// Delete the functional dependency that contains zeroth idx:
aggregate_func_dependencies.remove(idx);
}
// Add a new functional dependency associated with the whole table:
aggregate_func_dependencies.push(
// Use nullable property of the group by expression
FunctionalDependence::new(
vec![0],
target_indices,
aggr_fields[0].is_nullable(),
)
.with_mode(Dependency::Single),
);
}
FunctionalDependencies::new(aggregate_func_dependencies)
}
/// Returns target indices, for the determinant keys that are inside
/// group by expressions.
pub fn get_target_functional_dependencies(
schema: &DFSchema,
group_by_expr_names: &[String],
) -> Option<Vec<usize>> {
let mut combined_target_indices = HashSet::new();
let dependencies = schema.functional_dependencies();
let field_names = schema
.fields()
.iter()
.map(|item| item.qualified_name())
.collect::<Vec<_>>();
for FunctionalDependence {
source_indices,
target_indices,
..
} in &dependencies.deps
{
let source_key_names = source_indices
.iter()
.map(|id_key_idx| field_names[*id_key_idx].clone())
.collect::<Vec<_>>();
// If the GROUP BY expression contains a determinant key, we can use
// the associated fields after aggregation even if they are not part
// of the GROUP BY expression.
if source_key_names
.iter()
.all(|source_key_name| group_by_expr_names.contains(source_key_name))
{
combined_target_indices.extend(target_indices.iter());
}
}
| new | identifier_name |
functional_dependencies.rs |
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! FunctionalDependencies keeps track of functional dependencies
//! inside DFSchema.
use crate::{DFSchema, DFSchemaRef, DataFusionError, JoinType, Result};
use sqlparser::ast::TableConstraint;
use std::collections::HashSet;
use std::fmt::{Display, Formatter};
/// This object defines a constraint on a table.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
enum Constraint {
/// Columns with the given indices form a composite primary key (they are
/// jointly unique and not nullable):
PrimaryKey(Vec<usize>),
/// Columns with the given indices form a composite unique key:
Unique(Vec<usize>),
}
/// This object encapsulates a list of functional constraints:
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct Constraints {
inner: Vec<Constraint>,
}
impl Constraints {
/// Create empty constraints
pub fn empty() -> Self {
Constraints::new(vec![])
}
// This method is private.
// Outside callers can either create empty constraint using `Constraints::empty` API.
// or create constraint from table constraints using `Constraints::new_from_table_constraints` API.
fn new(constraints: Vec<Constraint>) -> Self {
Self { inner: constraints }
}
/// Convert each `TableConstraint` to corresponding `Constraint`
pub fn new_from_table_constraints(
constraints: &[TableConstraint],
df_schema: &DFSchemaRef,
) -> Result<Self> {
let constraints = constraints
.iter()
.map(|c: &TableConstraint| match c {
TableConstraint::Unique {
columns,
is_primary,
..
} => {
// Get primary key and/or unique indices in the schema:
let indices = columns
.iter()
.map(|pk| {
let idx = df_schema
.fields()
.iter()
.position(|item| {
item.qualified_name() == pk.value.clone()
})
.ok_or_else(|| {
DataFusionError::Execution(
"Primary key doesn't exist".to_string(),
)
})?;
Ok(idx)
})
.collect::<Result<Vec<_>>>()?;
Ok(if *is_primary | else {
Constraint::Unique(indices)
})
}
TableConstraint::ForeignKey {.. } => Err(DataFusionError::Plan(
"Foreign key constraints are not currently supported".to_string(),
)),
TableConstraint::Check {.. } => Err(DataFusionError::Plan(
"Check constraints are not currently supported".to_string(),
)),
TableConstraint::Index {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
TableConstraint::FulltextOrSpatial {.. } => Err(DataFusionError::Plan(
"Indexes are not currently supported".to_string(),
)),
})
.collect::<Result<Vec<_>>>()?;
Ok(Constraints::new(constraints))
}
/// Check whether constraints is empty
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl Display for Constraints {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let pk: Vec<String> = self.inner.iter().map(|c| format!("{:?}", c)).collect();
let pk = pk.join(", ");
if!pk.is_empty() {
write!(f, " constraints=[{pk}]")
} else {
write!(f, "")
}
}
}
/// This object defines a functional dependence in the schema. A functional
/// dependence defines a relationship between determinant keys and dependent
/// columns. A determinant key is a column, or a set of columns, whose value
/// uniquely determines values of some other (dependent) columns. If two rows
/// have the same determinant key, dependent columns in these rows are
/// necessarily the same. If the determinant key is unique, the set of
/// dependent columns is equal to the entire schema and the determinant key can
/// serve as a primary key. Note that a primary key may "downgrade" into a
/// determinant key due to an operation such as a join, and this object is
/// used to track dependence relationships in such cases. For more information
/// on functional dependencies, see:
/// <https://www.scaler.com/topics/dbms/functional-dependency-in-dbms/>
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependence {
// Column indices of the (possibly composite) determinant key:
pub source_indices: Vec<usize>,
// Column indices of dependent column(s):
pub target_indices: Vec<usize>,
/// Flag indicating whether one of the `source_indices` can receive NULL values.
/// For a data source, if the constraint in question is `Constraint::Unique`,
/// this flag is `true`. If the constraint in question is `Constraint::PrimaryKey`,
/// this flag is `false`.
/// Note that as the schema changes between different stages in a plan,
/// such as after LEFT JOIN or RIGHT JOIN operations, this property may
/// change.
pub nullable: bool,
// The functional dependency mode:
pub mode: Dependency,
}
/// Describes functional dependency mode.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Dependency {
Single, // A determinant key may occur only once.
Multi, // A determinant key may occur multiple times (in multiple rows).
}
impl FunctionalDependence {
// Creates a new functional dependence.
pub fn new(
source_indices: Vec<usize>,
target_indices: Vec<usize>,
nullable: bool,
) -> Self {
Self {
source_indices,
target_indices,
nullable,
// Start with the least restrictive mode by default:
mode: Dependency::Multi,
}
}
pub fn with_mode(mut self, mode: Dependency) -> Self {
self.mode = mode;
self
}
}
/// This object encapsulates all functional dependencies in a given relation.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct FunctionalDependencies {
deps: Vec<FunctionalDependence>,
}
impl FunctionalDependencies {
/// Creates an empty `FunctionalDependencies` object.
pub fn empty() -> Self {
Self { deps: vec![] }
}
/// Creates a new `FunctionalDependencies` object from a vector of
/// `FunctionalDependence` objects.
pub fn new(dependencies: Vec<FunctionalDependence>) -> Self {
Self { deps: dependencies }
}
/// Creates a new `FunctionalDependencies` object from the given constraints.
pub fn new_from_constraints(
constraints: Option<&Constraints>,
n_field: usize,
) -> Self {
if let Some(Constraints { inner: constraints }) = constraints {
// Construct dependency objects based on each individual constraint:
let dependencies = constraints
.iter()
.map(|constraint| {
// All the field indices are associated with the whole table
// since we are dealing with table level constraints:
let dependency = match constraint {
Constraint::PrimaryKey(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
false,
),
Constraint::Unique(indices) => FunctionalDependence::new(
indices.to_vec(),
(0..n_field).collect::<Vec<_>>(),
true,
),
};
// As primary keys are guaranteed to be unique, set the
// functional dependency mode to `Dependency::Single`:
dependency.with_mode(Dependency::Single)
})
.collect::<Vec<_>>();
Self::new(dependencies)
} else {
// There is no constraint, return an empty object:
Self::empty()
}
}
pub fn with_dependency(mut self, mode: Dependency) -> Self {
self.deps.iter_mut().for_each(|item| item.mode = mode);
self
}
/// Merges the given functional dependencies with these.
pub fn extend(&mut self, other: FunctionalDependencies) {
self.deps.extend(other.deps);
}
/// Adds the `offset` value to `source_indices` and `target_indices` for
/// each functional dependency.
pub fn add_offset(&mut self, offset: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
source_indices,
target_indices,
..
}| {
*source_indices = add_offset_to_vec(source_indices, offset);
*target_indices = add_offset_to_vec(target_indices, offset);
},
)
}
/// Updates `source_indices` and `target_indices` of each functional
/// dependence using the index mapping given in `proj_indices`.
///
/// Assume that `proj_indices` is \[2, 5, 8\] and we have a functional
/// dependence \[5\] (`source_indices`) -> \[5, 8\] (`target_indices`).
/// In the updated schema, fields at indices \[2, 5, 8\] will transform
/// to \[0, 1, 2\]. Therefore, the resulting functional dependence will
/// be \[1\] -> \[1, 2\].
pub fn project_functional_dependencies(
&self,
proj_indices: &[usize],
// The argument `n_out` denotes the schema field length, which is needed
// to correctly associate a `Single`-mode dependence with the whole table.
n_out: usize,
) -> FunctionalDependencies {
let mut projected_func_dependencies = vec![];
for FunctionalDependence {
source_indices,
target_indices,
nullable,
mode,
} in &self.deps
{
let new_source_indices =
update_elements_with_matching_indices(source_indices, proj_indices);
let new_target_indices = if *mode == Dependency::Single {
// Associate with all of the fields in the schema:
(0..n_out).collect()
} else {
// Update associations according to projection:
update_elements_with_matching_indices(target_indices, proj_indices)
};
// All of the composite indices should still be valid after projection;
// otherwise, functional dependency cannot be propagated.
if new_source_indices.len() == source_indices.len() {
let new_func_dependence = FunctionalDependence::new(
new_source_indices,
new_target_indices,
*nullable,
)
.with_mode(*mode);
projected_func_dependencies.push(new_func_dependence);
}
}
FunctionalDependencies::new(projected_func_dependencies)
}
/// This function joins this set of functional dependencies with the `other`
/// according to the given `join_type`.
pub fn join(
&self,
other: &FunctionalDependencies,
join_type: &JoinType,
left_cols_len: usize,
) -> FunctionalDependencies {
// Get mutable copies of left and right side dependencies:
let mut right_func_dependencies = other.clone();
let mut left_func_dependencies = self.clone();
match join_type {
JoinType::Inner | JoinType::Left | JoinType::Right => {
// Add offset to right schema:
right_func_dependencies.add_offset(left_cols_len);
// Result may have multiple values, update the dependency mode:
left_func_dependencies =
left_func_dependencies.with_dependency(Dependency::Multi);
right_func_dependencies =
right_func_dependencies.with_dependency(Dependency::Multi);
if *join_type == JoinType::Left {
// Downgrade the right side, since it may have additional NULL values:
right_func_dependencies.downgrade_dependencies();
} else if *join_type == JoinType::Right {
// Downgrade the left side, since it may have additional NULL values:
left_func_dependencies.downgrade_dependencies();
}
// Combine left and right functional dependencies:
left_func_dependencies.extend(right_func_dependencies);
left_func_dependencies
}
JoinType::LeftSemi | JoinType::LeftAnti => {
// These joins preserve functional dependencies of the left side:
left_func_dependencies
}
JoinType::RightSemi | JoinType::RightAnti => {
// These joins preserve functional dependencies of the right side:
right_func_dependencies
}
JoinType::Full => {
// All of the functional dependencies are lost in a FULL join:
FunctionalDependencies::empty()
}
}
}
/// This function downgrades a functional dependency when nullability becomes
/// a possibility:
/// - If the dependency in question is UNIQUE (i.e. nullable), a new null value
/// invalidates the dependency.
/// - If the dependency in question is PRIMARY KEY (i.e. not nullable), a new
/// null value turns it into UNIQUE mode.
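///
/// For example (illustrative): after a LEFT join, a right-side PRIMARY KEY
/// dependency becomes UNIQUE, while a right-side dependency that was already
/// UNIQUE is removed altogether.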
fn downgrade_dependencies(&mut self) {
// Delete nullable dependencies, since they are no longer valid:
self.deps.retain(|item| !item.nullable);
self.deps.iter_mut().for_each(|item| item.nullable = true);
}
/// This function ensures that functional dependencies involving uniquely
/// occurring determinant keys cover their entire table in terms of
/// dependent columns.
pub fn extend_target_indices(&mut self, n_out: usize) {
self.deps.iter_mut().for_each(
|FunctionalDependence {
mode,
target_indices,
..
}| {
// If unique, cover the whole table:
if *mode == Dependency::Single {
*target_indices = (0..n_out).collect::<Vec<_>>();
}
},
)
}
}
/// Calculates functional dependencies for the aggregate output when there is a GROUP BY expression.
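///
/// For example (illustrative): if the input has a dependency `[id] -> all
/// columns` and the query is `SELECT id, SUM(x) FROM t GROUP BY id`, the
/// output schema keeps a `Single`-mode dependency `[0] -> [0, 1]`.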
pub fn aggregate_functional_dependencies(
aggr_input_schema: &DFSchema,
group_by_expr_names: &[String],
aggr_schema: &DFSchema,
) -> FunctionalDependencies {
let mut aggregate_func_dependencies = vec![];
let aggr_input_fields = aggr_input_schema.fields();
let aggr_fields = aggr_schema.fields();
// Association covers the whole table:
let target_indices = (0..aggr_schema.fields().len()).collect::<Vec<_>>();
// Get functional dependencies of the schema:
let func_dependencies = aggr_input_schema.functional_dependencies();
for FunctionalDependence {
source_indices,
nullable,
mode,
..
} in &func_dependencies.deps
{
// Keep source indices in a `HashSet` to prevent duplicate entries:
let mut new_source_indices = HashSet::new();
let source_field_names = source_indices
.iter()
.map(|&idx| aggr_input_fields[idx].qualified_name())
.collect::<Vec<_>>();
for (idx, group_by_expr_name) in group_by_expr_names.iter().enumerate() {
// When one of the input determinant expressions matches with
// the GROUP BY expression, add the index of the GROUP BY
// expression as a new determinant key:
if source_field_names.contains(group_by_expr_name) {
new_source_indices.insert(idx);
}
}
// All of the composite indices occur in the GROUP BY expression:
if new_source_indices.len() == source_indices.len() {
aggregate_func_dependencies.push(
FunctionalDependence::new(
new_source_indices.into_iter().collect(),
target_indices.clone(),
*nullable,
)
// Input uniqueness is preserved when the GROUP BY expressions match the input functional dependence determinants:
.with_mode(*mode),
);
}
}
// If we have a single GROUP BY key, we can guarantee uniqueness after
// aggregation:
if group_by_expr_names.len() == 1 {
// If `source_indices` contains 0, delete this functional dependency
// as it will be re-added anyway with mode `Dependency::Single`:
if let Some(idx) = aggregate_func_dependencies
.iter()
.position(|item| item.source_indices.contains(&0))
{
// Delete the functional dependency that contains zeroth idx:
aggregate_func_dependencies.remove(idx);
}
// Add a new functional dependency associated with the whole table:
aggregate_func_dependencies.push(
// Use nullable property of the group by expression
FunctionalDependence::new(
vec![0],
target_indices,
aggr_fields[0].is_nullable(),
)
.with_mode(Dependency::Single),
);
}
FunctionalDependencies::new(aggregate_func_dependencies)
}
/// Returns the target indices for determinant keys that appear inside
/// the GROUP BY expressions.
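///
/// For example (illustrative): with a dependency `[id] -> [id, name, age]`
/// and `GROUP BY id`, the returned indices include those of `name` and
/// `age`, even though they do not appear in the GROUP BY list.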
pub fn get_target_functional_dependencies(
schema: &DFSchema,
group_by_expr_names: &[String],
) -> Option<Vec<usize>> {
let mut combined_target_indices = HashSet::new();
let dependencies = schema.functional_dependencies();
let field_names = schema
.fields()
.iter()
.map(|item| item.qualified_name())
.collect::<Vec<_>>();
for FunctionalDependence {
source_indices,
target_indices,
..
} in &dependencies.deps
{
let source_key_names = source_indices
.iter()
.map(|id_key_idx| field_names[*id_key_idx].clone())
.collect::<Vec<_>>();
// If the GROUP BY expression contains a determinant key, we can use
// the associated fields after aggregation even if they are not part
// of the GROUP BY expression.
if source_key_names
.iter()
.all(|source_key_name| group_by_expr_names.contains(source_key_name))
{
combined_target_indices.extend(target_indices.iter());
}
}
| {
Constraint::PrimaryKey(indices)
} | conditional_block |
apigroup.rs | use super::{
parse::{self, GroupVersionData},
version::Version,
};
use crate::{error::DiscoveryError, Client, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions};
pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope};
use kube_core::gvk::{GroupVersion, GroupVersionKind};
/// Describes one API group's collected resources and capabilities.
///
/// Each `ApiGroup` contains all data pinned to each version.
/// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"`
/// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`.
///
/// If you know the version of the discovered group, you can fetch it directly:
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose, the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned `ApiGroup`
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned `ApiGroup`
///
/// These two types, [`ApiResource`] and [`ApiCapabilities`],
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for the Api to do this,
/// as well as the [`ApiResource`] as the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always has an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
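// Sort versions into Kubernetes priority order before exposing them; e.g.
// (illustrative) ["v1alpha1", "v2", "v1"] sorts to ["v2", "v1", "v1alpha1"].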
fn sort_versions(&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') |
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
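/// (the empty string; core resources such as `Pod` are served under `/api/v1`)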
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (newest first)
/// - Beta versions (newest first)
/// - Alpha versions (newest first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If the server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the API-recommended list of resources, or just one particular kind,
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, caps) in apigroup.recommended_resources() {
/// if !caps.supports_operation(verbs::LIST) {
/// continue;
/// }
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for inst in api.list(&Default::default()).await? {
/// println!("Found {}: {}", ar.kind, inst.name());
/// }
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`].
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
self.versioned_resources(ver)
}
/// Returns the recommended version of the `kind` in the recommended resources (if found)
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`.
pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
for (ar, caps) in self.versioned_resources(ver) {
if ar.kind == kind {
return Some((ar, caps));
}
}
None
}
}
| {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
} | conditional_block |
apigroup.rs | use super::{
parse::{self, GroupVersionData},
version::Version,
};
use crate::{error::DiscoveryError, Client, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions};
pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope};
use kube_core::gvk::{GroupVersion, GroupVersionKind};
/// Describes one API group's collected resources and capabilities.
///
/// Each `ApiGroup` contains all data pinned to each version.
/// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"`
/// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`.
///
/// If you know the version of the discovered group, you can fetch it directly:
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose, the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned `ApiGroup`
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned `ApiGroup`
///
/// These two types, [`ApiResource`] and [`ApiCapabilities`],
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for the Api to do this,
/// as well as the [`ApiResource`] as the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always has an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn | (&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
}
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (newest first)
/// - Beta versions (newest first)
/// - Alpha versions (newest first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If the server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
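///
/// A short sketch (same discovery flow as the other examples in this module):
///
/// ```no_run
/// use kube::{Client, discovery};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let ver = apigroup.preferred_version_or_latest();
/// println!("working with {}/{}", apigroup.name(), ver);
/// Ok(())
/// }
/// ```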
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the API-recommended list of resources, or just one particular kind,
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, caps) in apigroup.recommended_resources() {
/// if !caps.supports_operation(verbs::LIST) {
/// continue;
/// }
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for inst in api.list(&Default::default()).await? {
/// println!("Found {}: {}", ar.kind, inst.name());
/// }
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`].
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
self.versioned_resources(ver)
}
/// Returns the recommended version of the `kind` in the recommended resources (if found)
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`.
pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
for (ar, caps) in self.versioned_resources(ver) {
if ar.kind == kind {
return Some((ar, caps));
}
}
None
}
}
| sort_versions | identifier_name |
apigroup.rs | use super::{
parse::{self, GroupVersionData},
version::Version,
};
use crate::{error::DiscoveryError, Client, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions};
pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope};
use kube_core::gvk::{GroupVersion, GroupVersionKind};
/// Describes one API group's collected resources and capabilities.
///
/// Each `ApiGroup` contains all data pinned to each version.
/// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"`
/// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`.
///
/// If you know the version of the discovered group, you can fetch it directly:
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose, the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned `ApiGroup`
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned `ApiGroup`
///
/// These two types, [`ApiResource`] and [`ApiCapabilities`],
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for the Api to do this,
/// as well as the [`ApiResource`] as the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always has an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn sort_versions(&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
}
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
}
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (newest first)
/// - Beta versions (newest first)
/// - Alpha versions (newest first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If the server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the API-recommended list of resources, or just one particular kind,
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?; | /// for inst in api.list(&Default::default()).await? {
/// println!("Found {}: {}", ar.kind, inst.name());
/// }
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`].
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
self.versioned_resources(ver)
}
/// Returns the recommended version of the `kind` in the recommended resources (if found)
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`.
pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
for (ar, caps) in self.versioned_resources(ver) {
if ar.kind == kind {
return Some((ar, caps));
}
}
None
}
} | /// for (ar, caps) in apigroup.recommended_resources() {
/// if !caps.supports_operation(verbs::LIST) {
/// continue;
/// }
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar); | random_line_split |
apigroup.rs | use super::{
parse::{self, GroupVersionData},
version::Version,
};
use crate::{error::DiscoveryError, Client, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions};
pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope};
use kube_core::gvk::{GroupVersion, GroupVersionKind};
/// Describes one API group's collected resources and capabilities.
///
/// Each `ApiGroup` contains all data pinned to each version.
/// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"`
/// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`.
///
/// If you know the version of the discovered group, you can fetch it directly:
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (apiresource, caps) in apigroup.versioned_resources("v1") {
/// println!("Found ApiResource {}", apiresource.kind);
/// }
/// Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose, the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned `ApiGroup`
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned `ApiGroup`
///
/// These two types, [`ApiResource`] and [`ApiCapabilities`],
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for the Api to do this,
/// as well as the [`ApiResource`] as the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
/// Name of the group e.g. apiregistration.k8s.io
name: String,
/// List of resource information, capabilities at particular versions
data: Vec<GroupVersionData>,
/// Preferred version if exported by the `APIGroup`
preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always has an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
tracing::debug!(name = g.name.as_str(), "Listing group versions");
let key = g.name;
if g.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
let mut data = vec![];
for vers in &g.versions {
let resources = client.list_api_group_resources(&vers.group_version).await?;
data.push(GroupVersionData::new(vers.version.clone(), resources)?);
}
let mut group = ApiGroup {
name: key,
data,
preferred: g.preferred_version.map(|v| v.version),
};
group.sort_versions();
Ok(group)
}
pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
let mut data = vec![];
let key = ApiGroup::CORE_GROUP.to_string();
if coreapis.versions.is_empty() {
return Err(DiscoveryError::EmptyApiGroup(key).into());
}
for v in coreapis.versions {
let resources = client.list_core_api_resources(&v).await?;
data.push(GroupVersionData::new(v, resources)?);
}
let mut group = ApiGroup {
name: ApiGroup::CORE_GROUP.to_string(),
data,
preferred: Some("v1".to_string()),
};
group.sort_versions();
Ok(group)
}
fn sort_versions(&mut self) {
self.data
.sort_by_cached_key(|gvd| Version::parse(gvd.version.as_str()))
}
// shortcut method to give cheapest return for a single GVK
pub(crate) async fn query_gvk(
client: &Client,
gvk: &GroupVersionKind,
) -> Result<(ApiResource, ApiCapabilities)> |
// shortcut method to give cheapest return for a pinned group
pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
let apiver = gv.api_version();
let list = if gv.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
let data = GroupVersionData::new(gv.version.clone(), list)?;
let group = ApiGroup {
name: gv.group.clone(),
data: vec![data],
preferred: Some(gv.version.clone()), // you preferred what you asked for
};
Ok(group)
}
}
/// Public ApiGroup interface
impl ApiGroup {
/// Core group name
pub const CORE_GROUP: &'static str = "";
/// Returns the name of this group.
pub fn name(&self) -> &str {
&self.name
}
/// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
///
/// This list is always non-empty, and sorted in the following order:
/// - Stable versions (newest first)
/// - Beta versions (newest first)
/// - Alpha versions (newest first)
/// - Other versions, alphabetically
///
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn versions(&self) -> impl Iterator<Item = &str> {
self.data.as_slice().iter().map(|gvd| gvd.version.as_str())
}
/// Returns preferred version for working with given group.
pub fn preferred_version(&self) -> Option<&str> {
self.preferred.as_deref()
}
/// Returns the preferred version or latest version for working with given group.
///
/// If the server does not recommend one, we pick the "most stable and most recent" version
/// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority).
pub fn preferred_version_or_latest(&self) -> &str {
// NB: self.versions is non-empty by construction in ApiGroup
self.preferred
.as_deref()
.unwrap_or_else(|| self.versions().next().unwrap())
}
/// Returns the resources in the group at an arbitrary version string.
///
/// If the group does not support this version, the returned vector is empty.
///
/// If you are looking for the API-recommended list of resources, or just one particular kind,
/// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
self.data
.iter()
.find(|gvd| gvd.version == ver)
.map(|gvd| gvd.resources.clone())
.unwrap_or_default()
}
/// Returns the recommended (preferred or latest) versioned resources in the group
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// for (ar, caps) in apigroup.recommended_resources() {
/// if !caps.supports_operation(verbs::LIST) {
/// continue;
/// }
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for inst in api.list(&Default::default()).await? {
/// println!("Found {}: {}", ar.kind, inst.name());
/// }
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`].
pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
self.versioned_resources(ver)
}
/// Returns the recommended version of the `kind` in the recommended resources (if found)
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), kube::Error> {
/// let client = Client::try_default().await?;
/// let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
/// let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
/// let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
/// for service in api.list(&Default::default()).await? {
/// println!("Found APIService: {}", service.name());
/// }
/// Ok(())
/// }
/// ```
///
/// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`.
pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> {
let ver = self.preferred_version_or_latest();
for (ar, caps) in self.versioned_resources(ver) {
if ar.kind == kind {
return Some((ar, caps));
}
}
None
}
}
| {
let apiver = gvk.api_version();
let list = if gvk.group.is_empty() {
client.list_core_api_resources(&apiver).await?
} else {
client.list_api_group_resources(&apiver).await?
};
for res in &list.resources {
if res.kind == gvk.kind && !res.name.contains('/') {
let ar = parse::parse_apiresource(res, &list.group_version)?;
let caps = parse::parse_apicapabilities(&list, &res.name)?;
return Ok((ar, caps));
}
}
Err(DiscoveryError::MissingKind(format!("{:?}", gvk)).into())
} | identifier_body |
tlcell.rs | use std::any::TypeId;
use std::cell::UnsafeCell;
use std::collections::HashSet;
use std::marker::PhantomData;
use super::Invariant;
std::thread_local! {
static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new());
}
struct NotSendOrSync(*const ());
/// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html)
/// instances.
///
/// See [crate documentation](index.html).
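///
/// A minimal usage sketch (assuming this crate is imported as `qcell`;
/// adjust the path to however the crate is named in your build):
///
/// ```ignore
/// use qcell::{TLCell, TLCellOwner};
/// struct Marker;
/// let mut owner = TLCellOwner::<Marker>::new();
/// let cell = TLCell::<Marker, u32>::new(10);
/// *owner.rw(&cell) += 1;
/// assert_eq!(*owner.ro(&cell), 11);
/// ```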
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCellOwner<Q: 'static> {
// Use NotSendOrSync to disable Send and Sync,
not_send_or_sync: PhantomData<NotSendOrSync>,
// Use Invariant<Q> for invariant parameter
typ: PhantomData<Invariant<Q>>,
}
impl<Q: 'static> Drop for TLCellOwner<Q> {
fn drop(&mut self) {
SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>()));
}
}
impl<Q: 'static> Default for TLCellOwner<Q> {
fn default() -> Self {
TLCellOwner::new()
}
}
impl<Q: 'static> TLCellOwner<Q> {
/// Create the singleton owner instance. Each owner may be used
/// to create many `TLCell` instances. There may be only one
/// instance of this type per thread at any given time for each
/// different marker type `Q`. This call panics if a second
/// simultaneous instance is created. Since the owner is only
/// valid to use in the thread it is created in, it does not
/// support `Send` or `Sync`.
pub fn new() -> Self {
SINGLETON_CHECK.with(|set| {
assert!(set.borrow_mut().insert(TypeId::of::<Q>()),
"Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter");
});
Self {
not_send_or_sync: PhantomData,
typ: PhantomData,
}
}
/// Create a new cell owned by this owner instance. See also
/// [`TLCell::new`].
///
/// [`TLCell::new`]: struct.TLCell.html
pub fn cell<T>(&self, value: T) -> TLCell<Q, T> {
TLCell::<Q, T>::new(value)
}
/// Borrow contents of a `TLCell` immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a, T: ?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T {
unsafe { &*tc.value.get() }
}
/// Borrow contents of a `TLCell` mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed.
#[inline]
pub fn rw<'a, T: ?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T {
unsafe { &mut *tc.value.get() }
}
/// Borrow contents of two `TLCell` instances mutably. Panics if
/// the two `TLCell` instances point to the same memory.
#[inline]
pub fn rw2<'a, T: ?Sized, U: ?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
) -> (&'a mut T, &'a mut U) {
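// Compare the two cells by address. Casting through `*const ()` makes fat
// pointers (e.g. cells holding `?Sized` trait objects) compare by their
// data address alone, ignoring vtable/length metadata: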
assert!(
tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize,
"Illegal to borrow same TLCell twice with rw2()"
);
unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) }
}
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of `TLCell` instances point to the same memory.
#[inline]
pub fn rw3<'a, T: ?Sized, U: ?Sized, V: ?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
assert!(
(tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize)
&& (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize)
&& (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize),
"Illegal to borrow same TLCell twice with rw3()"
);
unsafe {
(
&mut *tc1.value.get(),
&mut *tc2.value.get(),
&mut *tc3.value.get(),
)
}
}
}
/// Cell whose contents is owned (for borrowing purposes) by a
/// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another
/// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T: ?Sized> {
// Use Invariant<Q> for invariant parameter
owner: PhantomData<Invariant<Q>>,
// TLCell absolutely cannot be Sync, since otherwise you could send
// two &TLCell's to two different threads, that each have their own
// TLCellOwner<Q> instance and that could therefore both give out
// a &mut T to the same T.
//
// However, it's fine to Send a TLCell to a different thread, because
// you can only send something if nothing borrows it, so nothing can
// be accessing its contents. After sending the TLCell, the original
// TLCellOwner can no longer give access to the TLCell's contents since
// TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
// can give access to this TLCell's contents now.
//
// `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
/// Create a new `TLCell` owned for borrowing purposes by the
/// `TLCellOwner` derived from the same marker type `Q`.
#[inline]
pub const fn new(value: T) -> TLCell<Q, T> {
TLCell {
owner: PhantomData,
value: UnsafeCell::new(value),
}
}
/// Destroy the cell and return the contained value
///
/// Safety: Since this consumes the cell, there can be no other
/// references to the cell or the data at this point.
#[inline]
pub fn into_inner(self) -> T {
self.value.into_inner()
}
}
impl<Q, T: ?Sized> TLCell<Q, T> {
/// Borrow contents of this cell immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
owner.ro(self)
}
/// Borrow contents of this cell mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed. To mutably borrow from two or three
/// cells at the same time, see [`TLCellOwner::rw2`] or
/// [`TLCellOwner::rw3`].
#[inline]
pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
owner.rw(self)
}
/// Returns a mutable reference to the underlying data
///
/// Note that this is only useful at the beginning-of-life or
/// end-of-life of the cell when you have exclusive access to it.
/// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
/// get a mutable reference to the contents of the cell.
///
/// Safety: This call borrows `TLCell` mutably which guarantees
/// that we possess the only reference. This means that there can
/// be no active borrows of other forms, even ones obtained using
/// an immutable reference.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
self.value.get_mut()
}
}
impl<Q: 'static, T: Default + ?Sized> Default for TLCell<Q, T> {
fn default() -> Self {
TLCell::new(T::default())
}
}
#[cfg(test)]
mod tests {
use super::{TLCell, TLCellOwner};
#[test]
#[should_panic]
fn tlcell_singleton_1() {
struct Marker;
let _owner1 = TLCellOwner::<Marker>::new();
let _owner2 = TLCellOwner::<Marker>::new(); // Panic here
}
#[test]
fn tlcell_singleton_2() {
struct Marker;
let owner1 = TLCellOwner::<Marker>::new();
drop(owner1);
let _owner2 = TLCellOwner::<Marker>::new();
}
#[test]
fn tlcell_singleton_3() {
struct Marker1;
struct Marker2;
let _owner1 = TLCellOwner::<Marker1>::new();
let _owner2 = TLCellOwner::<Marker2>::new();
}
#[test]
fn tlcell() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
let c1 = ACell::new(100u32);
let c2 = owner.cell(200u32);
(*owner.rw(&c1)) += 1;
(*owner.rw(&c2)) += 2;
let c1ref = owner.ro(&c1);
let c2ref = owner.ro(&c2);
let total = *c1ref + *c2ref;
assert_eq!(total, 303);
}
#[test]
fn tlcell_threads() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
let mut _owner1 = ACellOwner::new();
std::thread::spawn(|| {
let mut _owner2 = ACellOwner::new();
})
.join()
.unwrap();
}
#[test]
fn tlcell_get_mut() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let owner = ACellOwner::new();
let mut cell = ACell::new(100u32);
let mut_ref = cell.get_mut();
*mut_ref = 50;
let cell_ref = owner.ro(&cell);
assert_eq!(*cell_ref, 50);
}
#[test]
fn tlcell_into_inner() {
struct Marker;
type ACell<T> = TLCell<Marker, T>;
let cell = ACell::new(100u32);
assert_eq!(cell.into_inner(), 100);
}
#[test]
fn tlcell_unsized() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
struct Squares(u32);
struct Integers(u64);
trait Series {
fn step(&mut self);
fn value(&self) -> u64;
}
impl Series for Squares {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
(self.0 as u64) * (self.0 as u64)
}
}
impl Series for Integers {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
self.0
}
}
fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> {
if is_squares | else {
Box::new(ACell::new(Integers(init as u64)))
}
}
let own = &mut owner;
let cell1 = series(4, false);
let cell2 = series(7, true);
let cell3 = series(3, true);
assert_eq!(cell1.ro(own).value(), 4);
cell1.rw(own).step();
assert_eq!(cell1.ro(own).value(), 5);
assert_eq!(own.ro(&cell2).value(), 49);
own.rw(&cell2).step();
assert_eq!(own.ro(&cell2).value(), 64);
let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3);
r1.step();
r2.step();
r3.step();
assert_eq!(cell1.ro(own).value(), 6);
assert_eq!(cell2.ro(own).value(), 81);
assert_eq!(cell3.ro(own).value(), 16);
let (r1, r2) = own.rw2(&cell1, &cell2);
r1.step();
r2.step();
assert_eq!(cell1.ro(own).value(), 7);
assert_eq!(cell2.ro(own).value(), 100);
}
}
| {
Box::new(ACell::new(Squares(init)))
} | conditional_block |
tlcell.rs | use std::any::TypeId;
use std::cell::UnsafeCell;
use std::collections::HashSet;
use std::marker::PhantomData;
use super::Invariant;
std::thread_local! {
static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new());
}
struct NotSendOrSync(*const ());
/// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html)
/// instances.
///
/// See [crate documentation](index.html).
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCellOwner<Q: 'static> {
// Use NotSendOrSync to disable Send and Sync,
not_send_or_sync: PhantomData<NotSendOrSync>,
// Use Invariant<Q> for invariant parameter
typ: PhantomData<Invariant<Q>>,
}
impl<Q: 'static> Drop for TLCellOwner<Q> {
fn drop(&mut self) {
SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>()));
}
}
impl<Q: 'static> Default for TLCellOwner<Q> {
fn default() -> Self {
TLCellOwner::new()
}
}
impl<Q: 'static> TLCellOwner<Q> {
/// Create the singleton owner instance. Each owner may be used
/// to create many `TLCell` instances. There may be only one
/// instance of this type per thread at any given time for each
/// different marker type `Q`. This call panics if a second
/// simultaneous instance is created. Since the owner is only
/// valid to use in the thread it is created in, it does not
/// support `Send` or `Sync`.
pub fn new() -> Self {
SINGLETON_CHECK.with(|set| {
assert!(set.borrow_mut().insert(TypeId::of::<Q>()),
"Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter");
});
Self {
not_send_or_sync: PhantomData,
typ: PhantomData,
}
}
/// Create a new cell owned by this owner instance. See also
/// [`TLCell::new`].
///
/// [`TLCell::new`]: struct.TLCell.html
pub fn cell<T>(&self, value: T) -> TLCell<Q, T> {
TLCell::<Q, T>::new(value)
}
/// Borrow contents of a `TLCell` immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a, T:?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T {
unsafe { &*tc.value.get() }
}
/// Borrow contents of a `TLCell` mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed.
#[inline]
pub fn rw<'a, T:?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T {
unsafe { &mut *tc.value.get() }
}
/// Borrow contents of two `TLCell` instances mutably. Panics if
/// the two `TLCell` instances point to the same memory.
#[inline]
pub fn rw2<'a, T:?Sized, U:?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
) -> (&'a mut T, &'a mut U) {
assert!(
tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize,
"Illegal to borrow same TLCell twice with rw2()"
);
unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) }
}
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of `TLCell` instances point to the same memory.
#[inline]
pub fn rw3<'a, T:?Sized, U:?Sized, V:?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
assert!(
(tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize)
&& (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize)
&& (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize),
"Illegal to borrow same TLCell twice with rw3()"
);
unsafe {
(
&mut *tc1.value.get(),
&mut *tc2.value.get(),
&mut *tc3.value.get(),
)
}
}
}
/// Cell whose contents is owned (for borrowing purposes) by a
/// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another
/// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T:?Sized> {
// Use Invariant<Q> for invariant parameter
owner: PhantomData<Invariant<Q>>,
// TLCell absolutely cannot be Sync, since otherwise you could send
// two &TLCell's to two different threads, that each have their own
// TLCellOwner<Q> instance and that could therefore both give out
// a &mut T to the same T.
//
// However, it's fine to Send a TLCell to a different thread, because
// you can only send something if nothing borrows it, so nothing can
// be accessing its contents. After sending the TLCell, the original
// TLCellOwner can no longer give access to the TLCell's contents since
// TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
// can give access to this TLCell's contents now.
//
// `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
/// Create a new `TLCell` owned for borrowing purposes by the
/// `TLCellOwner` derived from the same marker type `Q`.
#[inline]
pub const fn new(value: T) -> TLCell<Q, T> {
TLCell {
owner: PhantomData,
value: UnsafeCell::new(value),
}
}
/// Destroy the cell and return the contained value
///
/// Safety: Since this consumes the cell, there can be no other
/// references to the cell or the data at this point.
#[inline]
pub fn into_inner(self) -> T {
self.value.into_inner()
}
}
impl<Q, T:?Sized> TLCell<Q, T> {
/// Borrow contents of this cell immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
owner.ro(self)
}
/// Borrow contents of this cell mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed. To mutably borrow from two or three
/// cells at the same time, see [`TLCellOwner::rw2`] or
/// [`TLCellOwner::rw3`].
#[inline]
pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
owner.rw(self)
}
/// Returns a mutable reference to the underlying data
///
/// Note that this is only useful at the beginning-of-life or
/// end-of-life of the cell when you have exclusive access to it.
/// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
/// get a mutable reference to the contents of the cell.
///
/// Safety: This call borrows `TLCell` mutably which guarantees
/// that we possess the only reference. This means that there can
/// be no active borrows of other forms, even ones obtained using
/// an immutable reference.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
self.value.get_mut()
}
}
impl<Q:'static, T: Default +?Sized> Default for TLCell<Q, T> {
fn default() -> Self {
TLCell::new(T::default())
}
}
#[cfg(test)]
mod tests {
use super::{TLCell, TLCellOwner};
#[test]
#[should_panic]
fn tlcell_singleton_1() {
struct Marker;
let _owner1 = TLCellOwner::<Marker>::new();
let _owner2 = TLCellOwner::<Marker>::new(); // Panic here
}
#[test]
fn tlcell_singleton_2() {
struct Marker;
let owner1 = TLCellOwner::<Marker>::new();
drop(owner1);
let _owner2 = TLCellOwner::<Marker>::new();
}
#[test]
fn tlcell_singleton_3() {
struct Marker1;
struct Marker2;
let _owner1 = TLCellOwner::<Marker1>::new();
let _owner2 = TLCellOwner::<Marker2>::new();
}
#[test]
fn tlcell() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
let c1 = ACell::new(100u32);
let c2 = owner.cell(200u32);
(*owner.rw(&c1)) += 1;
(*owner.rw(&c2)) += 2;
let c1ref = owner.ro(&c1);
let c2ref = owner.ro(&c2);
let total = *c1ref + *c2ref;
assert_eq!(total, 303);
}
#[test]
fn tlcell_threads() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
let mut _owner1 = ACellOwner::new();
std::thread::spawn(|| {
let mut _owner2 = ACellOwner::new();
})
.join()
.unwrap();
}
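// Editor's sketch (not in the original suite): a `TLCell` may be *sent* to
// another thread even though it is not `Sync`; a fresh owner for the same
// marker type can then borrow it there. This is exactly the argument made
// in the comment on the `value` field above.
#[test]
fn tlcell_send_to_other_thread() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let cell = ACell::new(100u32);
std::thread::spawn(move || {
let mut owner = ACellOwner::new();
*owner.rw(&cell) += 1;
assert_eq!(*owner.ro(&cell), 101);
})
.join()
.unwrap();
}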
#[test]
fn tlcell_get_mut() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let owner = ACellOwner::new();
let mut cell = ACell::new(100u32);
let mut_ref = cell.get_mut();
*mut_ref = 50;
let cell_ref = owner.ro(&cell);
assert_eq!(*cell_ref, 50);
}
#[test]
fn tlcell_into_inner() {
struct Marker;
type ACell<T> = TLCell<Marker, T>;
let cell = ACell::new(100u32);
assert_eq!(cell.into_inner(), 100);
}
#[test]
fn tlcell_unsized() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
struct Squares(u32);
struct Integers(u64);
trait Series {
fn step(&mut self);
fn value(&self) -> u64;
}
impl Series for Squares {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
(self.0 as u64) * (self.0 as u64)
}
}
impl Series for Integers {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
self.0
}
}
fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> {
if is_squares {
Box::new(ACell::new(Squares(init)))
} else {
Box::new(ACell::new(Integers(init as u64)))
}
}
let own = &mut owner;
let cell1 = series(4, false);
let cell2 = series(7, true);
let cell3 = series(3, true);
assert_eq!(cell1.ro(own).value(), 4);
cell1.rw(own).step();
assert_eq!(cell1.ro(own).value(), 5);
assert_eq!(own.ro(&cell2).value(), 49);
own.rw(&cell2).step();
assert_eq!(own.ro(&cell2).value(), 64);
let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3);
r1.step();
r2.step();
r3.step();
assert_eq!(cell1.ro(own).value(), 6);
assert_eq!(cell2.ro(own).value(), 81);
assert_eq!(cell3.ro(own).value(), 16);
let (r1, r2) = own.rw2(&cell1, &cell2);
r1.step();
r2.step();
assert_eq!(cell1.ro(own).value(), 7);
assert_eq!(cell2.ro(own).value(), 100);
}
}
// tlcell.rs
use std::any::TypeId;
use std::cell::UnsafeCell;
use std::collections::HashSet;
use std::marker::PhantomData;
use super::Invariant;
std::thread_local! {
static SINGLETON_CHECK: std::cell::RefCell<HashSet<TypeId>> = std::cell::RefCell::new(HashSet::new());
}
struct NotSendOrSync(*const ());
/// Borrowing-owner of zero or more [`TLCell`](struct.TLCell.html)
/// instances.
///
/// See [crate documentation](index.html).
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCellOwner<Q:'static> {
// Use NotSendOrSync to disable Send and Sync,
not_send_or_sync: PhantomData<NotSendOrSync>,
// Use Invariant<Q> for invariant parameter
typ: PhantomData<Invariant<Q>>,
}
impl<Q:'static> Drop for TLCellOwner<Q> {
fn drop(&mut self) {
SINGLETON_CHECK.with(|set| set.borrow_mut().remove(&TypeId::of::<Q>()));
}
}
impl<Q:'static> Default for TLCellOwner<Q> {
fn default() -> Self {
TLCellOwner::new()
}
}
impl<Q:'static> TLCellOwner<Q> {
/// Create the singleton owner instance. Each owner may be used
/// to create many `TLCell` instances. There may be only one
/// instance of this type per thread at any given time for each
/// different marker type `Q`. This call panics if a second
/// simultaneous instance is created. Since the owner is only
/// valid to use in the thread it is created in, it does not
/// support `Send` or `Sync`.
pub fn new() -> Self {
SINGLETON_CHECK.with(|set| {
assert!(set.borrow_mut().insert(TypeId::of::<Q>()),
"Illegal to create two TLCellOwner instances within the same thread with the same marker type parameter");
});
Self {
not_send_or_sync: PhantomData,
typ: PhantomData,
}
}
/// Create a new cell owned by this owner instance. See also
/// [`TLCell::new`].
///
/// [`TLCell::new`]: struct.TLCell.html
pub fn cell<T>(&self, value: T) -> TLCell<Q, T> {
TLCell::<Q, T>::new(value)
}
/// Borrow contents of a `TLCell` immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a, T:?Sized>(&'a self, tc: &'a TLCell<Q, T>) -> &'a T {
unsafe { &*tc.value.get() }
}
/// Borrow contents of a `TLCell` mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed.
#[inline]
pub fn rw<'a, T:?Sized>(&'a mut self, tc: &'a TLCell<Q, T>) -> &'a mut T {
unsafe { &mut *tc.value.get() }
}
/// Borrow contents of two `TLCell` instances mutably. Panics if
/// the two `TLCell` instances point to the same memory.
#[inline]
pub fn rw2<'a, T:?Sized, U:?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
) -> (&'a mut T, &'a mut U) {
assert!(
tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize,
"Illegal to borrow same TLCell twice with rw2()"
);
unsafe { (&mut *tc1.value.get(), &mut *tc2.value.get()) }
}
/// Borrow contents of three `TLCell` instances mutably. Panics if
/// any pair of `TLCell` instances point to the same memory.
#[inline]
pub fn rw3<'a, T:?Sized, U:?Sized, V:?Sized>(
&'a mut self,
tc1: &'a TLCell<Q, T>,
tc2: &'a TLCell<Q, U>,
tc3: &'a TLCell<Q, V>,
) -> (&'a mut T, &'a mut U, &'a mut V) {
assert!(
(tc1 as *const _ as *const () as usize != tc2 as *const _ as *const () as usize)
&& (tc2 as *const _ as *const () as usize != tc3 as *const _ as *const () as usize)
&& (tc3 as *const _ as *const () as usize != tc1 as *const _ as *const () as usize),
"Illegal to borrow same TLCell twice with rw3()"
);
unsafe {
(
&mut *tc1.value.get(),
&mut *tc2.value.get(),
&mut *tc3.value.get(),
)
}
}
}
/// Cell whose contents is owned (for borrowing purposes) by a
/// [`TLCellOwner`].
///
/// To borrow from this cell, use the borrowing calls on the
/// [`TLCellOwner`] instance that shares the same marker type. Since
/// there may be another indistinguishable [`TLCellOwner`] in another
/// thread, `Sync` is not supported for this type. However it *is*
/// possible to send the cell to another thread, which then allows its
/// contents to be borrowed using the owner in that thread.
///
/// See also [crate documentation](index.html).
///
/// [`TLCellOwner`]: struct.TLCellOwner.html
#[repr(transparent)]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
pub struct TLCell<Q, T:?Sized> {
// Use Invariant<Q> for invariant parameter
owner: PhantomData<Invariant<Q>>,
// TLCell absolutely cannot be Sync, since otherwise you could send
// two &TLCell's to two different threads, that each have their own
// TLCellOwner<Q> instance and that could therefore both give out
// a &mut T to the same T.
//
// However, it's fine to Send a TLCell to a different thread, because
// you can only send something if nothing borrows it, so nothing can
// be accessing its contents. After sending the TLCell, the original
// TLCellOwner can no longer give access to the TLCell's contents since
// TLCellOwner is !Send + !Sync. Only the TLCellOwner of the new thread
// can give access to this TLCell's contents now.
//
// `UnsafeCell` already disables `Sync` and gives the right `Send` implementation.
value: UnsafeCell<T>,
}
impl<Q, T> TLCell<Q, T> {
/// Create a new `TLCell` owned for borrowing purposes by the
/// `TLCellOwner` derived from the same marker type `Q`.
#[inline]
pub const fn new(value: T) -> TLCell<Q, T> {
TLCell {
owner: PhantomData,
value: UnsafeCell::new(value),
}
}
/// Destroy the cell and return the contained value
///
/// Safety: Since this consumes the cell, there can be no other
/// references to the cell or the data at this point.
#[inline]
pub fn into_inner(self) -> T {
self.value.into_inner()
}
}
impl<Q, T:?Sized> TLCell<Q, T> {
/// Borrow contents of this cell immutably (read-only). Many
/// `TLCell` instances can be borrowed immutably at the same time
/// from the same owner.
#[inline]
pub fn ro<'a>(&'a self, owner: &'a TLCellOwner<Q>) -> &'a T {
owner.ro(self)
}
/// Borrow contents of this cell mutably (read-write). Only one
/// `TLCell` at a time can be borrowed from the owner using this
/// call. The returned reference must go out of scope before
/// another can be borrowed. To mutably borrow from two or three
/// cells at the same time, see [`TLCellOwner::rw2`] or
/// [`TLCellOwner::rw3`].
#[inline]
pub fn rw<'a>(&'a self, owner: &'a mut TLCellOwner<Q>) -> &'a mut T {
owner.rw(self)
}
/// Returns a mutable reference to the underlying data
///
/// Note that this is only useful at the beginning-of-life or
/// end-of-life of the cell when you have exclusive access to it.
/// Normally you'd use [`TLCell::rw`] or [`TLCellOwner::rw`] to
/// get a mutable reference to the contents of the cell.
///
/// Safety: This call borrows `TLCell` mutably which guarantees
/// that we possess the only reference. This means that there can
/// be no active borrows of other forms, even ones obtained using
/// an immutable reference.
#[inline]
pub fn get_mut(&mut self) -> &mut T {
self.value.get_mut()
}
}
impl<Q:'static, T: Default +?Sized> Default for TLCell<Q, T> {
fn default() -> Self {
TLCell::new(T::default())
}
}
#[cfg(test)]
mod tests {
use super::{TLCell, TLCellOwner};
#[test]
#[should_panic]
fn tlcell_singleton_1() {
struct Marker;
let _owner1 = TLCellOwner::<Marker>::new();
let _owner2 = TLCellOwner::<Marker>::new(); // Panic here
}
#[test]
fn tlcell_singleton_2() {
struct Marker;
let owner1 = TLCellOwner::<Marker>::new();
drop(owner1);
let _owner2 = TLCellOwner::<Marker>::new();
}
#[test]
fn tlcell_singleton_3() {
struct Marker1;
struct Marker2;
let _owner1 = TLCellOwner::<Marker1>::new();
let _owner2 = TLCellOwner::<Marker2>::new();
}
#[test]
fn tlcell() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
let c1 = ACell::new(100u32);
let c2 = owner.cell(200u32);
(*owner.rw(&c1)) += 1;
(*owner.rw(&c2)) += 2;
let c1ref = owner.ro(&c1);
let c2ref = owner.ro(&c2);
let total = *c1ref + *c2ref;
assert_eq!(total, 303);
}
#[test]
fn tlcell_threads() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
let mut _owner1 = ACellOwner::new();
std::thread::spawn(|| {
let mut _owner2 = ACellOwner::new();
})
.join()
.unwrap();
}
#[test]
fn tlcell_get_mut() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let owner = ACellOwner::new();
let mut cell = ACell::new(100u32);
let mut_ref = cell.get_mut();
*mut_ref = 50;
let cell_ref = owner.ro(&cell);
assert_eq!(*cell_ref, 50);
}
#[test]
fn tlcell_into_inner() {
struct Marker;
type ACell<T> = TLCell<Marker, T>;
let cell = ACell::new(100u32);
assert_eq!(cell.into_inner(), 100);
}
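// Editor's sketch (not in the original suite): exercises both `Default`
// impls defined above, for the owner and for a cell with a `Default`
// payload.
#[test]
fn tlcell_defaults() {
struct Marker;
let owner = TLCellOwner::<Marker>::default();
let cell: TLCell<Marker, u32> = TLCell::default();
assert_eq!(*owner.ro(&cell), 0);
}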
#[test]
fn tlcell_unsized() {
struct Marker;
type ACellOwner = TLCellOwner<Marker>;
type ACell<T> = TLCell<Marker, T>;
let mut owner = ACellOwner::new();
struct Squares(u32);
struct Integers(u64);
trait Series {
fn step(&mut self);
fn value(&self) -> u64;
}
impl Series for Squares {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
(self.0 as u64) * (self.0 as u64)
}
}
impl Series for Integers {
fn step(&mut self) {
self.0 += 1;
}
fn value(&self) -> u64 {
self.0
}
}
fn series(init: u32, is_squares: bool) -> Box<ACell<dyn Series>> {
if is_squares {
Box::new(ACell::new(Squares(init)))
} else {
Box::new(ACell::new(Integers(init as u64)))
}
}
let own = &mut owner;
let cell1 = series(4, false);
let cell2 = series(7, true);
let cell3 = series(3, true);
assert_eq!(cell1.ro(own).value(), 4);
cell1.rw(own).step();
assert_eq!(cell1.ro(own).value(), 5);
assert_eq!(own.ro(&cell2).value(), 49);
own.rw(&cell2).step();
assert_eq!(own.ro(&cell2).value(), 64);
let (r1, r2, r3) = own.rw3(&cell1, &cell2, &cell3);
r1.step();
r2.step();
r3.step();
assert_eq!(cell1.ro(own).value(), 6);
assert_eq!(cell2.ro(own).value(), 81);
assert_eq!(cell3.ro(own).value(), 16);
let (r1, r2) = own.rw2(&cell1, &cell2);
r1.step();
r2.step();
assert_eq!(cell1.ro(own).value(), 7);
assert_eq!(cell2.ro(own).value(), 100);
}
}
// mod.rs
// Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! In a leader based consensus algorithm, each participant maintains a block tree that looks like
//! the following:
//! ```text
//! Height 5 6 7 ...
//!
//! Committed -> B5 -> B6 -> B7
//! |
//! └--> B5' -> B6' -> B7'
//! |
//! └----> B7"
//! ```
//! This module implements `BlockTree` that is an in-memory representation of this tree.
#[cfg(test)]
mod block_tree_test;
use crypto::HashValue;
use failure::bail_err;
use std::collections::{hash_map, HashMap, HashSet};
/// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly
/// one parent and zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. Blocks of this kind are expected to
/// be sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
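// Editor's sketch (not part of the original module): a minimal type
// satisfying the `Block` trait above, to make the trait's contract
// concrete. The name `ExampleBlock` and the unit output/signature types
// are invented for illustration.
#[cfg(test)]
#[derive(Debug)]
struct ExampleBlock {
id: HashValue,
parent_id: HashValue,
children: HashSet<HashValue>,
committed: bool,
output: Option<()>,
}
#[cfg(test)]
impl Block for ExampleBlock {
type Output = ();
type Signature = ();
fn is_committed(&self) -> bool {
self.committed
}
fn set_committed(&mut self) {
self.committed = true;
}
fn is_executed(&self) -> bool {
// Treat "has an output" as "has been executed".
self.output.is_some()
}
fn set_output(&mut self, output: ()) {
self.output = Some(output);
}
fn set_signature(&mut self, _signature: ()) {}
fn id(&self) -> HashValue {
self.id
}
fn parent_id(&self) -> HashValue {
self.parent_id
}
fn add_child(&mut self, child_id: HashValue) {
self.children.insert(child_id);
}
fn children(&self) -> &HashSet<HashValue> {
&self.children
}
}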
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.",
id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
/// Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if !block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks the given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![];
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height.
let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and be present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if !self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&mut self, last_committed_id: HashValue) {
let mut new_block_tree = BlockTree::new(last_committed_id);
std::mem::swap(self, &mut new_block_tree);
}
}
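// Editor's sketch (not part of the original module): the intended
// add -> execute -> commit -> prune sequence, written against the
// `ExampleBlock` helper sketched above. `HashValue::zero`/`HashValue::random`
// are assumed constructors.
#[test]
fn example_flow() {
let genesis = HashValue::zero();
let id = HashValue::random();
let mut tree = BlockTree::new(genesis);
tree.add_block(ExampleBlock {
id,
parent_id: genesis,
children: HashSet::new(),
committed: false,
output: None,
})
.unwrap();
// The new head has not been executed yet, so it is offered to the VM.
assert_eq!(tree.get_block_to_execute(), Some(id));
tree.get_block_mut(id).unwrap().set_output(());
tree.mark_as_committed(id, ()).unwrap();
// Committed and executed, so prune hands it over for storage.
assert_eq!(tree.prune().len(), 1);
}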
/// An error returned by `add_block`. The error contains the block being added so the caller does
/// not lose it.
#[derive(Debug, Eq, PartialEq)]
pub enum AddBlockError<B: Block> {
ParentNotFound { block: B },
BlockAlreadyExists { block: B },
}
impl<B> AddBlockError<B>
where
B: Block,
{
pub fn into_block(self) -> B {
match self {
AddBlockError::ParentNotFound { block } => block,
AddBlockError::BlockAlreadyExists { block } => block,
}
}
}
impl<B> std::fmt::Display for AddBlockError<B>
where
B: Block,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
AddBlockError::ParentNotFound { block } => {
write!(f, "Parent block {:x} was not found.", block.parent_id())
}
AddBlockError::BlockAlreadyExists { block } => {
write!(f, "Block {:x} already exists.", block.id())
}
}
}
}
/// An error returned by `mark_as_committed`. The error contains id of the block the caller wants
/// to commit.
#[derive(Debug, Eq, PartialEq)]
pub enum CommitBlockError {
BlockNotFound { id: HashValue },
BlockAlreadyMarkedAsCommitted { id: HashValue },
}
impl std::fmt::Display for CommitBlockError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id),
CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => {
write!(f, "Block {:x} was already marked as committed.", id)
}
}
}
}
// mod.rs
// Copyright (c) The XPeer Core Contributors
// SPDX-License-Identifier: Apache-2.0
//! In a leader based consensus algorithm, each participant maintains a block tree that looks like
//! the following:
//! ```text
//! Height 5 6 7 ...
//!
//! Committed -> B5 -> B6 -> B7
//! |
//! └--> B5' -> B6' -> B7'
//! |
//! └----> B7"
//! ```
//! This module implements `BlockTree` that is an in-memory representation of this tree.
#[cfg(test)]
mod block_tree_test;
use crypto::HashValue;
use failure::bail_err;
use std::collections::{hash_map, HashMap, HashSet};
/// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly
/// one parent and zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. Blocks of this kind are expected to
/// be sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.",
id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
/// Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if !block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks the given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![];
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height.
let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and be present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if !self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&mut self, last_committed_id: HashValue) {
let mut new_block_tree = BlockTree::new(last_committed_id);
std::mem::swap(self, &mut new_block_tree);
}
}
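// Editor's sketch (not part of the original module): `reset` swaps in a
// brand-new tree, so every in-memory block is dropped and lookups start
// returning `None`. Generic over any `Block` impl; the helper name is
// invented for illustration.
#[cfg(test)]
#[allow(dead_code)]
fn restart_tree<B: Block>(tree: &mut BlockTree<B>, new_root: HashValue) {
tree.reset(new_root);
assert!(tree.get_block(new_root).is_none());
}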
/// An error returned by `add_block`. The error contains the block being added so the caller does
/// not lose it.
#[derive(Debug, Eq, PartialEq)]
pub enum AddBlockError<B: Block> {
ParentNotFound { block: B },
BlockAlreadyExists { block: B },
}
impl<B> AddBlockError<B>
where
B: Block,
{
pub fn into_block(self) -> B {
match self {
AddBlockError::ParentNotFound { block } => block,
AddBlockError::BlockAlreadyExists { block } => block,
}
}
}
impl<B> std::fmt::Display for AddBlockError<B>
where
B: Block,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
AddBlockError::ParentNotFound { block } => {
write!(f, "Parent block {:x} was not found.", block.parent_id())
}
AddBlockError::BlockAlreadyExists { block } => {
write!(f, "Block {:x} already exists.", block.id())
}
}
}
}
/// An error returned by `mark_as_committed`. The error contains id of the block the caller wants
/// to commit.
#[derive(Debug, Eq, PartialEq)]
pub enum CommitBlockError {
BlockNotFound { id: HashValue },
BlockAlreadyMarkedAsCommitted { id: HashValue },
}
impl std::fmt::Display for CommitBlockError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id),
CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => {
write!(f, "Block {:x} was already marked as committed.", id)
}
}
}
}
// mod.rs
use anyhow::{bail, format_err, Error};
// SPDX-License-Identifier: Apache-2.0
//! In a leader based consensus algorithm, each participant maintains a block tree that looks like
//! the following:
//! ```text
//! Height 5 6 7 ...
//!
//! Committed -> B5 -> B6 -> B7
//! |
//! └--> B5' -> B6' -> B7'
//! |
//! └----> B7"
//! ```
//! This module implements `BlockTree` that is an in-memory representation of this tree.
#[cfg(test)]
mod block_tree_test;
use crypto::HashValue;
use failure::bail_err;
use std::collections::{hash_map, HashMap, HashSet};
/// Each block has a unique identifier that is a `HashValue` computed by consensus. It has exactly
/// one parent and zero or more children.
pub trait Block: std::fmt::Debug {
/// The output of executing this block.
type Output;
/// The signatures on this block.
type Signature;
/// Whether consensus has decided to commit this block. This kind of blocks are expected to be
/// sent to storage very soon, unless execution is lagging behind.
fn is_committed(&self) -> bool;
/// Marks this block as committed.
fn set_committed(&mut self);
/// Whether this block has finished execution.
fn is_executed(&self) -> bool;
/// Sets the output of this block.
fn set_output(&mut self, output: Self::Output);
/// Sets the signatures for this block.
fn set_signature(&mut self, signature: Self::Signature);
/// The id of this block.
fn id(&self) -> HashValue;
/// The id of the parent block.
fn parent_id(&self) -> HashValue;
/// Adds a block as its child.
fn add_child(&mut self, child_id: HashValue);
/// The list of children of this block.
fn children(&self) -> &HashSet<HashValue>;
}
/// The `BlockTree` implementation.
#[derive(Debug)]
pub struct BlockTree<B> {
/// A map that keeps track of all existing blocks by their ids.
id_to_block: HashMap<HashValue, B>,
/// The blocks at the lowest height in the map. B5 and B5' in the following example.
/// ```text
/// Committed(B0..4) -> B5 -> B6 -> B7
/// |
/// └--> B5' -> B6' -> B7'
/// |
/// └----> B7"
/// ```
heads: HashSet<HashValue>,
/// Id of the last committed block. B4 in the above example.
last_committed_id: HashValue,
}
impl<B> BlockTree<B>
where
B: Block,
{
/// Constructs a new `BlockTree`.
pub fn new(last_committed_id: HashValue) -> Self {
BlockTree {
id_to_block: HashMap::new(),
heads: HashSet::new(),
last_committed_id,
}
}
/// Adds a new block to the tree.
pub fn add_block(&mut self, block: B) -> Result<(), AddBlockError<B>> {
assert!(!self.id_to_block.contains_key(&self.last_committed_id));
let id = block.id();
if self.id_to_block.contains_key(&id) {
bail_err!(AddBlockError::BlockAlreadyExists { block });
}
let parent_id = block.parent_id();
if parent_id == self.last_committed_id {
assert!(self.heads.insert(id), "Block already existed in heads.");
self.id_to_block.insert(id, block);
return Ok(());
}
match self.id_to_block.entry(parent_id) {
hash_map::Entry::Occupied(mut entry) => {
entry.get_mut().add_child(id);
assert!(
self.id_to_block.insert(id, block).is_none(),
"Block {:x} already existed.",
id,
);
}
hash_map::Entry::Vacant(_) => bail_err!(AddBlockError::ParentNotFound { block }),
}
Ok(())
}
/// Returns a reference to a specific block, if it exists in the tree.
pub fn get_block(&self, id: HashValue) -> Option<&B> {
self.id_to_block.get(&id)
}
/// Returns a mutable reference to a specific block, if it exists in the tree.
pub fn get_block_mut(&mut self, id: HashValue) -> Option<&mut B> {
self.id_to_block.get_mut(&id)
}
/// Returns id of a block that is ready to be sent to VM for execution (its parent has finished
/// execution), if such block exists in the tree.
pub fn get_block_to_execute(&mut self) -> Option<HashValue> {
let mut to_visit: Vec<HashValue> = self.heads.iter().cloned().collect();
while let Some(id) = to_visit.pop() {
let block = self
.id_to_block
.get(&id)
.expect("Missing block in id_to_block.");
if!block.is_executed() {
return Some(id);
}
to_visit.extend(block.children().iter().cloned());
}
None
}
/// Marks given block and all its uncommitted ancestors as committed. This does not cause these
/// blocks to be sent to storage immediately.
pub fn mark_as_committed(
&mut self,
id: HashValue,
signature: B::Signature,
) -> Result<(), CommitBlockError> {
// First put the signatures in the block. Note that if this causes multiple blocks to be
// marked as committed, only the last one will have the signatures.
match self.id_to_block.get_mut(&id) {
Some(block) => {
if block.is_committed() {
bail_err!(CommitBlockError::BlockAlreadyMarkedAsCommitted { id });
} else {
block.set_signature(signature);
}
}
None => bail_err!(CommitBlockError::BlockNotFound { id }),
}
// Mark the current block as committed. Go to parent block and repeat until a committed
// block is found, or no more blocks.
let mut current_id = id;
while let Some(block) = self.id_to_block.get_mut(¤t_id) {
if block.is_committed() {
break;
}
block.set_committed();
current_id = block.parent_id();
}
Ok(())
}
/// Removes all blocks in the tree that conflict with committed blocks. Returns a list of
/// blocks that are ready to be sent to storage (all the committed blocks that have been
/// executed).
pub fn prune(&mut self) -> Vec<B> {
let mut blocks_to_store = vec![]; | let mut current_heads = self.heads.clone();
while let Some(committed_head) = self.get_committed_head(¤t_heads) {
assert!(
current_heads.remove(&committed_head),
"committed_head should exist.",
);
for id in current_heads {
self.remove_branch(id);
}
match self.id_to_block.entry(committed_head) {
hash_map::Entry::Occupied(entry) => {
current_heads = entry.get().children().clone();
let current_id = *entry.key();
let parent_id = entry.get().parent_id();
if entry.get().is_executed() {
// If this block has been executed, all its proper ancestors must have
// finished execution and present in `blocks_to_store`.
self.heads = current_heads.clone();
self.last_committed_id = current_id;
blocks_to_store.push(entry.remove());
} else {
// The current block has not finished execution. If the parent block does
// not exist in the map, that means parent block (also committed) has been
// executed and removed. Otherwise self.heads does not need to be changed.
if!self.id_to_block.contains_key(&parent_id) {
self.heads = HashSet::new();
self.heads.insert(current_id);
}
}
}
hash_map::Entry::Vacant(_) => unreachable!("committed_head_id should exist."),
}
}
blocks_to_store
}
/// Given a list of heads, returns the committed one if it exists.
fn get_committed_head(&self, heads: &HashSet<HashValue>) -> Option<HashValue> {
let mut committed_head = None;
for head in heads {
let block = self
.id_to_block
.get(head)
.expect("Head should exist in id_to_block.");
if block.is_committed() {
assert!(
committed_head.is_none(),
"Conflicting blocks are both committed.",
);
committed_head = Some(*head);
}
}
committed_head
}
/// Removes a branch at block `head`.
fn remove_branch(&mut self, head: HashValue) {
let mut remaining = vec![head];
while let Some(current_block_id) = remaining.pop() {
let block = self
.id_to_block
.remove(¤t_block_id)
.unwrap_or_else(|| {
panic!(
"Trying to remove a non-existing block {:x}.",
current_block_id,
)
});
assert!(
!block.is_committed(),
"Trying to remove a committed block {:x}.",
current_block_id,
);
remaining.extend(block.children().iter());
}
}
/// Removes the entire subtree at block `id`.
pub fn remove_subtree(&mut self, id: HashValue) {
self.heads.remove(&id);
self.remove_branch(id);
}
/// Resets the block tree with a new `last_committed_id`. This removes all the in-memory
/// blocks.
pub fn reset(&mut self, last_committed_id: HashValue) {
let mut new_block_tree = BlockTree::new(last_committed_id);
std::mem::swap(self, &mut new_block_tree);
}
}
/// An error returned by `add_block`. The error contains the block being added so the caller does
/// not lose it.
#[derive(Debug, Eq, PartialEq)]
pub enum AddBlockError<B: Block> {
ParentNotFound { block: B },
BlockAlreadyExists { block: B },
}
impl<B> AddBlockError<B>
where
B: Block,
{
pub fn into_block(self) -> B {
match self {
AddBlockError::ParentNotFound { block } => block,
AddBlockError::BlockAlreadyExists { block } => block,
}
}
}
impl<B> std::fmt::Display for AddBlockError<B>
where
B: Block,
{
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
AddBlockError::ParentNotFound { block } => {
write!(f, "Parent block {:x} was not found.", block.parent_id())
}
AddBlockError::BlockAlreadyExists { block } => {
write!(f, "Block {:x} already exists.", block.id())
}
}
}
}
/// An error returned by `mark_as_committed`. The error contains id of the block the caller wants
/// to commit.
#[derive(Debug, Eq, PartialEq)]
pub enum CommitBlockError {
BlockNotFound { id: HashValue },
BlockAlreadyMarkedAsCommitted { id: HashValue },
}
impl std::fmt::Display for CommitBlockError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
CommitBlockError::BlockNotFound { id } => write!(f, "Block {:x} was not found.", id),
CommitBlockError::BlockAlreadyMarkedAsCommitted { id } => {
write!(f, "Block {:x} was already marked as committed.", id)
}
}
}
} |
// First find if there is a committed block in current heads. Since these blocks are at the
// same height, at most one of them can be committed. If all of them are pending we have
// nothing to do here. Otherwise, one of the branches is committed. Throw away the rest of
// them and advance to the next height. | random_line_split |
mod.rs | use anyhow::{bail, format_err, Error};
use std::ffi::{CStr, CString};
mod tm_editor;
pub use tm_editor::*;
/// Safe bindings to libc timelocal
///
/// We set tm_isdst to -1.
/// This also normalizes the parameter
pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = -1;
let epoch = unsafe { libc::mktime(t) };
if epoch == -1 {
bail!("libc::mktime failed for {:?}", t);
}
Ok(epoch)
}
/// Safe bindings to libc timegm
///
/// We set tm_isdst to 0.
/// This also normalizes the parameter
pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = 0;
let epoch = unsafe { libc::timegm(t) };
if epoch == -1 {
bail!("libc::timegm failed for {:?}", t);
}
Ok(epoch)
}
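// Editor's sketch (not in the original file): build a tm for
// 2020-12-29T17:30:00Z by hand and check it against the epoch used in
// `test_timezones` below. Note `tm_year` counts years since 1900 and
// `tm_mon` is zero-based.
#[test]
fn timegm_sketch() {
let mut t = new_libc_tm();
t.tm_year = 2020 - 1900;
t.tm_mon = 11; // December
t.tm_mday = 29;
t.tm_hour = 17;
t.tm_min = 30;
assert_eq!(timegm(&mut t).unwrap(), 1609263000);
}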
fn new_libc_tm() -> libc::tm {
libc::tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_gmtoff: 0,
tm_zone: std::ptr::null(),
}
}
/// Safe bindings to libc localtime
pub fn localtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::localtime_r(&epoch, &mut result).is_null() {
bail!("libc::localtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Safe bindings to libc gmtime
pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if SystemTime::now() returns values not
/// representable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inaccurate for values greater than 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of the missing colon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
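// Editor's sketch (not in the original tests): the sign/split arithmetic
// above turns `tm_gmtoff` seconds into the "+HH:MM" suffix; checked here
// for the +06:30 zone exercised by `test_timezones` below.
#[test]
fn gmtoff_split_sketch() {
let offset = 6 * 3600 + 30 * 60;
let mins = offset / 60;
let (hours, mins) = (mins / 60, mins % 60);
assert_eq!(format!("{:02}:{:02}", hours, mins), "06:30");
}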
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len() != 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len() != 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
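// Editor's sketch (not in the original tests): a direct round trip between
// the two conversions above, using the epoch from `test_timezones`.
#[test]
fn rfc3339_roundtrip_sketch() {
let epoch = 1609263000;
let text = epoch_to_rfc3339_utc(epoch).expect("formatting should work");
assert_eq!(text, "2020-12-29T17:30:00Z");
assert_eq!(parse_rfc3339(&text).expect("parsing should work"), epoch);
}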
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, epoch + 1);
}
#[test]
fn test_rfc3339_range() {
// also tests single-digit years/first decade values
let lower = -62167219200;
let lower_str = "0000-01-01T00:00:00Z";
let upper = 253402300799;
let upper_str = "9999-12-31T23:59:59Z";
let converted =
epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work");
assert_eq!(converted, lower_str);
let converted =
epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work");
assert_eq!(converted, upper_str);
let parsed =
parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work");
assert_eq!(parsed, lower);
let parsed =
parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work");
assert_eq!(parsed, upper);
epoch_to_rfc3339_utc(lower - 1)
.expect_err("converting below lower bound of RFC3339 range should fail");
epoch_to_rfc3339_utc(upper + 1)
.expect_err("converting above upper bound of RFC3339 range should fail");
let first_century = -59011459201;
let first_century_str = "0099-12-31T23:59:59Z";
let converted = epoch_to_rfc3339_utc(first_century)
.expect("converting epoch representing first century year should work");
assert_eq!(converted, first_century_str);
let parsed =
parse_rfc3339(first_century_str).expect("parsing first century string should work");
assert_eq!(parsed, first_century);
let first_millenium = -59011459200;
let first_millenium_str = "0100-01-01T00:00:00Z";
let converted = epoch_to_rfc3339_utc(first_millenium)
.expect("converting epoch representing first millenium year should work");
assert_eq!(converted, first_millenium_str);
let parsed =
parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work");
assert_eq!(parsed, first_millenium);
}
#[test]
fn test_gmtime_range() {
// year must fit into i32
let lower = -67768040609740800;
let upper = 67768036191676799;
let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut lower_tm).expect("converting back to epoch should work");
assert_eq!(lower, res);
gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32");
let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut upper_tm).expect("converting back to epoch should work");
assert_eq!(upper, res);
gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32");
}
#[test]
fn test_timezones() {
let input = "2020-12-30T00:00:00+06:30";
let epoch = 1609263000;
let expected_utc = "2020-12-29T17:30:00Z";
let parsed = parse_rfc3339(input).expect("parsing failed");
assert_eq!(parsed, epoch);
let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed");
assert_eq!(expected_utc, res);
}
// mod.rs
use anyhow::{bail, format_err, Error};
use std::ffi::{CStr, CString};
mod tm_editor;
pub use tm_editor::*;
/// Safe bindings to libc timelocal
///
/// We set tm_isdst to -1.
/// This also normalizes the parameter
pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = -1;
let epoch = unsafe { libc::mktime(t) };
if epoch == -1 {
bail!("libc::mktime failed for {:?}", t);
}
Ok(epoch)
}
/// Safe bindings to libc timegm
///
/// We set tm_isdst to 0.
/// This also normalizes the parameter
pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = 0;
let epoch = unsafe { libc::timegm(t) };
if epoch == -1 {
bail!("libc::timegm failed for {:?}", t);
}
Ok(epoch)
}
fn new_libc_tm() -> libc::tm {
libc::tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_gmtoff: 0,
tm_zone: std::ptr::null(),
}
}
/// Safe bindings to libc localtime
pub fn localtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::localtime_r(&epoch, &mut result).is_null() {
bail!("libc::localtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Safe bindings to libc gmtime
pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if SystemTime::now() returns values not
/// representable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inaccurate for values greater than 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn | (format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of the missing colon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len()!= 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len()!= 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, epoch + 1);
}
#[test]
fn test_rfc3339_range() {
// also tests single-digit years/first decade values
let lower = -62167219200;
let lower_str = "0000-01-01T00:00:00Z";
let upper = 253402300799;
let upper_str = "9999-12-31T23:59:59Z";
let converted =
epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work");
assert_eq!(converted, lower_str);
let converted =
epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work");
assert_eq!(converted, upper_str);
let parsed =
parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work");
assert_eq!(parsed, lower);
let parsed =
parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work");
assert_eq!(parsed, upper);
epoch_to_rfc3339_utc(lower - 1)
.expect_err("converting below lower bound of RFC3339 range should fail");
epoch_to_rfc3339_utc(upper + 1)
.expect_err("converting above upper bound of RFC3339 range should fail");
let first_century = -59011459201;
let first_century_str = "0099-12-31T23:59:59Z";
let converted = epoch_to_rfc3339_utc(first_century)
.expect("converting epoch representing first century year should work");
assert_eq!(converted, first_century_str);
let parsed =
parse_rfc3339(first_century_str).expect("parsing first century string should work");
assert_eq!(parsed, first_century);
let first_millenium = -59011459200;
let first_millenium_str = "0100-01-01T00:00:00Z";
let converted = epoch_to_rfc3339_utc(first_millenium)
.expect("converting epoch representing first millenium year should work");
assert_eq!(converted, first_millenium_str);
let parsed =
parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work");
assert_eq!(parsed, first_millenium);
}
#[test]
fn test_gmtime_range() {
// year must fit into i32
let lower = -67768040609740800;
let upper = 67768036191676799;
let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut lower_tm).expect("converting back to epoch should work");
assert_eq!(lower, res);
gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32");
let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut upper_tm).expect("converting back to epoch should work");
assert_eq!(upper, res);
gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32");
}
#[test]
fn test_timezones() {
let input = "2020-12-30T00:00:00+06:30";
let epoch = 1609263000;
let expected_utc = "2020-12-29T17:30:00Z";
let parsed = parse_rfc3339(input).expect("parsing failed");
assert_eq!(parsed, epoch);
let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed");
assert_eq!(expected_utc, res);
}
| strftime_local | identifier_name |
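The masked identifier in the row above is `strftime_local`, the local-time twin of `strftime_utc`. A hedged usage sketch (format strings are illustrative; the local variant's output depends on the process time zone):

// Sketch only: assumes strftime_utc/strftime_local from the sample above.
fn show_epoch(epoch: i64) -> Result<(), anyhow::Error> {
    let utc = strftime_utc("%Y-%m-%d %H:%M:%S", epoch)?; // fixed UTC rendering
    let local = strftime_local("%c", epoch)?;            // honors the TZ env var
    println!("utc: {utc}, local: {local}");
    Ok(())
}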
mod.rs | use anyhow::{bail, format_err, Error};
use std::ffi::{CStr, CString};
mod tm_editor;
pub use tm_editor::*;
/// Safe bindings to libc timelocal
///
/// We set tm_isdst to -1.
/// This also normalizes the parameter
pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = -1;
let epoch = unsafe { libc::mktime(t) };
if epoch == -1 {
bail!("libc::mktime failed for {:?}", t);
}
Ok(epoch)
}
/// Safe bindings to libc timegm
///
/// We set tm_isdst to 0.
/// This also normalizes the parameter
pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = 0;
let epoch = unsafe { libc::timegm(t) };
if epoch == -1 {
bail!("libc::timegm failed for {:?}", t);
}
Ok(epoch)
}
fn new_libc_tm() -> libc::tm {
libc::tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_gmtoff: 0,
tm_zone: std::ptr::null(),
}
}
/// Safe bindings to libc localtime
pub fn localtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::localtime_r(&epoch, &mut result).is_null() {
bail!("libc::localtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Safe bindings to libc gmtime
pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if SystemTime::now() returns values not
/// representable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inaccurate for values greater than 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> |
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of the missing colon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len()!= 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len()!= 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, epoch + 1);
}
#[test]
fn test_rfc3339_range() {
// also tests single-digit years/first decade values
let lower = -62167219200;
let lower_str = "0000-01-01T00:00:00Z";
let upper = 253402300799;
let upper_str = "9999-12-31T23:59:59Z";
let converted =
epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work");
assert_eq!(converted, lower_str);
let converted =
epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work");
assert_eq!(converted, upper_str);
let parsed =
parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work");
assert_eq!(parsed, lower);
let parsed =
parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work");
assert_eq!(parsed, upper);
epoch_to_rfc3339_utc(lower - 1)
.expect_err("converting below lower bound of RFC3339 range should fail");
epoch_to_rfc3339_utc(upper + 1)
.expect_err("converting above upper bound of RFC3339 range should fail");
let first_century = -59011459201;
let first_century_str = "0099-12-31T23:59:59Z";
let converted = epoch_to_rfc3339_utc(first_century)
.expect("converting epoch representing first century year should work");
assert_eq!(converted, first_century_str);
let parsed =
parse_rfc3339(first_century_str).expect("parsing first century string should work");
assert_eq!(parsed, first_century);
let first_millenium = -59011459200;
let first_millenium_str = "0100-01-01T00:00:00Z";
let converted = epoch_to_rfc3339_utc(first_millenium)
.expect("converting epoch representing first millenium year should work");
assert_eq!(converted, first_millenium_str);
let parsed =
parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work");
assert_eq!(parsed, first_millenium);
}
#[test]
fn test_gmtime_range() {
// year must fit into i32
let lower = -67768040609740800;
let upper = 67768036191676799;
let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut lower_tm).expect("converting back to epoch should work");
assert_eq!(lower, res);
gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32");
let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut upper_tm).expect("converting back to epoch should work");
assert_eq!(upper, res);
gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32");
}
#[test]
fn test_timezones() {
let input = "2020-12-30T00:00:00+06:30";
let epoch = 1609263000;
let expected_utc = "2020-12-29T17:30:00Z";
let parsed = parse_rfc3339(input).expect("parsing failed");
assert_eq!(parsed, epoch);
let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed");
assert_eq!(expected_utc, res);
}
| {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
} | identifier_body |
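The `identifier_body` middle above is the two-line body of `strftime_utc`. The sign convention in `parse_rfc3339` deserves one worked example: a timestamp with a positive offset names a wall clock ahead of UTC, so the offset is subtracted from the naive epoch. Using the values from `test_timezones`:

// Sketch of the offset arithmetic in parse_rfc3339 (values from the
// module's test_timezones): "2020-12-30T00:00:00+06:30" first parses as
// if it were UTC, then the +06:30 offset is subtracted.
let naive_epoch = 1609286400_i64;             // "2020-12-30T00:00:00" read as UTC
let offset = (6 * 3600 + 30 * 60) as i64;     // "+06:30" in seconds
assert_eq!(naive_epoch - offset, 1609263000); // == 2020-12-29T17:30:00Z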
mod.rs | use anyhow::{bail, format_err, Error};
use std::ffi::{CStr, CString};
mod tm_editor;
pub use tm_editor::*;
/// Safe bindings to libc timelocal
///
/// We set tm_isdst to -1.
/// This also normalizes the parameter
pub fn timelocal(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = -1;
let epoch = unsafe { libc::mktime(t) };
if epoch == -1 {
bail!("libc::mktime failed for {:?}", t);
}
Ok(epoch)
}
/// Safe bindings to libc timegm
///
/// We set tm_isdst to 0.
/// This also normalizes the parameter
pub fn timegm(t: &mut libc::tm) -> Result<i64, Error> {
t.tm_isdst = 0;
let epoch = unsafe { libc::timegm(t) };
if epoch == -1 {
bail!("libc::timegm failed for {:?}", t);
}
Ok(epoch)
}
fn new_libc_tm() -> libc::tm {
libc::tm {
tm_sec: 0,
tm_min: 0,
tm_hour: 0,
tm_mday: 0,
tm_mon: 0,
tm_year: 0,
tm_wday: 0,
tm_yday: 0,
tm_isdst: 0,
tm_gmtoff: 0,
tm_zone: std::ptr::null(),
}
}
/// Safe bindings to libc localtime
pub fn localtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::localtime_r(&epoch, &mut result).is_null() {
bail!("libc::localtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Safe bindings to libc gmtime
pub fn gmtime(epoch: i64) -> Result<libc::tm, Error> {
let mut result = new_libc_tm();
unsafe {
if libc::gmtime_r(&epoch, &mut result).is_null() {
bail!("libc::gmtime failed for '{}'", epoch);
}
}
Ok(result)
}
/// Returns Unix Epoch (now)
///
/// Note: This panics if SystemTime::now() returns values not
/// representable as i64 (should never happen).
pub fn epoch_i64() -> i64 {
use std::convert::TryFrom;
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
i64::try_from(now.duration_since(UNIX_EPOCH).unwrap().as_secs())
.expect("epoch_i64: now is too large")
} else {
-i64::try_from(UNIX_EPOCH.duration_since(now).unwrap().as_secs())
.expect("epoch_i64: now is too small")
}
}
/// Returns Unix Epoch (now) as f64 with subseconds resolution
///
/// Note: This can be inaccurate for values greater than 2^53. But this
/// should never happen.
pub fn epoch_f64() -> f64 {
use std::time::{SystemTime, UNIX_EPOCH};
let now = SystemTime::now();
if now > UNIX_EPOCH {
now.duration_since(UNIX_EPOCH).unwrap().as_secs_f64()
} else {
-UNIX_EPOCH.duration_since(now).unwrap().as_secs_f64()
}
}
// rust libc bindings do not include strftime
#[link(name = "c")]
extern "C" {
#[link_name = "strftime"]
fn libc_strftime(
s: *mut libc::c_char,
max: libc::size_t,
format: *const libc::c_char,
time: *const libc::tm,
) -> libc::size_t;
}
/// Safe bindings to libc strftime
pub fn strftime(format: &str, t: &libc::tm) -> Result<String, Error> {
let format = CString::new(format)?;
let mut buf = vec![0u8; 8192];
let res = unsafe {
libc_strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len() as libc::size_t,
format.as_ptr(),
t as *const libc::tm,
)
};
let len = nix::errno::Errno::result(res).map(|r| r as usize)?;
if len == 0 {
bail!("strftime: result len is 0 (string too large)");
};
let c_str = CStr::from_bytes_with_nul(&buf[..len + 1])?;
let str_slice: &str = c_str.to_str().unwrap();
Ok(str_slice.to_owned())
}
/// Format epoch as local time
pub fn strftime_local(format: &str, epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
strftime(format, &localtime)
}
/// Format epoch as utc time
pub fn strftime_utc(format: &str, epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
strftime(format, &gmtime)
}
/// Convert Unix epoch into RFC3339 UTC string
pub fn epoch_to_rfc3339_utc(epoch: i64) -> Result<String, Error> {
let gmtime = gmtime(epoch)?;
let year = gmtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339_utc: wrong year '{}'", year);
}
strftime("%010FT%TZ", &gmtime)
}
/// Convert Unix epoch into RFC3339 local time with TZ
pub fn epoch_to_rfc3339(epoch: i64) -> Result<String, Error> {
let localtime = localtime(epoch)?;
let year = localtime.tm_year + 1900;
if year < 0 || year > 9999 {
bail!("epoch_to_rfc3339: wrong year '{}'", year);
}
// Note: We cannot use strftime %z because of the missing colon
let mut offset = localtime.tm_gmtoff;
let prefix = if offset < 0 {
offset = -offset;
'-'
} else {
'+'
};
let mins = offset / 60;
let hours = mins / 60;
let mins = mins % 60;
let mut s = strftime("%10FT%T", &localtime)?;
s.push(prefix);
s.push_str(&format!("{:02}:{:02}", hours, mins));
Ok(s)
}
/// Parse RFC3339 into Unix epoch
pub fn parse_rfc3339(input_str: &str) -> Result<i64, Error> {
let input = input_str.as_bytes();
let expect = |pos: usize, c: u8| {
if input[pos] != c {
bail!("unexpected char at pos {}", pos);
}
Ok(())
};
let digit = |pos: usize| -> Result<i32, Error> {
let digit = input[pos] as i32;
if digit < 48 || digit > 57 {
bail!("unexpected char at pos {}", pos);
}
Ok(digit - 48)
};
let check_max = |i: i32, max: i32| {
if i > max {
bail!("value too large ({} > {})", i, max);
}
Ok(i)
};
crate::try_block!({
if input.len() < 20 || input.len() > 25 {
bail!("timestamp of unexpected length");
}
let tz = input[19];
match tz {
b'Z' => {
if input.len()!= 20 {
bail!("unexpected length in UTC timestamp");
}
}
b'+' | b'-' => {
if input.len()!= 25 {
bail!("unexpected length in timestamp");
}
}
_ => bail!("unexpected timezone indicator"),
}
let mut tm = TmEditor::new(true);
tm.set_year(digit(0)? * 1000 + digit(1)? * 100 + digit(2)? * 10 + digit(3)?)?;
expect(4, b'-')?;
tm.set_mon(check_max(digit(5)? * 10 + digit(6)?, 12)?)?;
expect(7, b'-')?;
tm.set_mday(check_max(digit(8)? * 10 + digit(9)?, 31)?)?;
expect(10, b'T')?;
tm.set_hour(check_max(digit(11)? * 10 + digit(12)?, 23)?)?;
expect(13, b':')?;
tm.set_min(check_max(digit(14)? * 10 + digit(15)?, 59)?)?;
expect(16, b':')?;
tm.set_sec(check_max(digit(17)? * 10 + digit(18)?, 60)?)?;
let epoch = tm.into_epoch()?;
if tz == b'Z' {
return Ok(epoch);
}
let hours = check_max(digit(20)? * 10 + digit(21)?, 23)?;
expect(22, b':')?;
let mins = check_max(digit(23)? * 10 + digit(24)?, 59)?;
let offset = (hours * 3600 + mins * 60) as i64;
let epoch = match tz {
b'+' => epoch - offset,
b'-' => epoch + offset,
_ => unreachable!(), // already checked above
};
Ok(epoch)
})
.map_err(|err| {
format_err!(
"failed to parse rfc3339 timestamp ({:?}) - {}",
input_str,
err
)
})
}
#[test]
fn test_leap_seconds() {
let convert_reconvert = |epoch| {
let rfc3339 =
epoch_to_rfc3339_utc(epoch).expect("leap second epoch to rfc3339 should work");
let parsed =
parse_rfc3339(&rfc3339).expect("parsing converted leap second epoch should work");
assert_eq!(epoch, parsed);
};
// 2005-12-31T23:59:59Z was followed by a leap second
let epoch = 1136073599;
convert_reconvert(epoch);
convert_reconvert(epoch + 1);
convert_reconvert(epoch + 2);
let parsed = parse_rfc3339("2005-12-31T23:59:60Z").expect("parsing leap second should work");
assert_eq!(parsed, epoch + 1);
}
#[test]
fn test_rfc3339_range() {
// also tests single-digit years/first decade values
let lower = -62167219200;
let lower_str = "0000-01-01T00:00:00Z";
let upper = 253402300799;
let upper_str = "9999-12-31T23:59:59Z";
let converted =
epoch_to_rfc3339_utc(lower).expect("converting lower bound of RFC3339 range should work");
assert_eq!(converted, lower_str);
| let parsed =
parse_rfc3339(lower_str).expect("parsing lower bound of RFC3339 range should work");
assert_eq!(parsed, lower);
let parsed =
parse_rfc3339(upper_str).expect("parsing upper bound of RFC3339 range should work");
assert_eq!(parsed, upper);
epoch_to_rfc3339_utc(lower - 1)
.expect_err("converting below lower bound of RFC3339 range should fail");
epoch_to_rfc3339_utc(upper + 1)
.expect_err("converting above upper bound of RFC3339 range should fail");
let first_century = -59011459201;
let first_century_str = "0099-12-31T23:59:59Z";
let converted = epoch_to_rfc3339_utc(first_century)
.expect("converting epoch representing first century year should work");
assert_eq!(converted, first_century_str);
let parsed =
parse_rfc3339(first_century_str).expect("parsing first century string should work");
assert_eq!(parsed, first_century);
let first_millenium = -59011459200;
let first_millenium_str = "0100-01-01T00:00:00Z";
let converted = epoch_to_rfc3339_utc(first_millenium)
.expect("converting epoch representing first millenium year should work");
assert_eq!(converted, first_millenium_str);
let parsed =
parse_rfc3339(first_millenium_str).expect("parsing first millenium string should work");
assert_eq!(parsed, first_millenium);
}
#[test]
fn test_gmtime_range() {
// year must fit into i32
let lower = -67768040609740800;
let upper = 67768036191676799;
let mut lower_tm = gmtime(lower).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut lower_tm).expect("converting back to epoch should work");
assert_eq!(lower, res);
gmtime(lower - 1).expect_err("gmtime should fail for years not fitting into i32");
let mut upper_tm = gmtime(upper).expect("gmtime should work as long as years fit into i32");
let res = timegm(&mut upper_tm).expect("converting back to epoch should work");
assert_eq!(upper, res);
gmtime(upper + 1).expect_err("gmtime should fail for years not fitting into i32");
}
#[test]
fn test_timezones() {
let input = "2020-12-30T00:00:00+06:30";
let epoch = 1609263000;
let expected_utc = "2020-12-29T17:30:00Z";
let parsed = parse_rfc3339(input).expect("parsing failed");
assert_eq!(parsed, epoch);
let res = epoch_to_rfc3339_utc(parsed).expect("converting to RFC failed");
assert_eq!(expected_utc, res);
} | let converted =
epoch_to_rfc3339_utc(upper).expect("converting upper bound of RFC3339 range should work");
assert_eq!(converted, upper_str);
| random_line_split |
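This row's `random_line_split` middle falls inside `test_rfc3339_range`. The underlying `gmtime`/`timegm` pair is likewise a strict inverse, which is what `test_gmtime_range` leans on; a compact sketch assuming both functions from the sample above:

// Sketch only: gmtime splits an epoch into UTC calendar fields and timegm
// recombines them (normalizing the struct), so a round trip is exact.
fn tm_round_trip(epoch: i64) -> Result<(), anyhow::Error> {
    let mut tm = gmtime(epoch)?;
    assert_eq!(timegm(&mut tm)?, epoch);
    Ok(())
}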
mc6845.rs | /*
MartyPC
https://github.com/dbalsom/martypc
Copyright 2022-2023 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 {
reg: [u8; 18], // Externally-accessible CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
self.reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match (byte & CURSOR_ATTR_MASK) >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self, | self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read/write
self.reg[14] = byte & 0x3F;
self.update_cursor_address();
}
CrtcRegister::CursorAddressL => {
// (R15) 8 bit read/write
self.reg[15] = byte;
self.update_cursor_address();
}
CrtcRegister::LightPenPositionH => {
// (R16) 6 bit read only
}
CrtcRegister::LightPenPositionL => {
// (R17) 8 bit read only
}
}
}
pub fn read_register(&self) -> u8 {
match self.reg_select {
CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => {
self.reg[self.reg_select as usize]
}
_ => REGISTER_UNREADABLE_VALUE
}
}
pub fn read_address(&self) -> u16 {
self.vma
}
fn update_start_address(&mut self) {
self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16
}
fn update_cursor_address(&mut self) {
self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16
}
} | "CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
); | random_line_split |
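The mc6845 sample above models the usual two-step register protocol: write an index to the address register, then write the data. A hedged driver sketch against this `Crtc6845` (register numbers follow the enum order; only R14–R17 read back, everything else returns 0x00):

// Sketch only: programs the 14-bit start address through R12/R13 of the
// Crtc6845 type defined above (select_register picks the index, then
// write_register stores the masked value).
fn program_start_address(crtc: &mut Crtc6845, addr: u16) {
    crtc.select_register(12);               // R12: StartAddressH (6 bits kept)
    crtc.write_register((addr >> 8) as u8);
    crtc.select_register(13);               // R13: StartAddressL
    crtc.write_register(addr as u8);
}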
mc6845.rs | /*
MartyPC
https://github.com/dbalsom/martypc
Copyright 2022-2023 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 | reg: [u8; 18], // Externally-accessible CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
self.reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match (byte & CURSOR_ATTR_MASK) >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read/write
self.reg[14] = byte & 0x3F;
self.update_cursor_address();
}
CrtcRegister::CursorAddressL => {
// (R15) 8 bit read/write
self.reg[15] = byte;
self.update_cursor_address();
}
CrtcRegister::LightPenPositionH => {
// (R16) 6 bit read only
}
CrtcRegister::LightPenPositionL => {
// (R17) 8 bit read only
}
}
}
pub fn read_register(&self) -> u8 {
match self.reg_select {
CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => {
self.reg[self.reg_select as usize]
}
_ => REGISTER_UNREADABLE_VALUE
}
}
pub fn read_address(&self) -> u16 {
self.vma
}
fn update_start_address(&mut self) {
self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16
}
fn update_cursor_address(&mut self) {
self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16
}
} | {
| identifier_name |
mc6845.rs | /*
MartyPC
https://github.com/dbalsom/martypc
Copyright 2022-2023 Daniel Balsom
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the “Software”),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------
devices::mc6845.rs
Implementation of the Motorola MC6845 CRT controller.
Used internally by the MDA and CGA video cards.
*/
use crate::tracelogger::TraceLogger;
const CURSOR_LINE_MASK: u8 = 0b0000_1111;
const CURSOR_ATTR_MASK: u8 = 0b0011_0000;
const REGISTER_MAX: usize = 17;
const REGISTER_UNREADABLE_VALUE: u8 = 0x00;
#[derive (Copy, Clone, Debug)]
pub enum CrtcRegister {
HorizontalTotal,
HorizontalDisplayed,
HorizontalSyncPosition,
SyncWidth,
VerticalTotal,
VerticalTotalAdjust,
VerticalDisplayed,
VerticalSync,
InterlaceMode,
MaximumScanlineAddress,
CursorStartLine,
CursorEndLine,
StartAddressH,
StartAddressL,
CursorAddressH,
CursorAddressL,
LightPenPositionH,
LightPenPositionL,
}
use crate::mc6845::CrtcRegister::*;
macro_rules! trace {
($self:ident, $($t:tt)*) => {{
$self.trace_logger.print(&format!($($t)*));
$self.trace_logger.print("\n".to_string());
}};
}
macro_rules! trace_regs {
($self:ident) => {
$self.trace_logger.print(
&format!("")
/*
&format!(
"[SL:{:03} HCC:{:03} VCC:{:03} VT:{:03} VS:{:03}] ",
$self.scanline,
$self.hcc_c0,
$self.vcc_c4,
$self.crtc_vertical_total,
$self.crtc_vertical_sync_pos
)
*/
);
};
}
pub struct Crtc6845 {
reg: [u8; 18], // Externally-accessible CRTC register file
reg_select: CrtcRegister, // Selected CRTC register
start_address: u16, // Calculated value from R12 & R13
cursor_address: u16, // Calculated value from R14 & R15
lightpen_position: u16, // Calculated value from R16 & R17
cursor_status: bool,
cursor_start_line: u8,
cursor_slow_blink: bool,
cursor_blink_rate: f64,
display_enable: bool, // True if we are counting in the display area, false otherwise
hcc_c0: u8, // Horizontal character counter (x pos of character)
vlc_c9: u8, // Vertical line counter - counts during vsync period
vcc_c4: u8, // Vertical character counter (y pos of character)
vsc_c3h: u8,
hsc_c3l: u8,
vtac_c5: u8,
vma: u16, // VMA register - Video memory address
vma_t: u16, // VMA' register - Video memory address temporary
trace_logger: TraceLogger,
}
impl Crtc6845 {
fn new(trace_logger: TraceLogger) -> Self {
Self {
reg: [0; 18],
reg_select: HorizontalTotal,
start_address: 0,
cursor_address: 0,
lightpen_position: 0,
cursor_status: false,
cursor_start_line: 0,
cursor_slow_blink: false,
cursor_blink_rate: 0.0,
display_enable: false,
hcc_c0: 0,
vlc_c9: 0,
vcc_c4: 0,
vsc_c3h: 0,
hsc_c3l: 0,
vtac_c5: 0,
vma: 0,
vma_t: 0,
trace_logger
}
}
pub fn select_register(&mut self, idx: usize) {
if idx > REGISTER_MAX {
return
}
self.reg_select = match idx {
0 => HorizontalTotal,
1 => HorizontalDisplayed,
2 => HorizontalSyncPosition,
3 => SyncWidth,
4 => VerticalTotal,
5 => VerticalTotalAdjust,
6 => VerticalDisplayed,
7 => VerticalSync,
8 => InterlaceMode,
9 => MaximumScanlineAddress,
10 => CursorStartLine,
11 => CursorEndLine,
12 => StartAddressH,
13 => StartAddressL,
14 => CursorAddressH,
15 => CursorAddressL,
16 => LightPenPositionH,
_ => LightPenPositionL,
};
}
pub fn write_register(&mut self, byte: u8) {
|
trace_regs!(self);
trace!(
self,
"CRTC Register Write (04h): VerticalTotal updated: {}",
self.reg[4]
)
},
CrtcRegister::VerticalTotalAdjust => {
// (R5) 5 bit write only
self.reg[5] = byte & 0x1F;
}
CrtcRegister::VerticalDisplayed => {
// (R6) 7 bit write only
self.reg[6] = byte & 0x7F;
},
CrtcRegister::VerticalSync => {
// (R7) 7 bit write only
self.reg[7] = byte & 0x7F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (07h): VerticalSync updated: {}",
self.reg[7]
)
},
CrtcRegister::InterlaceMode => {
// (R8) 2 bit write only
self.reg[8] = byte & 0x03;
},
CrtcRegister::MaximumScanlineAddress => {
// (R9) 5 bit write only
self.reg[9] = byte & 0x1F;
}
CrtcRegister::CursorStartLine => {
// (R10) 7 bit bitfield. Write only.
self.reg[10] = byte & 0x7F;
self.cursor_start_line = byte & CURSOR_LINE_MASK;
match (byte & CURSOR_ATTR_MASK) >> 4 {
0b00 | 0b10 => {
self.cursor_status = true;
self.cursor_slow_blink = false;
}
0b01 => {
self.cursor_status = false;
self.cursor_slow_blink = false;
}
_ => {
self.cursor_status = true;
self.cursor_slow_blink = true;
}
}
}
CrtcRegister::CursorEndLine => {
// (R11) 5 bit write only
self.reg[11] = byte & 0x1F;
}
CrtcRegister::StartAddressH => {
// (R12) 6 bit write only
self.reg[12] = byte & 0x3F;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Ch): StartAddressH updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::StartAddressL => {
// (R13) 8 bit write only
self.reg[13] = byte;
trace_regs!(self);
trace!(
self,
"CRTC Register Write (0Dh): StartAddressL updated: {:02X}",
byte
);
self.update_start_address();
}
CrtcRegister::CursorAddressH => {
// (R14) 6 bit read/write
self.reg[14] = byte & 0x3F;
self.update_cursor_address();
}
CrtcRegister::CursorAddressL => {
// (R15) 8 bit read/write
self.reg[15] = byte;
self.update_cursor_address();
}
CrtcRegister::LightPenPositionH => {
// (R16) 6 bit read only
}
CrtcRegister::LightPenPositionL => {
// (R17) 8 bit read only
}
}
}
pub fn read_register(&self) -> u8 {
match self.reg_select {
CursorAddressH | CursorAddressL | LightPenPositionH | LightPenPositionL => {
self.reg[self.reg_select as usize]
}
_ => REGISTER_UNREADABLE_VALUE
}
}
pub fn read_address(&self) -> u16 {
self.vma
}
fn update_start_address(&mut self) {
self.start_address = (self.reg[12] as u16) << 8 | self.reg[13] as u16
}
fn update_cursor_address(&mut self) {
self.cursor_address = (self.reg[14] as u16) << 8 | self.reg[15] as u16
}
} | match self.reg_select {
CrtcRegister::HorizontalTotal => {
// (R0) 8 bit write only
self.reg[0] = byte;
},
CrtcRegister::HorizontalDisplayed => {
// (R1) 8 bit write only
self.reg[1] = byte;
}
CrtcRegister::HorizontalSyncPosition => {
// (R2) 8 bit write only
self.reg[2] = byte;
},
CrtcRegister::SyncWidth => {
// (R3) 8 bit write only
self.reg[3] = byte;
},
CrtcRegister::VerticalTotal => {
// (R4) 7 bit write only
self.reg[4] = byte & 0x7F; | identifier_body |
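The `conditional_block` middle above opens `write_register`'s match. For the `CursorStartLine` arm, R10 packs two fields into one byte; a small decode sketch mirroring the masks defined in this file:

// Sketch of the R10 bitfield decode: bits 0-3 are the cursor start line,
// bits 4-5 select the blink attribute (0b01 hides the cursor, 0b11 picks
// the slow blink), matching the CursorStartLine arm of write_register.
let byte: u8 = 0b0010_0101;
let line = byte & 0b0000_1111;          // CURSOR_LINE_MASK -> 5
let attr = (byte & 0b0011_0000) >> 4;   // CURSOR_ATTR_MASK -> 0b10
assert_eq!((line, attr), (5, 0b10));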
coeditor.rs | use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() |
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) {
self.inner.paint(ctx, data, env)
}
}
| {
return;
} | conditional_block |
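// ---------------------------------------------------------------------------
// Illustrative sketch (added for exposition, not part of the dataset row
// above): the index transformation used by the edit handler, extracted into
// a standalone, runnable snippet. When the byte range `begin..end` is
// replaced by `content`, an index before the edit is untouched, an index
// after it shifts by the length delta, and an index inside the replaced
// range snaps to the end of the inserted text. The `Edit` struct here is a
// hypothetical stand-in for the crate's own type.
struct Edit {
    begin: usize,
    end: usize,
    content: String,
}

fn transform_index(edit: &Edit, x: usize) -> usize {
    if x < edit.begin {
        x
    } else if x > edit.end {
        // Shift by (inserted length) - (removed length); no underflow since
        // x > end guarantees x + begin + content.len() >= end.
        x + edit.begin + edit.content.len() - edit.end
    } else {
        edit.begin + edit.content.len()
    }
}

fn main() {
    // Replace bytes 2..4 with a 3-byte string: the net length delta is +1.
    let edit = Edit { begin: 2, end: 4, content: "abc".into() };
    assert_eq!(transform_index(&edit, 1), 1); // before the edit: unchanged
    assert_eq!(transform_index(&edit, 3), 5); // inside: end of the insert
    assert_eq!(transform_index(&edit, 6), 7); // after: shifted by +1
}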
coeditor.rs | use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap; | use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
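// ---------------------------------------------------------------------------
// Illustrative sketch (added for exposition, not from the original source):
// the shutdown pattern `try_connect` relies on, reduced to its core. A
// broadcast channel carries the close signal, and `tokio::select!` races it
// against the connection's I/O future, so whichever completes first decides
// whether the caller should reconnect (`Some(())`) or stop (`None`). The
// `connection` future is a stand-in for the websocket read/write futures.
async fn run_until_closed(
    close_tx: tokio::sync::broadcast::Sender<()>,
    connection: impl std::future::Future<Output = ()>,
) -> Option<()> {
    let mut close_rx = close_tx.subscribe();
    tokio::select! {
        // Deliberate local shutdown: tell the reconnect loop to stop.
        _ = close_rx.recv() => None,
        // The connection ended on its own: ask the loop to reconnect.
        _ = connection => Some(()),
    }
}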
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) {
self.inner.paint(ctx, data, env)
}
} | use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async; | random_line_split |
coeditor.rs | use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) |
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) {
self.inner.paint(ctx, data, env)
}
}
| {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
} | identifier_body |
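// ---------------------------------------------------------------------------
// Illustrative sketch (added for exposition, not part of the dataset row
// above): the teardown idea behind the `Drop` body, as a runnable snippet.
// A synchronous destructor cannot `.await`, so the widget drives the join
// handle with `futures::executor::block_on`; here the timeout is awaited
// directly inside an async `main` to keep the example self-contained.
use std::time::Duration;

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async {
        tokio::time::sleep(Duration::from_millis(10)).await;
    });
    // Bound the wait so a stuck task cannot hang the teardown forever.
    let joined = tokio::time::timeout(Duration::from_secs(5), handle).await;
    assert!(joined.is_ok(), "task should finish well before the timeout");
}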
coeditor.rs | use druid::{Selector, WidgetPod, WidgetId, ExtEventSink, EventCtx, Event, Env, Widget, UpdateCtx, LayoutCtx, PaintCtx, BoxConstraints, Target, LifeCycle, LifeCycleCtx, Size};
use std::sync::Arc;
use tokio::sync::broadcast::{Sender};
use tokio::task::JoinHandle;
use parking_lot::RwLock;
use crate::{RustpadClient, Edit};
use std::time::Duration;
use crate::editor_binding::EditorBinding;
use crate::code_editor::code_editor::CodeEditor;
use crate::code_editor::text::{Selection, EditableText};
use tokio::sync::broadcast;
use std::collections::HashMap;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::connect_async;
use futures::StreamExt;
use log::{info, warn};
pub const COEDITOR_INIT_CLIENT: Selector<Arc<RwLock<RustpadClient>>> = Selector::new("coeditor-init-client");
pub const USER_EDIT_SELECTOR: Selector<Edit> = Selector::new("user-edit");
pub const USER_CURSOR_UPDATE_SELECTOR: Selector<()> = Selector::new("user-cursor-data");
fn create_connection_loop(client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> JoinHandle<()> {
tokio::spawn(async move {
info!("connecting");
let conn = client.read().server_url.clone();
loop {
let x = Arc::clone(&client);
if try_connect(&conn, x, close_tx.clone()).await.is_none() {
break;
}
tokio::time::sleep(Duration::from_millis(1000)).await;
warn!("Reconnecting...");
}
})
}
async fn try_connect(connect_addr: &String, client: Arc<RwLock<RustpadClient>>, close_tx: Sender<()>) -> Option<()> {
let url = url::Url::parse(connect_addr).unwrap();
let (ws_tx, ws_rx) = futures_channel::mpsc::unbounded::<Message>();
client.write().ws_sender = Some(ws_tx.clone());
client.write().users.clear();
//
let res = connect_async(url).await;
if res.is_err() {
eprintln!("{:?}", res.err().unwrap());
return Some(());
}
let (ws_stream, _) = res.unwrap();
println!("WebSocket handshake has been successfully completed");
client.read().on_connected.invoke(());
let (write, read) = ws_stream.split();
let websocket_tx = ws_rx.map(Ok).forward(write);
let client2 = Arc::clone(&client);
let receive_handler =
read.for_each(|message| async {
if message.is_err() {
return;
}
let data = message.unwrap().to_string();
println!("Received: {}", &data);
client2.write().handle_message(serde_json::from_slice(data.as_bytes()).expect("parse data failed"));
});
client.write().send_info();
client.write().send_cursor_data();
if let Some(outstanding) = &client.read().outstanding {
client.write().send_operation(outstanding);
}
let mut close_rx = close_tx.subscribe();
tokio::select! {
_ = close_rx.recv() => {
ws_tx.unbounded_send(Message::Close(None)).unwrap();
println!("client closed.");
return None;
}
_ = websocket_tx => {}
_ = receive_handler => {
println!("server closed");
}
}
println!("{} disconnected", &connect_addr);
client.write().ws_sender = None;
Some(())
}
pub struct CoEditorWidget {
inner: WidgetPod<EditorBinding, CodeEditor<EditorBinding>>,
id: WidgetId,
pub server_url: String,
client: Option<Arc<RwLock<RustpadClient>>>,
connection_handle: Option<JoinHandle<()>>,
event_sink: Option<ExtEventSink>,
close_tx: Sender<()>,
last_selection: Selection,
}
impl Drop for CoEditorWidget {
fn drop(&mut self) {
self.close_tx.send(()).unwrap();
futures::executor::block_on(
tokio::time::timeout(Duration::from_secs(5),
self.connection_handle.take().unwrap(),
)
);
println!("CoEditorWidget destructed");
}
}
impl CoEditorWidget {
pub fn new(server_url: String) -> Self {
println!("CoEditorWidget created");
CoEditorWidget {
inner: WidgetPod::new(CodeEditor::<EditorBinding>::multiline()),
server_url,
id: WidgetId::next(),
client: None,
connection_handle: None,
event_sink: None,
close_tx: broadcast::channel(1).0,
last_selection: Selection::default(),
}
}
}
impl Widget<EditorBinding> for CoEditorWidget {
fn | (&mut self, ctx: &mut EventCtx, event: &Event, data: &mut EditorBinding, env: &Env) {
if let Event::Command(cmd) = event {
println!("received {:?}", cmd);
}
match event {
Event::Command(command) if command.get(COEDITOR_INIT_CLIENT).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
let client = command.get(COEDITOR_INIT_CLIENT).unwrap();
data.set_client(client);
println!("editor binding client initialized");
}
Event::Command(command) if command.get(USER_EDIT_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received edit command");
let edit = command.get(USER_EDIT_SELECTOR).unwrap();
let selection = self.inner.widget().text().borrow().selection();
let transform_selection = |selection: Selection| -> Selection {
let transform_index = |x: usize| -> usize {
if x < edit.begin {
x
} else if x > edit.end {
x + edit.begin + edit.content.len() - edit.end
} else {
edit.begin + edit.content.len()
}
};
Selection::new(
transform_index(selection.anchor),
transform_index(selection.active),
)
};
data.edit_without_callback(edit);
let _ = self.inner.widget_mut().text_mut().borrow_mut().set_selection(transform_selection(selection));
self.inner.widget_mut().text_mut().borrow_mut().decorations.iter_mut()
.for_each(|(_, b)| *b = transform_selection(b.clone()));
}
Event::Command(command) if command.get(USER_CURSOR_UPDATE_SELECTOR).is_some()
&& command.target() == Target::Widget(ctx.widget_id()) => {
println!("received cursor command");
let content = &data.content;
let unicode_offset_to_utf8_offset = |offset: u32| -> usize {
content.iter().take(offset as usize).collect::<String>().len()
};
let mut new_decorations = HashMap::new();
let my_id = self.client.as_ref().unwrap().read().id();
self.client.as_ref().unwrap().read().user_cursors.iter()
.filter(|(&id, _)| id != my_id)
.filter(|(_, data)| !data.selections.is_empty())
.for_each(|(&id, sel)| {
new_decorations.insert(id, Selection::new(
unicode_offset_to_utf8_offset(sel.selections[0].0),
unicode_offset_to_utf8_offset(sel.selections[0].1),
));
});
self.inner.widget_mut().text_mut().borrow_mut().decorations = new_decorations;
}
_ => self.inner.event(ctx, event, data, env)
}
}
fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &EditorBinding, env: &Env) {
self.inner.lifecycle(ctx, event, data, env);
match event {
LifeCycle::WidgetAdded => {
self.id = ctx.widget_id();
println!("CoEditorWidget initialized with id: {:?}", self.id);
self.event_sink = Some(ctx.get_external_handle());
let client = RustpadClient::create(self.server_url.clone());
client.write().widget_id = Some(self.id);
client.write().set_event_sink(
self.event_sink.as_ref().unwrap().clone(),
self.id,
);
self.client = Some(Arc::clone(&client));
ctx.get_external_handle().submit_command(
COEDITOR_INIT_CLIENT,
Box::new(Arc::clone(&client)),
Target::Widget(self.id),
).expect("send command failed");
self.connection_handle = Some(create_connection_loop(client, self.close_tx.clone()));
}
_ => {}
}
}
fn update(&mut self, ctx: &mut UpdateCtx, old_data: &EditorBinding, data: &EditorBinding, env: &Env) {
if old_data.after_edits.len() != data.after_edits.len() {
println!("editor binding's callback changed from {} to {}", old_data.after_edits.len(), data.after_edits.len());
}
let new_selection = self.inner.widget().text().borrow().selection();
if self.last_selection != new_selection {
self.last_selection = new_selection;
let borrow = self.inner.widget_mut().text_mut().borrow_mut();
let content = &borrow.layout.text().unwrap().content_as_string;
let active = content.slice(0..new_selection.active).unwrap_or_default().to_string().chars().count() as u32;
let anchor = content.slice(0..new_selection.anchor).unwrap_or_default().to_string().chars().count() as u32;
self.client.as_ref().unwrap().write().update_and_send_cursor_data((active, anchor));
}
self.inner.update(ctx, data, env);
}
fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, data: &EditorBinding, env: &Env) -> Size {
self.inner.layout(ctx, bc, data, env)
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &EditorBinding, env: &Env) {
self.inner.paint(ctx, data, env)
}
}
| event | identifier_name |
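// ---------------------------------------------------------------------------
// Illustrative sketch (added for exposition, not part of the dataset row
// above): the offset conversion used by the cursor handler. Remote peers
// exchange selections as Unicode scalar (char) offsets, while the local
// buffer indexes by UTF-8 bytes, so incoming offsets are re-measured
// against the current content.
fn char_offset_to_byte_offset(content: &[char], offset: u32) -> usize {
    // `String::len` counts bytes, which is exactly the unit we need.
    content.iter().take(offset as usize).collect::<String>().len()
}

fn main() {
    let content: Vec<char> = "héllo".chars().collect();
    // 'h' is 1 byte and 'é' is 2 bytes, so 2 chars in means 3 bytes in.
    assert_eq!(char_offset_to_byte_offset(&content, 2), 3);
}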
lib.rs | /*!
[](https://ci.appveyor.com/project/jaemk/self-update/branch/master)
[](https://travis-ci.org/jaemk/self_update)
[](https://crates.io/crates/self_update)
[](https://docs.rs/self_update)
`self_update` provides updaters for updating rust executables in-place from various release
distribution backends.
```shell
self_update = "0.4"
```
## Usage
Update (replace) the current executable with the latest release downloaded
from `https://api.github.com/repos/jaemk/self_update/releases/latest`.
Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for
producing release-builds via CI (travis/appveyor).
```
#[macro_use] extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let status = self_update::backends::github::Update::configure()?
.repo_owner("jaemk")
.repo_name("self_update")
.target(&target)
.bin_name("self_update_example")
.show_download_progress(true)
.current_version(cargo_crate_version!())
.build()?
.update()?;
println!("Update status: `{}`!", status.version());
Ok(())
}
# fn main() { }
```
Run the above example to see `self_update` in action: `cargo run --example github`
Separate utilities are also exposed:
```
extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let releases = self_update::backends::github::ReleaseList::configure()
.repo_owner("jaemk")
.repo_name("self_update")
.with_target(&target)
.build()?
.fetch()?;
println!("found releases:");
println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
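// Illustrative sketch (added for exposition, not from the original source):
// `cfg!` expands to a compile-time boolean, which is why `get_target` can
// assemble the triple at runtime without conditional compilation. A trimmed
// stand-alone version of the same idea:
fn demo_target_parts() {
    let arch = if cfg!(target_arch = "x86_64") { "x86_64" } else { "other" };
    let os = if cfg!(target_os = "linux") {
        "unknown-linux"
    } else if cfg!(target_os = "macos") {
        "apple-darwin"
    } else {
        "other"
    };
    println!("this build targets roughly {}-{}", arch, os);
}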
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
/// Flush a message to stdout and check that the user responds `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if !s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true,
_ => false,
}
}
/// Returns `true` if `Status::Updated`
pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn | (&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
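// Illustrative sketch (added for exposition, not from the original source):
// typical use of the builder above to unpack a gzipped tarball; both paths
// are hypothetical.
fn unpack_release(tarball: &path::Path, dest: &path::Path) -> Result<()> {
    Extract::from_source(tarball)
        .archive(ArchiveKind::Tar)
        .encoding(EncodingKind::Gz)
        .extract_into(dest)
}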
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir must be explicitly provided, since `replace` operations require
/// files to live on the same filesystem.
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self {
self.temp = Some(temp);
self
}
/// Move source file to specified destination
pub fn to_dest(&self, dest: &path::Path) -> Result<()> {
match self.temp {
None => {
fs::rename(self.source, dest)?;
}
Some(temp) => {
if dest.exists() {
fs::rename(dest, temp)?;
match fs::rename(self.source, dest) {
Err(e) => {
fs::rename(temp, dest)?;
return Err(Error::from(e))
}
Ok(_) => (),
};
} else {
fs::rename(self.source, dest)?;
}
}
};
Ok(())
}
}
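// Illustrative sketch (added for exposition, not from the original source):
// replacing the currently running executable with a freshly extracted
// binary. The temp path must live on the same filesystem as the destination
// for the rename dance to work; both paths are hypothetical.
fn swap_current_exe(new_bin: &path::Path, tmp: &path::Path) -> Result<()> {
    Move::from_source(new_bin)
        .replace_using_temp(tmp)
        .to_dest(&::std::env::current_exe()?)
}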
/// Download things into files
///
/// With optional progress bar
#[derive(Debug)]
pub struct Download {
show_progress: bool,
url: String,
}
impl Download {
/// Specify download url
pub fn from_url(url: &str) -> Self {
Self {
show_progress: false,
url: url.to_owned(),
}
}
/// Toggle download progress bar
pub fn show_progress(&mut self, b: bool) -> &mut Self {
self.show_progress = b;
self
}
/// Download the file behind the given `url` into the specified `dest`.
/// Show a sliding progress bar if specified.
/// If the resource doesn't specify a content-length, the progress bar will not be shown
///
/// * Errors:
/// * `reqwest` network errors
/// * Unsuccessful response status
/// * Progress-bar errors
/// * Reading from response to `BufReader`-buffer
/// * Writing from `BufReader`-buffer to `File`
pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> {
use io::BufRead;
set_ssl_vars!();
let resp = reqwest::get(&self.url)?;
let size = resp.headers()
.get::<reqwest::header::ContentLength>()
.map(|ct_len| **ct_len)
.unwrap_or(0);
if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) }
let show_progress = if size == 0 { false } else { self.show_progress };
let mut src = io::BufReader::new(resp);
let mut bar = if show_progress {
let mut bar = pbr::ProgressBar::new(size);
bar.set_units(pbr::Units::Bytes);
bar.format("[=> ]");
Some(bar)
} else { None };
loop {
let n = {
let buf = src.fill_buf()?;
dest.write_all(buf)?;
buf.len()
};
if n == 0 { break; }
src.consume(n);
if let Some(ref mut bar) = bar {
bar.add(n as u64);
}
}
if show_progress { println!("... Done"); }
Ok(())
}
}
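// Illustrative sketch (added for exposition, not from the original source):
// downloading into an in-memory buffer. Any `io::Write` works as the
// destination, so a `Vec<u8>` suffices for small payloads; the URL is
// hypothetical.
fn fetch_release_notes() -> Result<Vec<u8>> {
    let mut buf = Vec::new();
    Download::from_url("https://example.com/RELEASES.txt")
        .show_progress(false)
        .download_to(&mut buf)?;
    Ok(buf)
}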
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
fn can_determine_target_arch() {
let target = get_target();
assert!(target.is_ok(), "{:?}", target);
let target = target.unwrap();
if let Ok(env_target) = env::var("TARGET") {
assert_eq!(target, env_target);
}
}
}
| extract_into | identifier_name |
lib.rs | /*!
[](https://ci.appveyor.com/project/jaemk/self-update/branch/master)
[](https://travis-ci.org/jaemk/self_update)
[](https://crates.io/crates/self_update)
[](https://docs.rs/self_update)
`self_update` provides updaters for updating rust executables in-place from various release
distribution backends.
```shell
self_update = "0.4"
```
## Usage
Update (replace) the current executable with the latest release downloaded
from `https://api.github.com/repos/jaemk/self_update/releases/latest`.
Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for
producing release-builds via CI (travis/appveyor).
```
#[macro_use] extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let status = self_update::backends::github::Update::configure()?
.repo_owner("jaemk")
.repo_name("self_update")
.target(&target)
.bin_name("self_update_example")
.show_download_progress(true)
.current_version(cargo_crate_version!())
.build()?
.update()?;
println!("Update status: `{}`!", status.version());
Ok(())
}
# fn main() { }
```
Run the above example to see `self_update` in action: `cargo run --example github`
Separate utilities are also exposed:
```
extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let releases = self_update::backends::github::ReleaseList::configure()
.repo_owner("jaemk")
.repo_name("self_update")
.with_target(&target)
.build()?
.fetch()?;
println!("found releases:");
println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
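// Illustrative sketch (added for exposition, not from the original source):
// the comparison `should_update` performs. `semver::Version` implements
// `Ord` following semantic-versioning precedence, so plain `>` also handles
// pre-release tags correctly.
fn demo_version_ordering() -> Result<()> {
    use semver::Version;
    assert!(Version::parse("0.4.2")? > Version::parse("0.4.1")?);
    assert!(Version::parse("1.0.0")? > Version::parse("1.0.0-beta.1")?);
    Ok(())
}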
/// Flush a message to stdout and check that the user responds `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if !s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true, | pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
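// Illustrative sketch (added for exposition, not from the original source):
// the trait-object trick `extract_into` uses. Boxing the reader erases the
// concrete type, so a plain stream and a gzip-decoding wrapper can share
// one unpacking code path; this stand-alone version works over in-memory
// bytes.
fn reader_for(gzipped: bool, bytes: Vec<u8>) -> Box<io::Read> {
    let source = io::Cursor::new(bytes);
    if gzipped {
        Box::new(flate2::read::GzDecoder::new(source))
    } else {
        Box::new(source)
    }
}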
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir must be explicitly provided, since `replace` operations require
/// files to live on the same filesystem.
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self {
self.temp = Some(temp);
self
}
/// Move source file to specified destination
pub fn to_dest(&self, dest: &path::Path) -> Result<()> {
match self.temp {
None => {
fs::rename(self.source, dest)?;
}
Some(temp) => {
if dest.exists() {
fs::rename(dest, temp)?;
match fs::rename(self.source, dest) {
Err(e) => {
fs::rename(temp, dest)?;
return Err(Error::from(e))
}
Ok(_) => (),
};
} else {
fs::rename(self.source, dest)?;
}
}
};
Ok(())
}
}
/// Download things into files
///
/// With optional progress bar
#[derive(Debug)]
pub struct Download {
show_progress: bool,
url: String,
}
impl Download {
/// Specify download url
pub fn from_url(url: &str) -> Self {
Self {
show_progress: false,
url: url.to_owned(),
}
}
/// Toggle download progress bar
pub fn show_progress(&mut self, b: bool) -> &mut Self {
self.show_progress = b;
self
}
/// Download the file behind the given `url` into the specified `dest`.
/// Show a sliding progress bar if specified.
/// If the resource doesn't specify a content-length, the progress bar will not be shown
///
/// * Errors:
/// * `reqwest` network errors
/// * Unsuccessful response status
/// * Progress-bar errors
/// * Reading from response to `BufReader`-buffer
/// * Writing from `BufReader`-buffer to `File`
pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> {
use io::BufRead;
set_ssl_vars!();
let resp = reqwest::get(&self.url)?;
let size = resp.headers()
.get::<reqwest::header::ContentLength>()
.map(|ct_len| **ct_len)
.unwrap_or(0);
if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) }
let show_progress = if size == 0 { false } else { self.show_progress };
let mut src = io::BufReader::new(resp);
let mut bar = if show_progress {
let mut bar = pbr::ProgressBar::new(size);
bar.set_units(pbr::Units::Bytes);
bar.format("[=> ]");
Some(bar)
} else { None };
loop {
let n = {
let buf = src.fill_buf()?;
dest.write_all(buf)?;
buf.len()
};
if n == 0 { break; }
src.consume(n);
if let Some(ref mut bar) = bar {
bar.add(n as u64);
}
}
if show_progress { println!("... Done"); }
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
fn can_determine_target_arch() {
let target = get_target();
assert!(target.is_ok(), "{:?}", target);
let target = target.unwrap();
if let Ok(env_target) = env::var("TARGET") {
assert_eq!(target, env_target);
}
}
} | _ => false,
}
}
/// Returns `true` if `Status::Updated` | random_line_split |
lib.rs | /*!
[](https://ci.appveyor.com/project/jaemk/self-update/branch/master)
[](https://travis-ci.org/jaemk/self_update)
[](https://crates.io/crates/self_update)
[](https://docs.rs/self_update)
`self_update` provides updaters for updating rust executables in-place from various release
distribution backends.
```shell
self_update = "0.4"
```
## Usage
Update (replace) the current executable with the latest release downloaded
from `https://api.github.com/repos/jaemk/self_update/releases/latest`.
Note, the [`trust`](https://github.com/japaric/trust) project provides a nice setup for
producing release-builds via CI (travis/appveyor).
```
#[macro_use] extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let status = self_update::backends::github::Update::configure()?
.repo_owner("jaemk")
.repo_name("self_update")
.target(&target)
.bin_name("self_update_example")
.show_download_progress(true)
.current_version(cargo_crate_version!())
.build()?
.update()?;
println!("Update status: `{}`!", status.version());
Ok(())
}
# fn main() { }
```
Run the above example to see `self_update` in action: `cargo run --example github`
Separate utilities are also exposed:
```
extern crate self_update;
fn update() -> Result<(), Box<::std::error::Error>> {
let target = self_update::get_target()?;
let releases = self_update::backends::github::ReleaseList::configure()
.repo_owner("jaemk")
.repo_name("self_update")
.with_target(&target)
.build()?
.fetch()?;
println!("found releases:");
println!("{:#?}\n", releases);
// get the first available release
let asset = releases[0]
.asset_for(&target).unwrap();
let tmp_dir = self_update::TempDir::new_in(::std::env::current_dir()?, "self_update")?;
let tmp_tarball_path = tmp_dir.path().join(&asset.name);
let tmp_tarball = ::std::fs::File::open(&tmp_tarball_path)?;
self_update::Download::from_url(&asset.download_url)
.download_to(&tmp_tarball)?;
self_update::Extract::from_source(&tmp_tarball_path)
.archive(self_update::ArchiveKind::Tar)
.encoding(self_update::EncodingKind::Gz)
.extract_into(&tmp_dir.path())?;
let tmp_file = tmp_dir.path().join("replacement_tmp");
let bin_name = "self_update_bin";
let bin_path = tmp_dir.path().join(bin_name);
self_update::Move::from_source(&bin_path)
.replace_using_temp(&tmp_file)
.to_dest(&::std::env::current_exe()?)?;
Ok(())
}
# fn main() { }
```
*/
extern crate serde_json;
extern crate reqwest;
extern crate tempdir;
extern crate flate2;
extern crate tar;
extern crate semver;
extern crate pbr;
pub use tempdir::TempDir;
use std::fs;
use std::io;
use std::path;
#[macro_use] mod macros;
pub mod errors;
pub mod backends;
pub mod version;
use errors::*;
/// Try to determine the current target triple.
///
/// Returns a target triple (e.g. `x86_64-unknown-linux-gnu` or `i686-pc-windows-msvc`) or an
/// `Error::Config` if the current config cannot be determined or is not some combination of the
/// following values:
/// `linux, mac, windows` -- `i686, x86, armv7` -- `gnu, musl, msvc`
///
/// * Errors:
/// * Unexpected system config
pub fn get_target() -> Result<String> {
let arch_config = (cfg!(target_arch = "x86"), cfg!(target_arch = "x86_64"), cfg!(target_arch = "arm"));
let arch = match arch_config {
(true, _, _) => "i686",
(_, true, _) => "x86_64",
(_, _, true) => "armv7",
_ => bail!(Error::Update, "Unable to determine target-architecture"),
};
let os_config = (cfg!(target_os = "linux"), cfg!(target_os = "macos"), cfg!(target_os = "windows"));
let os = match os_config {
(true, _, _) => "unknown-linux",
(_, true, _) => "apple-darwin",
(_, _, true) => "pc-windows",
_ => bail!(Error::Update, "Unable to determine target-os"),
};
let s;
let os = if cfg!(target_os = "macos") {
os
} else {
let env_config = (cfg!(target_env = "gnu"), cfg!(target_env = "musl"), cfg!(target_env = "msvc"));
let env = match env_config {
(true, _, _) => "gnu",
(_, true, _) => "musl",
(_, _, true) => "msvc",
_ => bail!(Error::Update, "Unable to determine target-environment"),
};
s = format!("{}-{}", os, env);
&s
};
Ok(format!("{}-{}", arch, os))
}
/// Check if a version tag is greater than the current
#[deprecated(since="0.4.2", note="`should_update` functionality has been moved to `version::bump_is_greater`.\
`version::bump_is_compatible` should be used instead.")]
pub fn should_update(current: &str, latest: &str) -> Result<bool> {
use semver::Version;
Ok(Version::parse(latest)? > Version::parse(current)?)
}
/// Flush a message to stdout and check that the user responds `yes`.
/// Interprets a blank response as yes.
///
/// * Errors:
/// * Io flushing
/// * User entered anything other than enter/Y/y
fn confirm(msg: &str) -> Result<()> {
print_flush!("{}", msg);
let mut s = String::new();
io::stdin().read_line(&mut s)?;
let s = s.trim().to_lowercase();
if !s.is_empty() && s != "y" {
bail!(Error::Update, "Update aborted");
}
Ok(())
}
/// Status returned after updating
///
/// Wrapped `String`s are version tags
#[derive(Debug, Clone)]
pub enum Status {
UpToDate(String),
Updated(String),
}
impl Status {
/// Return the version tag
pub fn version(&self) -> &str {
use Status::*;
match *self {
UpToDate(ref s) => s,
Updated(ref s) => s,
}
}
/// Returns `true` if `Status::UpToDate`
pub fn uptodate(&self) -> bool {
match *self {
Status::UpToDate(_) => true,
_ => false,
}
}
/// Returns `true` if `Status::Updated`
pub fn updated(&self) -> bool {
match *self {
Status::Updated(_) => true,
_ => false,
}
}
}
impl std::fmt::Display for Status {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
use Status::*;
match *self {
UpToDate(ref s) => write!(f, "UpToDate({})", s),
Updated(ref s) => write!(f, "Updated({})", s),
}
}
}
/// Supported archive formats
#[derive(Debug)]
pub enum ArchiveKind {
Tar,
Plain,
}
/// Supported encoding formats
#[derive(Debug)]
pub enum EncodingKind {
Gz,
Plain,
}
/// Extract contents of an encoded archive (e.g. tar.gz) file to a specified directory
///
/// * Errors:
/// * Io - opening files
/// * Io - gzip decoding
/// * Io - archive unpacking
#[derive(Debug)]
pub struct Extract<'a> {
source: &'a path::Path,
archive: ArchiveKind,
encoding: EncodingKind,
}
impl<'a> Extract<'a> {
pub fn from_source(source: &'a path::Path) -> Extract<'a> {
Self {
source: source,
archive: ArchiveKind::Plain,
encoding: EncodingKind::Plain,
}
}
pub fn archive(&mut self, kind: ArchiveKind) -> &mut Self {
self.archive = kind;
self
}
pub fn encoding(&mut self, kind: EncodingKind) -> &mut Self {
self.encoding = kind;
self
}
pub fn extract_into(&self, into_dir: &path::Path) -> Result<()> {
let source = fs::File::open(self.source)?;
let archive: Box<io::Read> = match self.encoding {
EncodingKind::Plain => Box::new(source),
EncodingKind::Gz => {
let reader = flate2::read::GzDecoder::new(source);
Box::new(reader)
},
};
match self.archive {
ArchiveKind::Plain => (),
ArchiveKind::Tar => {
let mut archive = tar::Archive::new(archive);
archive.unpack(into_dir)?;
}
};
Ok(())
}
}
/// Moves a file from the given path to the specified destination.
///
/// `source` and `dest` must be on the same filesystem.
/// If `replace_using_temp` is provided, the destination file will be
/// replaced using the given temp path as a backup in case of `io` errors.
///
/// * Errors:
/// * Io - copying / renaming
#[derive(Debug)]
pub struct Move<'a> {
source: &'a path::Path,
temp: Option<&'a path::Path>,
}
impl<'a> Move<'a> {
/// Specify source file
pub fn from_source(source: &'a path::Path) -> Move<'a> {
Self {
source: source,
temp: None,
}
}
/// If specified and the destination file already exists, the destination
/// file will be "safely" replaced using a temp path.
/// The `temp` dir must be explicitly provided, since `replace` operations require
/// files to live on the same filesystem.
pub fn replace_using_temp(&mut self, temp: &'a path::Path) -> &mut Self {
self.temp = Some(temp);
self
}
/// Move source file to specified destination
pub fn to_dest(&self, dest: &path::Path) -> Result<()> | Ok(())
}
}
/// Download things into files
///
/// With optional progress bar
#[derive(Debug)]
pub struct Download {
show_progress: bool,
url: String,
}
impl Download {
/// Specify download url
pub fn from_url(url: &str) -> Self {
Self {
show_progress: false,
url: url.to_owned(),
}
}
/// Toggle download progress bar
pub fn show_progress(&mut self, b: bool) -> &mut Self {
self.show_progress = b;
self
}
/// Download the file behind the given `url` into the specified `dest`.
/// Show a sliding progress bar if specified.
/// If the resource doesn't specify a content-length, the progress bar will not be shown
///
/// * Errors:
/// * `reqwest` network errors
/// * Unsuccessful response status
/// * Progress-bar errors
/// * Reading from response to `BufReader`-buffer
/// * Writing from `BufReader`-buffer to `File`
pub fn download_to<T: io::Write>(&self, mut dest: T) -> Result<()> {
use io::BufRead;
set_ssl_vars!();
let resp = reqwest::get(&self.url)?;
let size = resp.headers()
.get::<reqwest::header::ContentLength>()
.map(|ct_len| **ct_len)
.unwrap_or(0);
if !resp.status().is_success() { bail!(Error::Update, "Download request failed with status: {:?}", resp.status()) }
let show_progress = if size == 0 { false } else { self.show_progress };
let mut src = io::BufReader::new(resp);
let mut bar = if show_progress {
let mut bar = pbr::ProgressBar::new(size);
bar.set_units(pbr::Units::Bytes);
bar.format("[=> ]");
Some(bar)
} else { None };
loop {
let n = {
let buf = src.fill_buf()?;
dest.write_all(buf)?;
buf.len()
};
if n == 0 { break; }
src.consume(n);
if let Some(ref mut bar) = bar {
bar.add(n as u64);
}
}
if show_progress { println!("... Done"); }
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
#[test]
fn can_determine_target_arch() {
let target = get_target();
assert!(target.is_ok(), "{:?}", target);
let target = target.unwrap();
if let Ok(env_target) = env::var("TARGET") {
assert_eq!(target, env_target);
}
}
}
| {
match self.temp {
None => {
fs::rename(self.source, dest)?;
}
Some(temp) => {
if dest.exists() {
fs::rename(dest, temp)?;
match fs::rename(self.source, dest) {
Err(e) => {
fs::rename(temp, dest)?;
return Err(Error::from(e))
}
Ok(_) => (),
};
} else {
fs::rename(self.source, dest)?;
}
}
}; | identifier_body |
mod.rs | mod default_types;
mod jsont;
mod stats;
use crate::cache::Digest;
use crate::process::ShellCommand;
use anyhow::Result;
use once_cell::sync::Lazy;
use std::borrow::Cow;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::process::Command;
use utils::display_width;
pub use self::jsont::{Match, Message, SubMatch};
pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| {
std::process::Command::new("rg")
.arg("--version")
.stdout(std::process::Stdio::null())
.status()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
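// Illustrative sketch (added for exposition, not from the original source):
// the availability probe behind `RG_EXISTS`, generalized to any executable
// name. Spawning `<bin> --version` with stdout silenced is a cheap,
// portable way to check that a tool is on PATH and runs.
fn binary_available(bin: &str) -> bool {
    std::process::Command::new(bin)
        .arg("--version")
        .stdout(std::process::Stdio::null())
        .status()
        .map(|exit_status| exit_status.success())
        .unwrap_or(false)
}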
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language for a given file extension.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
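// Illustrative sketch (added for exposition, not from the original source):
// resolving the ripgrep language for a path, e.g. to pass along as
// `rg --type <lang>`.
fn rg_type_for(path: &Path) -> Option<&str> {
    let ext = path.extension()?.to_str()?;
    get_language(ext).copied()
}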
/// Word represents the input query surrounded by word boundaries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize,
pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
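// Illustrative sketch (added for exposition, not from the original source):
// constructing a `Word` whose regex only matches at word boundaries, which
// is what lets `find` locate the true start of the query inside a line.
fn word_for(query: &str) -> Result<Word> {
    let re = regex::Regex::new(&format!(r"\b{}\b", regex::escape(query)))?;
    Ok(Word::new(query.to_string(), re))
}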
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// we need to first find the offset of search word in the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are the same, especially
// if `absolute_offset` is equal, the two Matches can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
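// Hedged example: one line of `rg --json` output fed through the
// `TryFrom<&str>` impl above. The JSON shape follows ripgrep's published
// message format; the literal is illustrative, not taken from real output.
#[cfg(test)]
mod parse_tests {
    use super::Match;
    use std::convert::TryFrom;

    #[test]
    fn parses_a_match_message() {
        let line = r#"{"type":"match","data":{"path":{"text":"src/lib.rs"},"lines":{"text":"fn main() {}\n"},"line_number":1,"absolute_offset":0,"submatches":[{"match":{"text":"main"},"start":3,"end":7}]}}"#;
        let mat = Match::try_from(line).expect("should deserialize");
        assert_eq!(mat.line_number(), 1);
        assert_eq!(mat.column(), 3);
    }
}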
impl Match {
/// Returns a pair of the formatted `String` and the offset of the original match indices.
///
/// The formatted String is the same as the output line produced by rg's --vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for the dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case ''.";
// Used for creating the cache in an async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self { shell_cmd }
}
pub fn cache_digest(&self) -> Option<Digest> {
self.shell_cmd.cache_digest()
}
pub async fn create_cache(self) -> Result<Digest> {
let cache_file = self.shell_cmd.cache_file_path()?;
let std_cmd = rg_command(&self.shell_cmd.dir);
let mut tokio_cmd = tokio::process::Command::from(std_cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?;
Ok(digest)
}
}
pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command {
// Cannot use StdCommand, as it joins the args, which does not work for some reason.
let mut cmd = Command::new(RG_ARGS[0]);
// Do not use --vimgrep here.
cmd.args(&RG_ARGS[1..]).current_dir(dir);
cmd
}
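// Hedged sketch: running the command above with `--json` appended (the JSON
// message stream is what the `TryFrom` impls expect) and collecting matches.
// `run_rg_once` is a hypothetical helper, not part of this module's API.
fn run_rg_once(dir: &Path) -> Result<Vec<Match>> {
    let output = rg_command(dir).arg("--json").output()?;
    Ok(output
        .stdout
        .split(|&b| b == b'\n')
        .filter(|line| !line.is_empty())
        .filter_map(|line| Match::try_from(line).ok()) // skip begin/end/summary messages
        .collect())
}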
pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> {
let shell_cmd = rg_shell_command(dir.as_ref());
let cache_file_path = shell_cmd.cache_file_path()?;
let mut cmd = rg_command(dir.as_ref());
crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?;
let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?;
Ok(digest)
}
#[inline]
pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand | {
ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref()))
} | identifier_body |
|
mod.rs | mod default_types;
mod jsont;
mod stats;
use crate::cache::Digest;
use crate::process::ShellCommand;
use anyhow::Result;
use once_cell::sync::Lazy;
use std::borrow::Cow;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::process::Command;
use utils::display_width;
pub use self::jsont::{Match, Message, SubMatch};
pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| {
std::process::Command::new("rg")
.arg("--version")
.stdout(std::process::Stdio::null())
.status()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language for the given file extension.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query surrounded by word boundaries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize, | pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// so we first need to find the offset of the search word within the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are the same, especially
// if `absolute_offset` is equal, the two Matches can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of the original match indices.
///
/// The formatted String is the same as the output line produced by rg's --vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for the dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case ''.";
// Used for creating the cache in an async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self { shell_cmd }
}
pub fn cache_digest(&self) -> Option<Digest> {
self.shell_cmd.cache_digest()
}
pub async fn create_cache(self) -> Result<Digest> {
let cache_file = self.shell_cmd.cache_file_path()?;
let std_cmd = rg_command(&self.shell_cmd.dir);
let mut tokio_cmd = tokio::process::Command::from(std_cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?;
Ok(digest)
}
}
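// Hedged usage sketch: prefer an existing cache digest and only shell out to
// ripgrep when there is none. Assumes a tokio runtime; the helper name is
// hypothetical.
async fn cached_digest_or_create(dir: PathBuf) -> Result<Digest> {
    let cmd = RgTokioCommand::new(dir);
    if let Some(digest) = cmd.cache_digest() {
        return Ok(digest);
    }
    cmd.create_cache().await
}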
pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command {
// Cannot use StdCommand, as it joins the args, which does not work for some reason.
let mut cmd = Command::new(RG_ARGS[0]);
// Do not use --vimgrep here.
cmd.args(&RG_ARGS[1..]).current_dir(dir);
cmd
}
pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> {
let shell_cmd = rg_shell_command(dir.as_ref());
let cache_file_path = shell_cmd.cache_file_path()?;
let mut cmd = rg_command(dir.as_ref());
crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?;
let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?;
Ok(digest)
}
#[inline]
pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand {
ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref()))
} | random_line_split |
|
mod.rs | mod default_types;
mod jsont;
mod stats;
use crate::cache::Digest;
use crate::process::ShellCommand;
use anyhow::Result;
use once_cell::sync::Lazy;
use std::borrow::Cow;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::process::Command;
use utils::display_width;
pub use self::jsont::{Match, Message, SubMatch};
pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| {
std::process::Command::new("rg")
.arg("--version")
.stdout(std::process::Stdio::null())
.status()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language for the given file extension.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query surrounded by word boundaries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize,
pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// so we first need to find the offset of the search word within the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
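// Illustrative check of the offset arithmetic: a submatch spanning 3..7 in the
// raw text, rendered after a 12-byte prefix, highlights bytes 15..19.
#[cfg(test)]
mod submatch_offset_tests {
    #[test]
    fn shifts_span_by_offset() {
        assert_eq!(super::range(3, 7, 12), 15..19);
    }
}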
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are the same, especially
// if `absolute_offset` is equal, the two Matches can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else |
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of the original match indices.
///
/// The formatted String is the same as the output line produced by rg's --vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn pattern_priority(&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for the dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case ''.";
// Used for creating the cache in an async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self { shell_cmd }
}
pub fn cache_digest(&self) -> Option<Digest> {
self.shell_cmd.cache_digest()
}
pub async fn create_cache(self) -> Result<Digest> {
let cache_file = self.shell_cmd.cache_file_path()?;
let std_cmd = rg_command(&self.shell_cmd.dir);
let mut tokio_cmd = tokio::process::Command::from(std_cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?;
Ok(digest)
}
}
pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command {
// Cannot use StdCommand, as it joins the args, which does not work for some reason.
let mut cmd = Command::new(RG_ARGS[0]);
// Do not use --vimgrep here.
cmd.args(&RG_ARGS[1..]).current_dir(dir);
cmd
}
pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> {
let shell_cmd = rg_shell_command(dir.as_ref());
let cache_file_path = shell_cmd.cache_file_path()?;
let mut cmd = rg_command(dir.as_ref());
crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?;
let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?;
Ok(digest)
}
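// Hedged sketch: refreshing the cache for the current working directory from
// a synchronous context. The wrapper name is hypothetical.
fn refresh_cwd_cache() -> Result<()> {
    // Writes rg's stdout to the cache file and stores the resulting digest.
    let _digest = refresh_cache(std::env::current_dir()?)?;
    Ok(())
}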
#[inline]
pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand {
ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref()))
}
| {
Err("Not Message::Match type".into())
} | conditional_block |
mod.rs | mod default_types;
mod jsont;
mod stats;
use crate::cache::Digest;
use crate::process::ShellCommand;
use anyhow::Result;
use once_cell::sync::Lazy;
use std::borrow::Cow;
use std::collections::HashMap;
use std::convert::TryFrom;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::process::Command;
use utils::display_width;
pub use self::jsont::{Match, Message, SubMatch};
pub static RG_EXISTS: Lazy<bool> = Lazy::new(|| {
std::process::Command::new("rg")
.arg("--version")
.stdout(std::process::Stdio::null())
.status()
.map(|exit_status| exit_status.success())
.unwrap_or(false)
});
/// Map of file extension to ripgrep language.
///
/// https://github.com/BurntSushi/ripgrep/blob/20534fad04/crates/ignore/src/default_types.rs
static RG_LANGUAGE_EXT_TABLE: Lazy<HashMap<&str, &str>> = Lazy::new(|| {
default_types::DEFAULT_TYPES
.iter()
.flat_map(|(lang, values)| {
values.iter().filter_map(|v| {
v.split('.').last().and_then(|ext| {
// Simply ignore the abnormal cases.
if ext.contains('[') || ext.contains('*') {
None
} else {
Some((ext, *lang))
}
})
})
})
.collect()
});
/// Finds the ripgrep language for the given file extension.
pub fn get_language(file_extension: &str) -> Option<&&str> {
RG_LANGUAGE_EXT_TABLE.get(file_extension)
}
/// Word represents the input query surrounded by word boundaries.
#[derive(Clone, Debug)]
pub struct Word {
pub raw: String,
pub len: usize,
pub re: regex::Regex,
}
impl Word {
pub fn new(re_word: String, re: regex::Regex) -> Word {
Self {
len: re_word.len(),
raw: re_word,
re,
}
}
pub fn find(&self, line: &str) -> Option<usize> {
self.re.find(line).map(|mat| mat.start())
}
}
#[inline]
fn range(start: usize, end: usize, offset: usize) -> Range<usize> {
start + offset..end + offset
}
impl SubMatch {
pub fn match_indices(&self, offset: usize) -> Range<usize> {
range(self.start, self.end, offset)
}
// FIXME find the word in non-utf8?
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Range<usize> {
// The text in SubMatch is not exactly the search word itself in some cases,
// so we first need to find the offset of the search word within the SubMatch text manually.
match search_word.find(&self.m.text()) {
Some(search_word_offset) => {
let start = self.start + search_word_offset;
range(start, start + search_word.len, offset)
}
None => Default::default(),
}
}
}
impl PartialEq for Match {
fn eq(&self, other: &Match) -> bool {
// Ignore the `submatches` field.
//
// Given a certain search word, if all the other fields are the same, especially
// if `absolute_offset` is equal, the two Matches can be considered the same.
self.path == other.path
&& self.lines == other.lines
&& self.line_number == other.line_number
&& self.absolute_offset == other.absolute_offset
}
}
impl Eq for Match {}
impl Match {
pub fn path(&self) -> Cow<str> {
self.path.text()
}
pub fn line_number(&self) -> u64 {
self.line_number.unwrap_or_default()
}
pub fn column(&self) -> usize {
self.submatches.get(0).map(|x| x.start).unwrap_or_default()
}
/// Returns true if the text line starts with `pat`.
pub fn line_starts_with(&self, pat: &str) -> bool {
self.lines.text().trim_start().starts_with(pat)
}
pub fn match_indices(&self, offset: usize) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices(offset))
.collect()
}
pub fn match_indices_for_dumb_jump(&self, offset: usize, search_word: &Word) -> Vec<usize> {
self.submatches
.iter()
.flat_map(|s| s.match_indices_for_dumb_jump(offset, search_word))
.collect()
}
}
impl TryFrom<&[u8]> for Match {
type Error = Cow<'static, str>;
fn try_from(byte_line: &[u8]) -> Result<Self, Self::Error> {
let msg = serde_json::from_slice::<Message>(byte_line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl TryFrom<&str> for Match {
type Error = Cow<'static, str>;
fn try_from(line: &str) -> Result<Self, Self::Error> {
let msg = serde_json::from_str::<Message>(line)
.map_err(|e| format!("deserialize error: {e:?}"))?;
if let Message::Match(mat) = msg {
Ok(mat)
} else {
Err("Not Message::Match type".into())
}
}
}
impl Match {
/// Returns a pair of the formatted `String` and the offset of the original match indices.
///
/// The formatted String is the same as the output line produced by rg's --vimgrep option.
fn grep_line_format(&self, enable_icon: bool) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
// filepath:line_number:column:text, 3 extra `:` in the formatted String.
let mut offset =
path.len() + display_width(line_number as usize) + display_width(column) + 3;
let formatted_line = if enable_icon {
let icon = icon::file_icon(&path);
offset += icon.len_utf8() + 1;
format!("{icon} {path}:{line_number}:{column}:{pattern}")
} else {
format!("{path}:{line_number}:{column}:{pattern}")
};
(formatted_line, offset)
}
pub fn build_grep_line(&self, enable_icon: bool) -> (String, Vec<usize>) {
let (formatted, offset) = self.grep_line_format(enable_icon);
let indices = self.match_indices(offset);
(formatted, indices)
}
#[inline]
pub fn pattern(&self) -> Cow<str> {
self.lines.text()
}
pub fn | (&self) -> dumb_analyzer::Priority {
self.path()
.rsplit_once('.')
.and_then(|(_, file_ext)| {
dumb_analyzer::calculate_pattern_priority(self.pattern(), file_ext)
})
.unwrap_or_default()
}
/// Returns a pair of the formatted `String` and the offset of matches for the dumb_jump provider.
///
/// NOTE: [`pattern::DUMB_JUMP_LINE`] must be updated accordingly once the format is changed.
fn jump_line_format(&self, kind: &str) -> (String, usize) {
let path = self.path();
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_line = format!("[r{kind}]{path}:{line_number}:{column}:{pattern}",);
let offset = kind.len()
+ path.len()
+ display_width(line_number as usize)
+ display_width(column)
+ 6; // `[r]` + 3 `:`
(formatted_line, offset)
}
pub fn build_jump_line(&self, kind: &str, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format(kind);
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
fn jump_line_format_bare(&self) -> (String, usize) {
let line_number = self.line_number();
let column = self.column();
let pattern = self.pattern();
let pattern = pattern.trim_end();
let formatted_string = format!(" {line_number}:{column}:{pattern}");
let offset = display_width(line_number as usize) + display_width(column) + 2 + 2;
(formatted_string, offset)
}
pub fn build_jump_line_bare(&self, word: &Word) -> (String, Vec<usize>) {
let (formatted, offset) = self.jump_line_format_bare();
let indices = self.match_indices_for_dumb_jump(offset, word);
(formatted, indices)
}
}
const RG_ARGS: &[&str] = &[
"rg",
"--column",
"--line-number",
"--no-heading",
"--color=never",
"--smart-case",
"",
".",
];
// Ref https://github.com/liuchengxu/vim-clap/issues/533
// Now `.` is pushed to the end for all platforms due to https://github.com/liuchengxu/vim-clap/issues/711.
pub const RG_EXEC_CMD: &str =
"rg --column --line-number --no-heading --color=never --smart-case ''.";
// Used for creating the cache in an async context.
#[derive(Debug, Clone, Hash)]
pub struct RgTokioCommand {
shell_cmd: ShellCommand,
}
impl RgTokioCommand {
pub fn new(dir: PathBuf) -> Self {
let shell_cmd = ShellCommand::new(RG_EXEC_CMD.into(), dir);
Self { shell_cmd }
}
pub fn cache_digest(&self) -> Option<Digest> {
self.shell_cmd.cache_digest()
}
pub async fn create_cache(self) -> Result<Digest> {
let cache_file = self.shell_cmd.cache_file_path()?;
let std_cmd = rg_command(&self.shell_cmd.dir);
let mut tokio_cmd = tokio::process::Command::from(std_cmd);
crate::process::tokio::write_stdout_to_file(&mut tokio_cmd, &cache_file).await?;
let digest = crate::cache::store_cache_digest(self.shell_cmd.clone(), cache_file)?;
Ok(digest)
}
}
pub fn rg_command<P: AsRef<Path>>(dir: P) -> Command {
// Cannot use StdCommand, as it joins the args, which does not work for some reason.
let mut cmd = Command::new(RG_ARGS[0]);
// Do not use --vimgrep here.
cmd.args(&RG_ARGS[1..]).current_dir(dir);
cmd
}
pub fn refresh_cache(dir: impl AsRef<Path>) -> Result<Digest> {
let shell_cmd = rg_shell_command(dir.as_ref());
let cache_file_path = shell_cmd.cache_file_path()?;
let mut cmd = rg_command(dir.as_ref());
crate::process::write_stdout_to_file(&mut cmd, &cache_file_path)?;
let digest = crate::cache::store_cache_digest(shell_cmd, cache_file_path)?;
Ok(digest)
}
#[inline]
pub fn rg_shell_command<P: AsRef<Path>>(dir: P) -> ShellCommand {
ShellCommand::new(RG_EXEC_CMD.into(), PathBuf::from(dir.as_ref()))
}
| pattern_priority | identifier_name |
tls_accept.rs | #![cfg(test)]
// These are basically integration tests for the `connection` submodule, but
// they cannot be "real" integration tests because `connection` isn't a public
// interface and because `connection` exposes a `#[cfg(test)]`-only API for use
// by these tests.
use linkerd2_error::Never;
use linkerd2_identity::{test_util, CrtKey, Name};
use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen};
use linkerd2_proxy_transport::tls::{
self,
accept::{AcceptTls, Connection as ServerConnection},
client::Connection as ClientConnection,
Conditional,
};
use linkerd2_proxy_transport::{connect, Bind, Listen};
use std::{net::SocketAddr, sync::mpsc};
use tokio::{self, io, prelude::*};
use tower::{layer::Layer, ServiceExt};
use tower_util::service_fn;
#[test]
fn plaintext() {
let (client_result, server_result) = run_test(
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|conn| write_then_read(conn, PING),
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
}
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// Writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
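// Illustrative composition (futures 0.1 style): pair the helpers above into a
// ping/pong exchange over any two duplex streams, mirroring the `plaintext`
// test. A sketch only; the real tests drive these through `run_test`.
fn ping_pong<C, S>(
    client_conn: C,
    server_conn: S,
) -> impl Future<Item = (Vec<u8>, Vec<u8>), Error = io::Error>
where
    C: AsyncRead + AsyncWrite,
    S: AsyncRead + AsyncWrite,
{
    write_then_read(client_conn, PING).join(read_then_write(server_conn, PING.len(), PONG))
}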
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
enum Server<A: Accept<ServerConnection>>
where | },
Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future),
Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture),
}
#[derive(Clone)]
struct Target(SocketAddr, Conditional<Name>);
#[derive(Clone)]
struct ClientTls(CrtKey);
impl<A: Accept<ServerConnection> + Clone> Future for Server<A> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
*self = match self {
Server::Init {
ref mut listen,
ref mut accept,
} => {
match Accept::poll_ready(accept) {
Ok(Async::Ready(())) => {}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accept failed"),
}
let conn = match listen.poll_accept() {
Ok(Async::Ready(conn)) => conn,
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("listener failed"),
};
Server::Accepting(accept.accept(conn))
}
Server::Accepting(ref mut fut) => match fut.poll() {
Ok(Async::Ready(conn_future)) => Server::Serving(conn_future),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accepting failed"),
},
Server::Serving(ref mut fut) => match fut.poll() {
Ok(ready) => return Ok(ready),
Err(_) => panic!("connection failed"),
},
}
}
}
}
impl connect::ConnectAddr for Target {
fn connect_addr(&self) -> SocketAddr {
self.0
}
}
impl tls::HasPeerIdentity for Target {
fn peer_identity(&self) -> Conditional<Name> {
self.1.clone()
}
}
impl tls::client::HasConfig for ClientTls {
fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> {
self.0.tls_client_config()
}
} | AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, CrtKey>, | random_line_split |
tls_accept.rs | #![cfg(test)]
// These are basically integration tests for the `connection` submodule, but
// they cannot be "real" integration tests because `connection` isn't a public
// interface and because `connection` exposes a `#[cfg(test)]`-only API for use
// by these tests.
use linkerd2_error::Never;
use linkerd2_identity::{test_util, CrtKey, Name};
use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen};
use linkerd2_proxy_transport::tls::{
self,
accept::{AcceptTls, Connection as ServerConnection},
client::Connection as ClientConnection,
Conditional,
};
use linkerd2_proxy_transport::{connect, Bind, Listen};
use std::{net::SocketAddr, sync::mpsc};
use tokio::{self, io, prelude::*};
use tower::{layer::Layer, ServiceExt};
use tower_util::service_fn;
#[test]
fn plaintext() {
let (client_result, server_result) = run_test(
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|conn| write_then_read(conn, PING),
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool |
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// Writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
enum Server<A: Accept<ServerConnection>>
where
AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, CrtKey>,
},
Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future),
Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture),
}
#[derive(Clone)]
struct Target(SocketAddr, Conditional<Name>);
#[derive(Clone)]
struct ClientTls(CrtKey);
impl<A: Accept<ServerConnection> + Clone> Future for Server<A> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
*self = match self {
Server::Init {
ref mut listen,
ref mut accept,
} => {
match Accept::poll_ready(accept) {
Ok(Async::Ready(())) => {}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accept failed"),
}
let conn = match listen.poll_accept() {
Ok(Async::Ready(conn)) => conn,
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("listener failed"),
};
Server::Accepting(accept.accept(conn))
}
Server::Accepting(ref mut fut) => match fut.poll() {
Ok(Async::Ready(conn_future)) => Server::Serving(conn_future),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accepting failed"),
},
Server::Serving(ref mut fut) => match fut.poll() {
Ok(ready) => return Ok(ready),
Err(_) => panic!("connection failed"),
},
}
}
}
}
impl connect::ConnectAddr for Target {
fn connect_addr(&self) -> SocketAddr {
self.0
}
}
impl tls::HasPeerIdentity for Target {
fn peer_identity(&self) -> Conditional<Name> {
self.1.clone()
}
}
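// Illustrative: a `Target` for a plaintext connection carries the address plus
// a reason for having no peer name, so the connect layer skips the TLS
// upgrade. A sketch under the same `Conditional` conventions used above.
fn plaintext_target(addr: SocketAddr) -> Target {
    Target(addr, Conditional::None(tls::ReasonForNoIdentity::Disabled))
}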
impl tls::client::HasConfig for ClientTls {
fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> {
self.0.tls_client_config()
}
}
| {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
} | identifier_body |
tls_accept.rs | #![cfg(test)]
// These are basically integration tests for the `connection` submodule, but
// they cannot be "real" integration tests because `connection` isn't a public
// interface and because `connection` exposes a `#[cfg(test)]`-only API for use
// by these tests.
use linkerd2_error::Never;
use linkerd2_identity::{test_util, CrtKey, Name};
use linkerd2_proxy_core::listen::{Accept, Bind as _Bind, Listen as CoreListen};
use linkerd2_proxy_transport::tls::{
self,
accept::{AcceptTls, Connection as ServerConnection},
client::Connection as ClientConnection,
Conditional,
};
use linkerd2_proxy_transport::{connect, Bind, Listen};
use std::{net::SocketAddr, sync::mpsc};
use tokio::{self, io, prelude::*};
use tower::{layer::Layer, ServiceExt};
use tower_util::service_fn;
#[test]
fn plaintext() {
let (client_result, server_result) = run_test(
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|conn| write_then_read(conn, PING),
Conditional::None(tls::ReasonForNoIdentity::Disabled),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), false);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_works() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
let client_tls = test_util::BAR_NS1.validate().unwrap();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, server_tls.tls_server_name())),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, PING.len(), PONG),
);
assert_eq!(client_result.is_tls(), true);
assert_eq!(&client_result.result.expect("pong")[..], PONG);
assert_eq!(server_result.is_tls(), true);
assert_eq!(&server_result.result.expect("ping")[..], PING);
}
#[test]
fn proxy_to_proxy_tls_pass_through_when_identity_does_not_match() {
let server_tls = test_util::FOO_NS1.validate().unwrap();
// Misuse the client's identity instead of the server's identity. Any
// identity other than `server_tls.server_identity` would work.
let client_tls = test_util::BAR_NS1.validate().expect("valid client cert");
let client_target = test_util::BAR_NS1.crt().name().clone();
let (client_result, server_result) = run_test(
Conditional::Some((client_tls, client_target)),
|conn| write_then_read(conn, PING),
Conditional::Some(server_tls),
|(_, conn)| read_then_write(conn, START_OF_TLS.len(), PONG),
);
// The server's connection will succeed with the TLS client hello passed
// through, because the SNI doesn't match its identity.
assert_eq!(client_result.is_tls(), false);
assert!(client_result.result.is_err());
assert_eq!(server_result.is_tls(), false);
assert_eq!(&server_result.result.unwrap()[..], START_OF_TLS);
}
struct Transported<R> {
/// The value of `Connection::peer_identity()` for the established connection.
///
/// This will be `None` if we never even get a `Connection`.
peer_identity: Option<tls::PeerIdentity>,
/// The connection's result.
result: Result<R, io::Error>,
}
impl<R> Transported<R> {
fn is_tls(&self) -> bool {
self.peer_identity
.as_ref()
.map(|i| i.is_some())
.unwrap_or(false)
}
}
/// Runs a test for a single TCP connection. `client` processes the connection
/// on the client side and `server` processes the connection on the server
/// side.
fn run_test<C, CF, CR, S, SF, SR>(
client_tls: tls::Conditional<(CrtKey, Name)>,
client: C,
server_tls: tls::Conditional<CrtKey>,
server: S,
) -> (Transported<CR>, Transported<SR>)
where
// Client
C: FnOnce(ClientConnection) -> CF + Clone + Send + 'static,
CF: Future<Item = CR, Error = io::Error> + Send + 'static,
CR: Send + 'static,
// Server
S: Fn(ServerConnection) -> SF + Clone + Send + 'static,
SF: Future<Item = SR, Error = io::Error> + Send + 'static,
SR: Send + 'static,
{
{
use tracing_subscriber::{fmt, EnvFilter};
let sub = fmt::Subscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.finish();
let _ = tracing::subscriber::set_global_default(sub);
}
let (client_tls, client_target_name) = match client_tls {
Conditional::Some((crtkey, name)) => (
Conditional::Some(ClientTls(crtkey)),
Conditional::Some(name),
),
Conditional::None(reason) => (Conditional::None(reason.clone()), Conditional::None(reason)),
};
// A future that will receive a single connection.
let (server, server_addr, server_result) = {
// Saves the result of every connection.
let (sender, receiver) = mpsc::channel::<Transported<SR>>();
// Let the OS decide the port number and then return the resulting
// `SocketAddr` so the client can connect to it. This allows multiple
// tests to run at once, which wouldn't work if they all were bound on
// a fixed port.
let addr = "127.0.0.1:0".parse::<SocketAddr>().unwrap();
let listen = Bind::new(addr, None).bind().expect("must bind");
let listen_addr = listen.listen_addr();
let sender = service_fn(move |(meta, conn): ServerConnection| {
let sender = sender.clone();
let peer_identity = Some(meta.peer_identity.clone());
let server = Box::new(server((meta, conn)).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
future::ok::<(), Never>(())
}));
Box::new(future::ok::<_, Never>(server))
});
let accept = AcceptTls::new(server_tls, sender);
let server = Server::Init { listen, accept };
(server, listen_addr, receiver)
};
// A future that will open a single connection to the server.
let (client, client_result) = {
// Saves the result of the single connection. This could be a simpler
// type, e.g. `Arc<Mutex>`, but using a channel simplifies the code and
// parallels the server side.
let (sender, receiver) = mpsc::channel::<Transported<CR>>();
let sender_clone = sender.clone();
let peer_identity = Some(client_target_name.clone());
let client = tls::ConnectLayer::new(client_tls)
.layer(connect::Connect::new(None))
.oneshot(Target(server_addr, client_target_name))
.map_err(move |e| {
sender_clone
.send(Transported {
peer_identity: None,
result: Err(e),
})
.expect("send result");
()
})
.and_then(move |conn| {
client(conn).then(move |result| {
sender
.send(Transported {
peer_identity,
result,
})
.expect("send result");
Ok(())
})
});
(client, receiver)
};
tokio::run(server.join(client).map(|_| ()));
let client_result = client_result.try_recv().expect("client complete");
// XXX: This assumes that only one connection is accepted. TODO: allow the
// caller to observe the results for every connection, once we have tests
// that allow accepting multiple connections.
let server_result = server_result.try_recv().expect("server complete");
(client_result, server_result)
}
/// Writes `to_write` and shuts down the write side, then reads until EOF,
/// returning the bytes read.
fn write_then_read(
conn: impl AsyncRead + AsyncWrite,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
write_and_shutdown(conn, to_write)
.and_then(|conn| io::read_to_end(conn, Vec::new()))
.map(|(_conn, r)| r)
}
/// Reads until EOF then writes `to_write` and shuts down the write side,
/// returning the bytes read.
fn read_then_write(
conn: impl AsyncRead + AsyncWrite,
read_prefix_len: usize,
to_write: &'static [u8],
) -> impl Future<Item = Vec<u8>, Error = io::Error> {
io::read_exact(conn, vec![0; read_prefix_len])
.and_then(move |(conn, r)| write_and_shutdown(conn, to_write).map(|_conn| r))
}
/// Writes `to_write` to `conn` and then shuts down the write side of `conn`.
fn write_and_shutdown<T: AsyncRead + AsyncWrite>(
conn: T,
to_write: &'static [u8],
) -> impl Future<Item = T, Error = io::Error> {
io::write_all(conn, to_write).and_then(|(mut conn, _)| {
conn.shutdown()?;
Ok(conn)
})
}
const PING: &[u8] = b"ping";
const PONG: &[u8] = b"pong";
const START_OF_TLS: &[u8] = &[22, 3, 1]; // ContentType::handshake version 3.1
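// Illustrative helper (an assumption, not part of the original test file):
// a buffer that begins with `START_OF_TLS` looks like a passed-through TLS
// record header — content type 22 (handshake), protocol version 3.1 — which
// is exactly what the pass-through test above asserts the server received.
fn looks_like_tls_handshake(buf: &[u8]) -> bool {
    buf.len() >= START_OF_TLS.len() && buf[..START_OF_TLS.len()] == *START_OF_TLS
}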
enum Server<A: Accept<ServerConnection>>
where
AcceptTls<A, CrtKey>: Accept<<Listen as CoreListen>::Connection>,
{
Init {
listen: Listen,
accept: AcceptTls<A, CrtKey>,
},
Accepting(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::Future),
Serving(<AcceptTls<A, CrtKey> as Accept<<Listen as CoreListen>::Connection>>::ConnectionFuture),
}
#[derive(Clone)]
struct Target(SocketAddr, Conditional<Name>);
#[derive(Clone)]
struct | (CrtKey);
impl<A: Accept<ServerConnection> + Clone> Future for Server<A> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
*self = match self {
Server::Init {
ref mut listen,
ref mut accept,
} => {
match Accept::poll_ready(accept) {
Ok(Async::Ready(())) => {}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accept failed"),
}
let conn = match listen.poll_accept() {
Ok(Async::Ready(conn)) => conn,
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("listener failed"),
};
Server::Accepting(accept.accept(conn))
}
Server::Accepting(ref mut fut) => match fut.poll() {
Ok(Async::Ready(conn_future)) => Server::Serving(conn_future),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(_) => panic!("accepting failed"),
},
Server::Serving(ref mut fut) => match fut.poll() {
Ok(ready) => return Ok(ready),
Err(_) => panic!("connection failed"),
},
}
}
}
}
impl connect::ConnectAddr for Target {
fn connect_addr(&self) -> SocketAddr {
self.0
}
}
impl tls::HasPeerIdentity for Target {
fn peer_identity(&self) -> Conditional<Name> {
self.1.clone()
}
}
impl tls::client::HasConfig for ClientTls {
fn tls_client_config(&self) -> std::sync::Arc<tls::client::Config> {
self.0.tls_client_config()
}
}
| ClientTls | identifier_name |
wasm_vm.rs | use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use std::process;
use std::str::FromStr;
use std::sync::{mpsc::Sender, Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use zellij_utils::{serde, zellij_tile};
use serde::{de::DeserializeOwned, Serialize};
use wasmer::{
imports, ChainableNamedResolver, Function, ImportObject, Instance, Module, Store, Value,
WasmerEnv,
};
use wasmer_wasi::{Pipe, WasiEnv, WasiState};
use zellij_tile::data::{Event, EventType, PluginIds};
use crate::{
panes::PaneId,
pty::PtyInstruction,
screen::ScreenInstruction,
thread_bus::{Bus, ThreadSenders},
};
use zellij_utils::errors::{ContextType, PluginContext};
#[derive(Clone, Debug)]
pub(crate) enum PluginInstruction {
Load(Sender<u32>, PathBuf),
Update(Option<u32>, Event), // Focused plugin / broadcast, event data
Render(Sender<String>, u32, usize, usize), // String buffer, plugin id, rows, cols
Unload(u32),
Exit,
}
impl From<&PluginInstruction> for PluginContext {
fn from(plugin_instruction: &PluginInstruction) -> Self {
match *plugin_instruction {
PluginInstruction::Load(..) => PluginContext::Load,
PluginInstruction::Update(..) => PluginContext::Update,
PluginInstruction::Render(..) => PluginContext::Render,
PluginInstruction::Unload(_) => PluginContext::Unload,
PluginInstruction::Exit => PluginContext::Exit,
}
}
}
#[derive(WasmerEnv, Clone)]
pub(crate) struct PluginEnv {
pub plugin_id: u32,
pub senders: ThreadSenders,
pub wasi_env: WasiEnv,
pub subscriptions: Arc<Mutex<HashSet<EventType>>>,
}
// Thread main --------------------------------------------------------------------------------------------------------
pub(crate) fn wasm_thread_main(bus: Bus<PluginInstruction>, store: Store, data_dir: PathBuf) {
let mut plugin_id = 0;
let mut plugin_map = HashMap::new();
loop {
let (event, mut err_ctx) = bus.recv().expect("failed to receive event on channel");
err_ctx.add_call(ContextType::Plugin((&event).into()));
match event {
PluginInstruction::Load(pid_tx, path) => {
let plugin_dir = data_dir.join("plugins/");
let wasm_bytes = fs::read(&path)
.or_else(|_| fs::read(&path.with_extension("wasm")))
.or_else(|_| fs::read(&plugin_dir.join(&path).with_extension("wasm")))
.unwrap_or_else(|_| panic!("cannot find plugin {}", &path.display()));
// FIXME: Cache this compiled module on disk. I could use `(de)serialize_to_file()` for that
let module = Module::new(&store, &wasm_bytes).unwrap();
let output = Pipe::new();
let input = Pipe::new();
let mut wasi_env = WasiState::new("Zellij")
.env("CLICOLOR_FORCE", "1")
.preopen(|p| {
p.directory(".") // FIXME: Change this to a more meaningful dir
.alias(".")
.read(true)
.write(true)
.create(true)
})
.unwrap()
.stdin(Box::new(input))
.stdout(Box::new(output))
.finalize()
.unwrap();
let wasi = wasi_env.import_object(&module).unwrap();
let plugin_env = PluginEnv {
plugin_id,
senders: bus.senders.clone(),
wasi_env,
subscriptions: Arc::new(Mutex::new(HashSet::new())),
};
let zellij = zellij_exports(&store, &plugin_env);
let instance = Instance::new(&module, &zellij.chain_back(wasi)).unwrap();
let start = instance.exports.get_function("_start").unwrap();
// This eventually calls the `.load()` method
start.call(&[]).unwrap();
plugin_map.insert(plugin_id, (instance, plugin_env));
pid_tx.send(plugin_id).unwrap();
plugin_id += 1;
}
PluginInstruction::Update(pid, event) => |
PluginInstruction::Render(buf_tx, pid, rows, cols) => {
let (instance, plugin_env) = plugin_map.get(&pid).unwrap();
let render = instance.exports.get_function("render").unwrap();
render
.call(&[Value::I32(rows as i32), Value::I32(cols as i32)])
.unwrap();
buf_tx.send(wasi_read_string(&plugin_env.wasi_env)).unwrap();
}
PluginInstruction::Unload(pid) => drop(plugin_map.remove(&pid)),
PluginInstruction::Exit => break,
}
}
}
// Plugin API ---------------------------------------------------------------------------------------------------------
pub(crate) fn zellij_exports(store: &Store, plugin_env: &PluginEnv) -> ImportObject {
macro_rules! zellij_export {
($($host_function:ident),+ $(,)?) => {
imports! {
"zellij" => {
$(stringify!($host_function) =>
Function::new_native_with_env(store, plugin_env.clone(), $host_function),)+
}
}
}
}
zellij_export! {
host_subscribe,
host_unsubscribe,
host_set_invisible_borders,
host_set_max_height,
host_set_selectable,
host_get_plugin_ids,
host_open_file,
host_set_timeout,
}
}
fn host_subscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let new: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.extend(new);
}
fn host_unsubscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let old: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.retain(|k| !old.contains(k));
}
fn host_set_selectable(plugin_env: &PluginEnv, selectable: i32) {
let selectable = selectable != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetSelectable(
PaneId::Plugin(plugin_env.plugin_id),
selectable,
))
.unwrap()
}
fn host_set_max_height(plugin_env: &PluginEnv, max_height: i32) {
let max_height = max_height as usize;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetMaxHeight(
PaneId::Plugin(plugin_env.plugin_id),
max_height,
))
.unwrap()
}
fn host_set_invisible_borders(plugin_env: &PluginEnv, invisible_borders: i32) {
let invisible_borders = invisible_borders != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetInvisibleBorders(
PaneId::Plugin(plugin_env.plugin_id),
invisible_borders,
))
.unwrap()
}
fn host_get_plugin_ids(plugin_env: &PluginEnv) {
let ids = PluginIds {
plugin_id: plugin_env.plugin_id,
zellij_pid: process::id(),
};
wasi_write_object(&plugin_env.wasi_env, &ids);
}
fn host_open_file(plugin_env: &PluginEnv) {
let path: PathBuf = wasi_read_object(&plugin_env.wasi_env);
plugin_env
.senders
.send_to_pty(PtyInstruction::SpawnTerminal(Some(path)))
.unwrap();
}
fn host_set_timeout(plugin_env: &PluginEnv, secs: f64) {
// There is a fancy, high-performance way to do this with zero additional threads:
// If the plugin thread keeps a BinaryHeap of timer structs, it can manage multiple and easily `.peek()` at the
// next time to trigger in O(1) time. Once the wake-up time is known, the `wasm` thread can use `recv_timeout()`
// to wait for an event, with the timeout set to the time of the next wake-up. If events arrive in the meantime,
// they are handled, but if the timeout triggers, we replace the event from `recv()` with an
// `Update(pid, TimerEvent)` and pop the timer from the Heap (or reschedule it). No additional threads for as many
// timers as we'd like.
//
// But that's a lot of code, and this is a few lines:
let send_plugin_instructions = plugin_env.senders.to_plugin.clone();
let update_target = Some(plugin_env.plugin_id);
thread::spawn(move || {
let start_time = Instant::now();
thread::sleep(Duration::from_secs_f64(secs));
// FIXME: The way that elapsed time is being calculated here is not exact; it doesn't take into account the
// time it takes an event to actually reach the plugin after it's sent to the `wasm` thread.
let elapsed_time = Instant::now().duration_since(start_time).as_secs_f64();
send_plugin_instructions
.unwrap()
.send(PluginInstruction::Update(
update_target,
Event::Timer(elapsed_time),
))
.unwrap();
});
}
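// A minimal, self-contained sketch (assumed names, not part of this crate) of
// the zero-extra-thread BinaryHeap scheme described in the comment above:
// pending timers sit in a min-heap keyed by deadline, and the loop waits on
// the channel only until the earliest deadline expires.
#[allow(dead_code)] // illustrative only
mod timer_heap_sketch {
    use std::cmp::Reverse;
    use std::collections::BinaryHeap;
    use std::sync::mpsc::{Receiver, RecvTimeoutError};
    use std::time::Instant;
    /// `on_event` may schedule a timer by returning `Some((deadline, plugin_id))`.
    pub fn timer_loop<E>(
        events: Receiver<E>,
        mut on_event: impl FnMut(E) -> Option<(Instant, u32)>,
        mut on_timer: impl FnMut(u32),
    ) {
        let mut timers: BinaryHeap<Reverse<(Instant, u32)>> = BinaryHeap::new();
        loop {
            // `peek()` yields the next wake-up time in O(1), as the comment suggests.
            let next_wakeup = timers
                .peek()
                .map(|Reverse((deadline, _))| deadline.saturating_duration_since(Instant::now()));
            match next_wakeup {
                // No timers pending: block until the next event arrives.
                None => match events.recv() {
                    Ok(ev) => {
                        if let Some(timer) = on_event(ev) {
                            timers.push(Reverse(timer));
                        }
                    }
                    Err(_) => return, // all senders dropped
                },
                Some(timeout) => match events.recv_timeout(timeout) {
                    Ok(ev) => {
                        if let Some(timer) = on_event(ev) {
                            timers.push(Reverse(timer));
                        }
                    }
                    // Deadline hit: pop the timer and fire it, no extra thread needed.
                    Err(RecvTimeoutError::Timeout) => {
                        if let Some(Reverse((_, pid))) = timers.pop() {
                            on_timer(pid);
                        }
                    }
                    Err(RecvTimeoutError::Disconnected) => return,
                },
            }
        }
    }
}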
// Helper Functions ---------------------------------------------------------------------------------------------------
// FIXME: Unwrap city
pub fn wasi_read_string(wasi_env: &WasiEnv) -> String {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdout_mut().unwrap().as_mut().unwrap();
let mut buf = String::new();
wasi_file.read_to_string(&mut buf).unwrap();
buf
}
pub fn wasi_write_string(wasi_env: &WasiEnv, buf: &str) {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdin_mut().unwrap().as_mut().unwrap();
writeln!(wasi_file, "{}\r", buf).unwrap();
}
pub fn wasi_write_object(wasi_env: &WasiEnv, object: &impl Serialize) {
wasi_write_string(wasi_env, &serde_json::to_string(&object).unwrap());
}
pub fn wasi_read_object<T: DeserializeOwned>(wasi_env: &WasiEnv) -> T {
let json = wasi_read_string(wasi_env);
serde_json::from_str(&json).unwrap()
}
| {
for (&i, (instance, plugin_env)) in &plugin_map {
let subs = plugin_env.subscriptions.lock().unwrap();
// FIXME: This is very janky... Maybe I should write my own macro for Event -> EventType?
let event_type = EventType::from_str(&event.to_string()).unwrap();
if (pid.is_none() || pid == Some(i)) && subs.contains(&event_type) {
let update = instance.exports.get_function("update").unwrap();
wasi_write_object(&plugin_env.wasi_env, &event);
update.call(&[]).unwrap();
}
}
drop(bus.senders.send_to_screen(ScreenInstruction::Render));
} | conditional_block |
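// Hedged illustration (standalone, not from the file above): the host/plugin
// exchange in `wasi_write_object` / `wasi_read_object` is just line-oriented
// JSON over the plugin's stdio, so the round trip can be shown with plain
// strings. Assumes `serde` and `serde_json` as dependencies.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, PartialEq, Debug)]
struct Ping {
    seq: u32,
}

fn main() {
    let msg = Ping { seq: 7 };
    // host -> plugin: serialize one value, as `wasi_write_object` does
    let line = serde_json::to_string(&msg).unwrap();
    // plugin -> host: parse it back, as `wasi_read_object` does
    let echoed: Ping = serde_json::from_str(&line).unwrap();
    assert_eq!(msg, echoed);
}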
wasm_vm.rs | use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use std::process;
use std::str::FromStr;
use std::sync::{mpsc::Sender, Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use zellij_utils::{serde, zellij_tile};
use serde::{de::DeserializeOwned, Serialize};
use wasmer::{
imports, ChainableNamedResolver, Function, ImportObject, Instance, Module, Store, Value,
WasmerEnv,
};
use wasmer_wasi::{Pipe, WasiEnv, WasiState};
use zellij_tile::data::{Event, EventType, PluginIds};
use crate::{
panes::PaneId,
pty::PtyInstruction,
screen::ScreenInstruction,
thread_bus::{Bus, ThreadSenders},
};
use zellij_utils::errors::{ContextType, PluginContext};
#[derive(Clone, Debug)]
pub(crate) enum | {
Load(Sender<u32>, PathBuf),
Update(Option<u32>, Event), // Focused plugin / broadcast, event data
Render(Sender<String>, u32, usize, usize), // String buffer, plugin id, rows, cols
Unload(u32),
Exit,
}
impl From<&PluginInstruction> for PluginContext {
fn from(plugin_instruction: &PluginInstruction) -> Self {
match *plugin_instruction {
PluginInstruction::Load(..) => PluginContext::Load,
PluginInstruction::Update(..) => PluginContext::Update,
PluginInstruction::Render(..) => PluginContext::Render,
PluginInstruction::Unload(_) => PluginContext::Unload,
PluginInstruction::Exit => PluginContext::Exit,
}
}
}
#[derive(WasmerEnv, Clone)]
pub(crate) struct PluginEnv {
pub plugin_id: u32,
pub senders: ThreadSenders,
pub wasi_env: WasiEnv,
pub subscriptions: Arc<Mutex<HashSet<EventType>>>,
}
// Thread main --------------------------------------------------------------------------------------------------------
pub(crate) fn wasm_thread_main(bus: Bus<PluginInstruction>, store: Store, data_dir: PathBuf) {
let mut plugin_id = 0;
let mut plugin_map = HashMap::new();
loop {
let (event, mut err_ctx) = bus.recv().expect("failed to receive event on channel");
err_ctx.add_call(ContextType::Plugin((&event).into()));
match event {
PluginInstruction::Load(pid_tx, path) => {
let plugin_dir = data_dir.join("plugins/");
let wasm_bytes = fs::read(&path)
.or_else(|_| fs::read(&path.with_extension("wasm")))
.or_else(|_| fs::read(&plugin_dir.join(&path).with_extension("wasm")))
.unwrap_or_else(|_| panic!("cannot find plugin {}", &path.display()));
// FIXME: Cache this compiled module on disk. I could use `(de)serialize_to_file()` for that
let module = Module::new(&store, &wasm_bytes).unwrap();
let output = Pipe::new();
let input = Pipe::new();
let mut wasi_env = WasiState::new("Zellij")
.env("CLICOLOR_FORCE", "1")
.preopen(|p| {
p.directory(".") // FIXME: Change this to a more meaningful dir
.alias(".")
.read(true)
.write(true)
.create(true)
})
.unwrap()
.stdin(Box::new(input))
.stdout(Box::new(output))
.finalize()
.unwrap();
let wasi = wasi_env.import_object(&module).unwrap();
let plugin_env = PluginEnv {
plugin_id,
senders: bus.senders.clone(),
wasi_env,
subscriptions: Arc::new(Mutex::new(HashSet::new())),
};
let zellij = zellij_exports(&store, &plugin_env);
let instance = Instance::new(&module, &zellij.chain_back(wasi)).unwrap();
let start = instance.exports.get_function("_start").unwrap();
// This eventually calls the `.load()` method
start.call(&[]).unwrap();
plugin_map.insert(plugin_id, (instance, plugin_env));
pid_tx.send(plugin_id).unwrap();
plugin_id += 1;
}
PluginInstruction::Update(pid, event) => {
for (&i, (instance, plugin_env)) in &plugin_map {
let subs = plugin_env.subscriptions.lock().unwrap();
// FIXME: This is very janky... Maybe I should write my own macro for Event -> EventType?
let event_type = EventType::from_str(&event.to_string()).unwrap();
if (pid.is_none() || pid == Some(i)) && subs.contains(&event_type) {
let update = instance.exports.get_function("update").unwrap();
wasi_write_object(&plugin_env.wasi_env, &event);
update.call(&[]).unwrap();
}
}
drop(bus.senders.send_to_screen(ScreenInstruction::Render));
}
PluginInstruction::Render(buf_tx, pid, rows, cols) => {
let (instance, plugin_env) = plugin_map.get(&pid).unwrap();
let render = instance.exports.get_function("render").unwrap();
render
.call(&[Value::I32(rows as i32), Value::I32(cols as i32)])
.unwrap();
buf_tx.send(wasi_read_string(&plugin_env.wasi_env)).unwrap();
}
PluginInstruction::Unload(pid) => drop(plugin_map.remove(&pid)),
PluginInstruction::Exit => break,
}
}
}
// Plugin API ---------------------------------------------------------------------------------------------------------
pub(crate) fn zellij_exports(store: &Store, plugin_env: &PluginEnv) -> ImportObject {
macro_rules! zellij_export {
($($host_function:ident),+ $(,)?) => {
imports! {
"zellij" => {
$(stringify!($host_function) =>
Function::new_native_with_env(store, plugin_env.clone(), $host_function),)+
}
}
}
}
zellij_export! {
host_subscribe,
host_unsubscribe,
host_set_invisible_borders,
host_set_max_height,
host_set_selectable,
host_get_plugin_ids,
host_open_file,
host_set_timeout,
}
}
fn host_subscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let new: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.extend(new);
}
fn host_unsubscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let old: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.retain(|k| !old.contains(k));
}
fn host_set_selectable(plugin_env: &PluginEnv, selectable: i32) {
let selectable = selectable != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetSelectable(
PaneId::Plugin(plugin_env.plugin_id),
selectable,
))
.unwrap()
}
fn host_set_max_height(plugin_env: &PluginEnv, max_height: i32) {
let max_height = max_height as usize;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetMaxHeight(
PaneId::Plugin(plugin_env.plugin_id),
max_height,
))
.unwrap()
}
fn host_set_invisible_borders(plugin_env: &PluginEnv, invisible_borders: i32) {
let invisible_borders = invisible_borders != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetInvisibleBorders(
PaneId::Plugin(plugin_env.plugin_id),
invisible_borders,
))
.unwrap()
}
fn host_get_plugin_ids(plugin_env: &PluginEnv) {
let ids = PluginIds {
plugin_id: plugin_env.plugin_id,
zellij_pid: process::id(),
};
wasi_write_object(&plugin_env.wasi_env, &ids);
}
fn host_open_file(plugin_env: &PluginEnv) {
let path: PathBuf = wasi_read_object(&plugin_env.wasi_env);
plugin_env
.senders
.send_to_pty(PtyInstruction::SpawnTerminal(Some(path)))
.unwrap();
}
fn host_set_timeout(plugin_env: &PluginEnv, secs: f64) {
// There is a fancy, high-performance way to do this with zero additional threads:
// If the plugin thread keeps a BinaryHeap of timer structs, it can manage multiple and easily `.peek()` at the
// next time to trigger in O(1) time. Once the wake-up time is known, the `wasm` thread can use `recv_timeout()`
// to wait for an event, with the timeout set to the time of the next wake-up. If events arrive in the meantime,
// they are handled, but if the timeout triggers, we replace the event from `recv()` with an
// `Update(pid, TimerEvent)` and pop the timer from the Heap (or reschedule it). No additional threads for as many
// timers as we'd like.
//
// But that's a lot of code, and this is a few lines:
let send_plugin_instructions = plugin_env.senders.to_plugin.clone();
let update_target = Some(plugin_env.plugin_id);
thread::spawn(move || {
let start_time = Instant::now();
thread::sleep(Duration::from_secs_f64(secs));
// FIXME: The way that elapsed time is being calculated here is not exact; it doesn't take into account the
// time it takes an event to actually reach the plugin after it's sent to the `wasm` thread.
let elapsed_time = Instant::now().duration_since(start_time).as_secs_f64();
send_plugin_instructions
.unwrap()
.send(PluginInstruction::Update(
update_target,
Event::Timer(elapsed_time),
))
.unwrap();
});
}
// Helper Functions ---------------------------------------------------------------------------------------------------
// FIXME: Unwrap city
pub fn wasi_read_string(wasi_env: &WasiEnv) -> String {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdout_mut().unwrap().as_mut().unwrap();
let mut buf = String::new();
wasi_file.read_to_string(&mut buf).unwrap();
buf
}
pub fn wasi_write_string(wasi_env: &WasiEnv, buf: &str) {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdin_mut().unwrap().as_mut().unwrap();
writeln!(wasi_file, "{}\r", buf).unwrap();
}
pub fn wasi_write_object(wasi_env: &WasiEnv, object: &impl Serialize) {
wasi_write_string(wasi_env, &serde_json::to_string(&object).unwrap());
}
pub fn wasi_read_object<T: DeserializeOwned>(wasi_env: &WasiEnv) -> T {
let json = wasi_read_string(wasi_env);
serde_json::from_str(&json).unwrap()
}
| PluginInstruction | identifier_name |
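// Self-contained illustration (assumed names, not from the crate) of the
// broadcast filter in the `PluginInstruction::Update` arm above: a `None`
// target means "every subscribed plugin", `Some(id)` means "only that plugin".
use std::collections::HashSet;

fn should_update(target: Option<u32>, plugin_id: u32, subs: &HashSet<&str>, event: &str) -> bool {
    (target.is_none() || target == Some(plugin_id)) && subs.contains(event)
}

fn main() {
    let subs: HashSet<&str> = ["Timer"].into_iter().collect();
    assert!(should_update(None, 3, &subs, "Timer")); // broadcast, subscribed
    assert!(should_update(Some(3), 3, &subs, "Timer")); // targeted at this plugin
    assert!(!should_update(Some(4), 3, &subs, "Timer")); // targeted elsewhere
    assert!(!should_update(None, 3, &subs, "KeyPress")); // not subscribed
}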
wasm_vm.rs | use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use std::process;
use std::str::FromStr;
use std::sync::{mpsc::Sender, Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use zellij_utils::{serde, zellij_tile};
use serde::{de::DeserializeOwned, Serialize};
use wasmer::{
imports, ChainableNamedResolver, Function, ImportObject, Instance, Module, Store, Value,
WasmerEnv,
};
use wasmer_wasi::{Pipe, WasiEnv, WasiState};
use zellij_tile::data::{Event, EventType, PluginIds};
use crate::{
panes::PaneId,
pty::PtyInstruction,
screen::ScreenInstruction,
thread_bus::{Bus, ThreadSenders},
};
use zellij_utils::errors::{ContextType, PluginContext};
#[derive(Clone, Debug)]
pub(crate) enum PluginInstruction {
Load(Sender<u32>, PathBuf),
Update(Option<u32>, Event), // Focused plugin / broadcast, event data
Render(Sender<String>, u32, usize, usize), // String buffer, plugin id, rows, cols
Unload(u32),
Exit,
}
impl From<&PluginInstruction> for PluginContext {
fn from(plugin_instruction: &PluginInstruction) -> Self {
match *plugin_instruction {
PluginInstruction::Load(..) => PluginContext::Load,
PluginInstruction::Update(..) => PluginContext::Update,
PluginInstruction::Render(..) => PluginContext::Render,
PluginInstruction::Unload(_) => PluginContext::Unload,
PluginInstruction::Exit => PluginContext::Exit,
}
}
}
#[derive(WasmerEnv, Clone)]
pub(crate) struct PluginEnv {
pub plugin_id: u32,
pub senders: ThreadSenders,
pub wasi_env: WasiEnv,
pub subscriptions: Arc<Mutex<HashSet<EventType>>>,
}
// Thread main --------------------------------------------------------------------------------------------------------
pub(crate) fn wasm_thread_main(bus: Bus<PluginInstruction>, store: Store, data_dir: PathBuf) {
let mut plugin_id = 0;
let mut plugin_map = HashMap::new();
loop {
let (event, mut err_ctx) = bus.recv().expect("failed to receive event on channel");
err_ctx.add_call(ContextType::Plugin((&event).into()));
match event {
PluginInstruction::Load(pid_tx, path) => {
let plugin_dir = data_dir.join("plugins/");
let wasm_bytes = fs::read(&path)
.or_else(|_| fs::read(&path.with_extension("wasm")))
.or_else(|_| fs::read(&plugin_dir.join(&path).with_extension("wasm")))
.unwrap_or_else(|_| panic!("cannot find plugin {}", &path.display()));
// FIXME: Cache this compiled module on disk. I could use `(de)serialize_to_file()` for that
let module = Module::new(&store, &wasm_bytes).unwrap();
let output = Pipe::new();
let input = Pipe::new();
let mut wasi_env = WasiState::new("Zellij")
.env("CLICOLOR_FORCE", "1")
.preopen(|p| {
p.directory(".") // FIXME: Change this to a more meaningful dir
.alias(".")
.read(true)
.write(true)
.create(true)
})
.unwrap()
.stdin(Box::new(input))
.stdout(Box::new(output))
.finalize()
.unwrap();
let wasi = wasi_env.import_object(&module).unwrap();
let plugin_env = PluginEnv {
plugin_id,
senders: bus.senders.clone(),
wasi_env,
subscriptions: Arc::new(Mutex::new(HashSet::new())),
};
let zellij = zellij_exports(&store, &plugin_env);
let instance = Instance::new(&module, &zellij.chain_back(wasi)).unwrap();
let start = instance.exports.get_function("_start").unwrap();
// This eventually calls the `.load()` method
start.call(&[]).unwrap();
plugin_map.insert(plugin_id, (instance, plugin_env));
pid_tx.send(plugin_id).unwrap();
plugin_id += 1;
}
PluginInstruction::Update(pid, event) => {
for (&i, (instance, plugin_env)) in &plugin_map {
let subs = plugin_env.subscriptions.lock().unwrap();
// FIXME: This is very janky... Maybe I should write my own macro for Event -> EventType?
let event_type = EventType::from_str(&event.to_string()).unwrap();
if (pid.is_none() || pid == Some(i)) && subs.contains(&event_type) {
let update = instance.exports.get_function("update").unwrap();
wasi_write_object(&plugin_env.wasi_env, &event);
update.call(&[]).unwrap();
}
}
drop(bus.senders.send_to_screen(ScreenInstruction::Render));
}
PluginInstruction::Render(buf_tx, pid, rows, cols) => {
let (instance, plugin_env) = plugin_map.get(&pid).unwrap();
let render = instance.exports.get_function("render").unwrap();
render
.call(&[Value::I32(rows as i32), Value::I32(cols as i32)])
.unwrap();
buf_tx.send(wasi_read_string(&plugin_env.wasi_env)).unwrap();
}
PluginInstruction::Unload(pid) => drop(plugin_map.remove(&pid)),
PluginInstruction::Exit => break,
}
}
}
// Plugin API ---------------------------------------------------------------------------------------------------------
pub(crate) fn zellij_exports(store: &Store, plugin_env: &PluginEnv) -> ImportObject {
macro_rules! zellij_export {
($($host_function:ident),+ $(,)?) => {
imports! {
"zellij" => {
$(stringify!($host_function) =>
Function::new_native_with_env(store, plugin_env.clone(), $host_function),)+
}
}
}
}
zellij_export! {
host_subscribe,
host_unsubscribe,
host_set_invisible_borders,
host_set_max_height,
host_set_selectable,
host_get_plugin_ids,
host_open_file,
host_set_timeout,
}
}
fn host_subscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let new: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.extend(new);
}
fn host_unsubscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let old: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.retain(|k| !old.contains(k));
}
fn host_set_selectable(plugin_env: &PluginEnv, selectable: i32) {
let selectable = selectable != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetSelectable(
PaneId::Plugin(plugin_env.plugin_id),
selectable,
))
.unwrap()
}
fn host_set_max_height(plugin_env: &PluginEnv, max_height: i32) {
let max_height = max_height as usize;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetMaxHeight(
PaneId::Plugin(plugin_env.plugin_id),
max_height,
))
.unwrap()
}
fn host_set_invisible_borders(plugin_env: &PluginEnv, invisible_borders: i32) {
let invisible_borders = invisible_borders != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetInvisibleBorders(
PaneId::Plugin(plugin_env.plugin_id),
invisible_borders,
))
.unwrap()
}
fn host_get_plugin_ids(plugin_env: &PluginEnv) |
fn host_open_file(plugin_env: &PluginEnv) {
let path: PathBuf = wasi_read_object(&plugin_env.wasi_env);
plugin_env
.senders
.send_to_pty(PtyInstruction::SpawnTerminal(Some(path)))
.unwrap();
}
fn host_set_timeout(plugin_env: &PluginEnv, secs: f64) {
// There is a fancy, high-performance way to do this with zero additional threads:
// If the plugin thread keeps a BinaryHeap of timer structs, it can manage multiple and easily `.peek()` at the
// next time to trigger in O(1) time. Once the wake-up time is known, the `wasm` thread can use `recv_timeout()`
// to wait for an event, with the timeout set to the time of the next wake-up. If events arrive in the meantime,
// they are handled, but if the timeout triggers, we replace the event from `recv()` with an
// `Update(pid, TimerEvent)` and pop the timer from the Heap (or reschedule it). No additional threads for as many
// timers as we'd like.
//
// But that's a lot of code, and this is a few lines:
let send_plugin_instructions = plugin_env.senders.to_plugin.clone();
let update_target = Some(plugin_env.plugin_id);
thread::spawn(move || {
let start_time = Instant::now();
thread::sleep(Duration::from_secs_f64(secs));
// FIXME: The way that elapsed time is being calculated here is not exact; it doesn't take into account the
// time it takes an event to actually reach the plugin after it's sent to the `wasm` thread.
let elapsed_time = Instant::now().duration_since(start_time).as_secs_f64();
send_plugin_instructions
.unwrap()
.send(PluginInstruction::Update(
update_target,
Event::Timer(elapsed_time),
))
.unwrap();
});
}
// Helper Functions ---------------------------------------------------------------------------------------------------
// FIXME: Unwrap city
pub fn wasi_read_string(wasi_env: &WasiEnv) -> String {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdout_mut().unwrap().as_mut().unwrap();
let mut buf = String::new();
wasi_file.read_to_string(&mut buf).unwrap();
buf
}
pub fn wasi_write_string(wasi_env: &WasiEnv, buf: &str) {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdin_mut().unwrap().as_mut().unwrap();
writeln!(wasi_file, "{}\r", buf).unwrap();
}
pub fn wasi_write_object(wasi_env: &WasiEnv, object: &impl Serialize) {
wasi_write_string(wasi_env, &serde_json::to_string(&object).unwrap());
}
pub fn wasi_read_object<T: DeserializeOwned>(wasi_env: &WasiEnv) -> T {
let json = wasi_read_string(wasi_env);
serde_json::from_str(&json).unwrap()
}
| {
let ids = PluginIds {
plugin_id: plugin_env.plugin_id,
zellij_pid: process::id(),
};
wasi_write_object(&plugin_env.wasi_env, &ids);
} | identifier_body |
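// Illustrative sketch (standalone; names assumed) of the three-step lookup in
// `PluginInstruction::Load` above: try the exact path, then `<path>.wasm`,
// then `<data_dir>/plugins/<path>.wasm`.
use std::fs;
use std::io;
use std::path::Path;

fn resolve_plugin_bytes(path: &Path, plugin_dir: &Path) -> io::Result<Vec<u8>> {
    fs::read(path)
        .or_else(|_| fs::read(path.with_extension("wasm")))
        .or_else(|_| fs::read(plugin_dir.join(path).with_extension("wasm")))
}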
wasm_vm.rs | use std::collections::{HashMap, HashSet};
use std::fs;
use std::path::PathBuf;
use std::process;
use std::str::FromStr;
use std::sync::{mpsc::Sender, Arc, Mutex};
use std::thread;
use std::time::{Duration, Instant};
use zellij_utils::{serde, zellij_tile};
use serde::{de::DeserializeOwned, Serialize};
use wasmer::{
imports, ChainableNamedResolver, Function, ImportObject, Instance, Module, Store, Value,
WasmerEnv,
};
use wasmer_wasi::{Pipe, WasiEnv, WasiState};
use zellij_tile::data::{Event, EventType, PluginIds};
use crate::{
panes::PaneId,
pty::PtyInstruction,
screen::ScreenInstruction,
thread_bus::{Bus, ThreadSenders},
};
use zellij_utils::errors::{ContextType, PluginContext};
#[derive(Clone, Debug)]
pub(crate) enum PluginInstruction {
Load(Sender<u32>, PathBuf),
Update(Option<u32>, Event), // Focused plugin / broadcast, event data
Render(Sender<String>, u32, usize, usize), // String buffer, plugin id, rows, cols
Unload(u32),
Exit,
}
impl From<&PluginInstruction> for PluginContext {
fn from(plugin_instruction: &PluginInstruction) -> Self {
match *plugin_instruction {
PluginInstruction::Load(..) => PluginContext::Load,
PluginInstruction::Update(..) => PluginContext::Update,
PluginInstruction::Render(..) => PluginContext::Render,
PluginInstruction::Unload(_) => PluginContext::Unload,
PluginInstruction::Exit => PluginContext::Exit,
}
}
}
#[derive(WasmerEnv, Clone)]
pub(crate) struct PluginEnv {
pub plugin_id: u32,
pub senders: ThreadSenders,
pub wasi_env: WasiEnv,
pub subscriptions: Arc<Mutex<HashSet<EventType>>>,
}
// Thread main --------------------------------------------------------------------------------------------------------
pub(crate) fn wasm_thread_main(bus: Bus<PluginInstruction>, store: Store, data_dir: PathBuf) {
let mut plugin_id = 0;
let mut plugin_map = HashMap::new();
loop {
let (event, mut err_ctx) = bus.recv().expect("failed to receive event on channel");
err_ctx.add_call(ContextType::Plugin((&event).into()));
match event {
PluginInstruction::Load(pid_tx, path) => {
let plugin_dir = data_dir.join("plugins/");
let wasm_bytes = fs::read(&path)
.or_else(|_| fs::read(&path.with_extension("wasm")))
.or_else(|_| fs::read(&plugin_dir.join(&path).with_extension("wasm")))
.unwrap_or_else(|_| panic!("cannot find plugin {}", &path.display()));
// FIXME: Cache this compiled module on disk. I could use `(de)serialize_to_file()` for that
let module = Module::new(&store, &wasm_bytes).unwrap();
let output = Pipe::new();
let input = Pipe::new();
let mut wasi_env = WasiState::new("Zellij")
.env("CLICOLOR_FORCE", "1")
.preopen(|p| {
p.directory(".") // FIXME: Change this to a more meaningful dir
.alias(".")
.read(true)
.write(true)
.create(true)
})
.unwrap()
.stdin(Box::new(input))
.stdout(Box::new(output))
.finalize()
.unwrap();
let wasi = wasi_env.import_object(&module).unwrap();
let plugin_env = PluginEnv {
plugin_id,
senders: bus.senders.clone(),
wasi_env,
subscriptions: Arc::new(Mutex::new(HashSet::new())),
};
let zellij = zellij_exports(&store, &plugin_env);
let instance = Instance::new(&module, &zellij.chain_back(wasi)).unwrap();
let start = instance.exports.get_function("_start").unwrap();
// This eventually calls the `.load()` method
start.call(&[]).unwrap();
plugin_map.insert(plugin_id, (instance, plugin_env));
pid_tx.send(plugin_id).unwrap();
plugin_id += 1;
}
PluginInstruction::Update(pid, event) => {
for (&i, (instance, plugin_env)) in &plugin_map {
let subs = plugin_env.subscriptions.lock().unwrap();
// FIXME: This is very janky... Maybe I should write my own macro for Event -> EventType?
let event_type = EventType::from_str(&event.to_string()).unwrap();
if (pid.is_none() || pid == Some(i)) && subs.contains(&event_type) {
let update = instance.exports.get_function("update").unwrap();
wasi_write_object(&plugin_env.wasi_env, &event);
update.call(&[]).unwrap();
}
}
drop(bus.senders.send_to_screen(ScreenInstruction::Render));
}
PluginInstruction::Render(buf_tx, pid, rows, cols) => {
let (instance, plugin_env) = plugin_map.get(&pid).unwrap();
let render = instance.exports.get_function("render").unwrap();
render
.call(&[Value::I32(rows as i32), Value::I32(cols as i32)])
.unwrap();
| }
PluginInstruction::Unload(pid) => drop(plugin_map.remove(&pid)),
PluginInstruction::Exit => break,
}
}
}
// Plugin API ---------------------------------------------------------------------------------------------------------
pub(crate) fn zellij_exports(store: &Store, plugin_env: &PluginEnv) -> ImportObject {
macro_rules! zellij_export {
($($host_function:ident),+ $(,)?) => {
imports! {
"zellij" => {
$(stringify!($host_function) =>
Function::new_native_with_env(store, plugin_env.clone(), $host_function),)+
}
}
}
}
zellij_export! {
host_subscribe,
host_unsubscribe,
host_set_invisible_borders,
host_set_max_height,
host_set_selectable,
host_get_plugin_ids,
host_open_file,
host_set_timeout,
}
}
fn host_subscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let new: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.extend(new);
}
fn host_unsubscribe(plugin_env: &PluginEnv) {
let mut subscriptions = plugin_env.subscriptions.lock().unwrap();
let old: HashSet<EventType> = wasi_read_object(&plugin_env.wasi_env);
subscriptions.retain(|k| !old.contains(k));
}
fn host_set_selectable(plugin_env: &PluginEnv, selectable: i32) {
let selectable = selectable != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetSelectable(
PaneId::Plugin(plugin_env.plugin_id),
selectable,
))
.unwrap()
}
fn host_set_max_height(plugin_env: &PluginEnv, max_height: i32) {
let max_height = max_height as usize;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetMaxHeight(
PaneId::Plugin(plugin_env.plugin_id),
max_height,
))
.unwrap()
}
fn host_set_invisible_borders(plugin_env: &PluginEnv, invisible_borders: i32) {
let invisible_borders = invisible_borders != 0;
plugin_env
.senders
.send_to_screen(ScreenInstruction::SetInvisibleBorders(
PaneId::Plugin(plugin_env.plugin_id),
invisible_borders,
))
.unwrap()
}
fn host_get_plugin_ids(plugin_env: &PluginEnv) {
let ids = PluginIds {
plugin_id: plugin_env.plugin_id,
zellij_pid: process::id(),
};
wasi_write_object(&plugin_env.wasi_env, &ids);
}
fn host_open_file(plugin_env: &PluginEnv) {
let path: PathBuf = wasi_read_object(&plugin_env.wasi_env);
plugin_env
.senders
.send_to_pty(PtyInstruction::SpawnTerminal(Some(path)))
.unwrap();
}
fn host_set_timeout(plugin_env: &PluginEnv, secs: f64) {
// There is a fancy, high-performance way to do this with zero additional threads:
// If the plugin thread keeps a BinaryHeap of timer structs, it can manage multiple and easily `.peek()` at the
// next time to trigger in O(1) time. Once the wake-up time is known, the `wasm` thread can use `recv_timeout()`
// to wait for an event with the timeout set to be the time of the next wake up. If events come in in the meantime,
// they are handled, but if the timeout triggers, we replace the event from `recv()` with an
// `Update(pid, TimerEvent)` and pop the timer from the Heap (or reschedule it). No additional threads for as many
// timers as we'd like.
//
// But that's a lot of code, and this is a few lines:
let send_plugin_instructions = plugin_env.senders.to_plugin.clone();
let update_target = Some(plugin_env.plugin_id);
thread::spawn(move || {
let start_time = Instant::now();
thread::sleep(Duration::from_secs_f64(secs));
// FIXME: The way that elapsed time is being calculated here is not exact; it doesn't take into account the
// time it takes an event to actually reach the plugin after it's sent to the `wasm` thread.
let elapsed_time = Instant::now().duration_since(start_time).as_secs_f64();
send_plugin_instructions
.unwrap()
.send(PluginInstruction::Update(
update_target,
Event::Timer(elapsed_time),
))
.unwrap();
});
}
// Helper Functions ---------------------------------------------------------------------------------------------------
// FIXME: Unwrap city
pub fn wasi_read_string(wasi_env: &WasiEnv) -> String {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdout_mut().unwrap().as_mut().unwrap();
let mut buf = String::new();
wasi_file.read_to_string(&mut buf).unwrap();
buf
}
pub fn wasi_write_string(wasi_env: &WasiEnv, buf: &str) {
let mut state = wasi_env.state();
let wasi_file = state.fs.stdin_mut().unwrap().as_mut().unwrap();
writeln!(wasi_file, "{}\r", buf).unwrap();
}
pub fn wasi_write_object(wasi_env: &WasiEnv, object: &impl Serialize) {
wasi_write_string(wasi_env, &serde_json::to_string(&object).unwrap());
}
pub fn wasi_read_object<T: DeserializeOwned>(wasi_env: &WasiEnv) -> T {
let json = wasi_read_string(wasi_env);
serde_json::from_str(&json).unwrap()
} | buf_tx.send(wasi_read_string(&plugin_env.wasi_env)).unwrap(); | random_line_split |
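// Self-contained illustration of the timing pattern in `host_set_timeout`
// above: sleep for a fractional number of seconds, then report the measured
// elapsed time, which can only overshoot the request (as the FIXME notes).
use std::thread;
use std::time::{Duration, Instant};

fn main() {
    let start = Instant::now();
    thread::sleep(Duration::from_secs_f64(0.25));
    let elapsed = Instant::now().duration_since(start).as_secs_f64();
    assert!(elapsed >= 0.25); // sleep guarantees at least the requested time
    println!("slept for {elapsed:.3}s");
}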
state.rs | use std::any::type_name;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use cosmwasm_std::{Api, CanonicalAddr, ReadonlyStorage, StdError, StdResult, Storage};
use cosmwasm_storage::{PrefixedStorage, ReadonlyPrefixedStorage};
use secret_toolkit::{
serialization::{Bincode2, Serde},
storage::{AppendStore, AppendStoreMut},
};
use crate::msg::{Battle, BattleDump, ContractInfo, Hero, HeroDump, PlayerStats, TokenInfo};
use crate::stats::Stats;
pub const CONFIG_KEY: &[u8] = b"config";
pub const PREFIX_VIEW_KEY: &[u8] = b"viewkey";
pub const PREFIX_HISTORY: &[u8] = b"history";
pub const PREFIX_BATTLE_ID: &[u8] = b"battleids";
pub const PREFIX_TOURN_STATS: &[u8] = b"trnstat";
pub const PREFIX_ALL_STATS: &[u8] = b"allstat";
pub const PREFIX_PLAYERS: &[u8] = b"players";
pub const PREFIX_SEEN: &[u8] = b"seen";
pub const ADMIN_KEY: &[u8] = b"admin";
pub const BOTS_KEY: &[u8] = b"bots";
pub const LEADERBOARDS_KEY: &[u8] = b"ldrbds";
pub const IMPORT_FROM_KEY: &[u8] = b"import";
pub const EXPORT_CONFIG_KEY: &[u8] = b"export";
/// arena config
#[derive(Serialize, Deserialize)]
pub struct Config {
/// heroes waiting to fight
pub heroes: Vec<StoreWaitingHero>,
/// prng seed
pub prng_seed: Vec<u8>,
/// combined entropy strings supplied with the heroes
pub entropy: String,
/// current battle count in this arena
pub battle_cnt: u64,
/// battle count from previous arenas
pub previous_battles: u64,
/// viewing key used with the card contracts
pub viewing_key: String,
/// contract info of all the card versions
pub card_versions: Vec<StoreContractInfo>,
/// true if battles are halted
pub fight_halt: bool,
/// total number of players
pub player_cnt: u32,
/// list of new players that need to be added
pub new_players: Vec<CanonicalAddr>,
}
/// export config
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExportConfig {
/// new arena contract info
pub new_arena: StoreContractInfo,
/// next block to export
pub next: u32,
}
/// stored leaderboard entry
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Rank {
/// player's score
pub score: i32,
/// player's address
pub address: CanonicalAddr,
}
/// tournament data
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Tourney {
/// tournament start time
pub start: u64,
/// tournament leaderboard
pub leaderboard: Vec<Rank>,
}
/// leaderboards
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Leaderboards {
/// tournament leaderboard
pub tourney: Tourney,
/// all time leaderboard
pub all_time: Vec<Rank>,
}
/// tournament stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TourneyStats {
/// time of last update
pub last_seen: u64,
/// player's stats for this tournament
pub stats: StorePlayerStats,
}
/// stored player stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StorePlayerStats {
/// player's score
pub score: i32,
/// number of battles
pub battles: u32,
/// number of wins
pub wins: u32,
/// number of ties
pub ties: u32,
/// number of times took 3rd place in a 2-way tie
pub third_in_two_way_ties: u32,
/// number of losses
pub losses: u32,
}
impl Default for StorePlayerStats {
fn default() -> Self {
Self {
score: 0,
battles: 0,
wins: 0,
ties: 0,
third_in_two_way_ties: 0,
losses: 0,
}
}
}
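// Hedged note (not in the original): every field here defaults to zero, so
// this manual impl matches what `#[derive(Default)]` would generate; writing
// it out just makes the starting stats explicit at a glance.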
impl StorePlayerStats {
/// Returns StdResult<PlayerStats> from converting a StorePlayerStats to a displayable
/// PlayerStats
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `address` - a reference to the address corresponding to these stats
pub fn into_humanized<A: Api>(
self,
api: &A,
address: &CanonicalAddr,
) -> StdResult<PlayerStats> {
let stats = PlayerStats {
score: self.score,
address: api.human_address(address)?,
battles: self.battles,
wins: self.wins,
ties: self.ties,
third_in_two_way_ties: self.third_in_two_way_ties,
losses: self.losses,
};
Ok(stats)
}
}
/// waiting hero's info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreWaitingHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's stats
pub stats: Stats,
}
/// hero info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's skills before the battle
pub pre_battle_skills: Vec<u8>,
/// hero's skills after the battle
pub post_battle_skills: Vec<u8>,
}
impl StoreHero {
/// Returns StdResult<Hero> from converting a StoreHero to a displayable Hero
///
/// # Arguments
///
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn | (self, versions: &[ContractInfo]) -> StdResult<Hero> {
let hero = Hero {
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
/// Returns StdResult<HeroDump> from converting a StoreHero to a displayable HeroDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(self, api: &A, versions: &[ContractInfo]) -> StdResult<HeroDump> {
let hero = HeroDump {
owner: api.human_address(&self.owner)?,
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
}
/// a hero's token info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreTokenInfo {
/// hero's token id
pub token_id: String,
/// index of the card contract version
pub version: u8,
}
/// battle info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreBattle {
/// battle id number
pub battle_number: u64,
/// time the battle took place, in seconds since the Unix epoch (01/01/1970)
pub timestamp: u64,
/// heroes that fought
pub heroes: Vec<StoreHero>,
/// skill used to determine the winner
pub skill_used: u8,
/// index of winning hero
pub winner: Option<u8>,
/// winning skill value
pub winning_skill_value: u8,
}
impl StoreBattle {
/// Returns StdResult<Battle> from converting a StoreBattle to a displayable Battle
///
/// # Arguments
///
/// * `address` - a reference to the address querying their battle history
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(
mut self,
address: &CanonicalAddr,
versions: &[ContractInfo],
) -> StdResult<Battle> {
if let Some(pos) = self.heroes.iter().position(|h| h.owner == *address) {
let winner = self.winner.map(|u| self.heroes[u as usize].name.clone());
let battle = Battle {
battle_number: self.battle_number,
timestamp: self.timestamp,
my_hero: self.heroes.swap_remove(pos).into_humanized(versions)?,
skill_used: self.skill_used,
winner,
winning_skill_value: self.winning_skill_value,
i_won: self.winner.map_or_else(|| false, |w| w as usize == pos),
};
Ok(battle)
} else {
Err(StdError::generic_err("Battle History corrupted"))
}
}
/// Returns StdResult<BattleDump> from converting a StoreBattle to a displayable BattleDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(
mut self,
api: &A,
versions: &[ContractInfo],
) -> StdResult<BattleDump> {
let battle = BattleDump {
battle_number: self.battle_number,
timestamp: self.timestamp,
heroes: self
.heroes
.drain(..)
.map(|h| h.into_dump(api, versions))
.collect::<StdResult<Vec<HeroDump>>>()?,
skill_used: self.skill_used,
winner: self.winner,
winning_skill_value: self.winning_skill_value,
};
Ok(battle)
}
}
/// code hash and address of a contract
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreContractInfo {
/// contract's code hash string
pub code_hash: String,
/// contract's address
pub address: CanonicalAddr,
}
impl StoreContractInfo {
/// Returns StdResult<ContractInfo> from converting a StoreContractInfo to a displayable
/// ContractInfo
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
pub fn to_humanized<A: Api>(&self, api: &A) -> StdResult<ContractInfo> {
let info = ContractInfo {
address: api.human_address(&self.address)?,
code_hash: self.code_hash.clone(),
};
Ok(info)
}
}
/// Returns StdResult<()> after saving the battle id
///
/// # Arguments
///
/// * `storage` - a mutable reference to the storage this item should go to
/// * `battle_num` - the battle id to store
/// * `address` - a reference to the address for which to store this battle id
pub fn append_battle_for_addr<S: Storage>(
storage: &mut S,
battle_num: u64,
address: &CanonicalAddr,
) -> StdResult<()> {
let mut store = PrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
let mut store = AppendStoreMut::attach_or_create(&mut store)?;
store.push(&battle_num)
}
/// Returns StdResult<Vec<Battle>> of the battles to display
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `storage` - a reference to the contract's storage
/// * `address` - a reference to the address whose battles to display
/// * `page` - page to start displaying
/// * `page_size` - number of txs per page
pub fn get_history<A: Api, S: ReadonlyStorage>(
api: &A,
storage: &S,
address: &CanonicalAddr,
page: u32,
page_size: u32,
) -> StdResult<Vec<Battle>> {
let id_store =
ReadonlyPrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
// Try to access the storage of battle ids for the account.
// If it doesn't exist yet, return an empty list of battles.
let id_store = if let Some(result) = AppendStore::<u64, _>::attach(&id_store) {
result?
} else {
return Ok(vec![]);
};
let config: Config = load(storage, CONFIG_KEY)?;
let versions = config
.card_versions
.iter()
.map(|v| v.to_humanized(api))
.collect::<StdResult<Vec<ContractInfo>>>()?;
// access battle storage
let his_store = ReadonlyPrefixedStorage::new(PREFIX_HISTORY, storage);
// Take `page_size` battles starting from the latest battle, potentially skipping `page * page_size`
// battles from the start.
let battles: StdResult<Vec<Battle>> = id_store
.iter()
.rev()
.skip((page * page_size) as usize)
.take(page_size as usize)
.map(|id| {
id.map(|id| {
load(&his_store, &id.to_le_bytes())
.and_then(|b: StoreBattle| b.into_humanized(address, &versions))
})
.and_then(|x| x)
})
.collect();
battles
}
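// Dependency-free illustration (not from the contract) of the pagination in
// `get_history` above: iterate newest-first, skip `page` whole pages, then
// take one page.
fn paginate<T: Clone>(items: &[T], page: u32, page_size: u32) -> Vec<T> {
    items
        .iter()
        .rev() // latest battle first, matching the AppendStore traversal
        .skip((page * page_size) as usize)
        .take(page_size as usize)
        .cloned()
        .collect()
}

fn main() {
    let ids: Vec<u64> = (1..=10).collect();
    assert_eq!(paginate(&ids, 0, 3), vec![10, 9, 8]);
    assert_eq!(paginate(&ids, 1, 3), vec![7, 6, 5]);
}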
pub fn save<T: Serialize, S: Storage>(storage: &mut S, key: &[u8], value: &T) -> StdResult<()> {
storage.set(key, &Bincode2::serialize(value)?);
Ok(())
}
pub fn remove<S: Storage>(storage: &mut S, key: &[u8]) {
storage.remove(key);
}
pub fn load<T: DeserializeOwned, S: ReadonlyStorage>(storage: &S, key: &[u8]) -> StdResult<T> {
Bincode2::deserialize(
&storage
.get(key)
.ok_or_else(|| StdError::not_found(type_name::<T>()))?,
)
}
pub fn may_load<T: DeserializeOwned, S: ReadonlyStorage>(
storage: &S,
key: &[u8],
) -> StdResult<Option<T>> {
match storage.get(key) {
Some(value) => Bincode2::deserialize(&value).map(Some),
None => Ok(None),
}
}
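// Typical usage of the helpers above (hedged sketch; `storage` is any
// cosmwasm `Storage` implementation, e.g. `MockStorage` in unit tests):
//
//     save(&mut storage, CONFIG_KEY, &config)?;                   // create or overwrite
//     let cfg: Config = load(&storage, CONFIG_KEY)?;              // StdError if absent
//     let cfg: Option<Config> = may_load(&storage, CONFIG_KEY)?;  // Ok(None) if absent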
| into_humanized | identifier_name |
state.rs | use std::any::type_name;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use cosmwasm_std::{Api, CanonicalAddr, ReadonlyStorage, StdError, StdResult, Storage};
use cosmwasm_storage::{PrefixedStorage, ReadonlyPrefixedStorage};
use secret_toolkit::{
serialization::{Bincode2, Serde},
storage::{AppendStore, AppendStoreMut},
};
use crate::msg::{Battle, BattleDump, ContractInfo, Hero, HeroDump, PlayerStats, TokenInfo};
use crate::stats::Stats;
pub const CONFIG_KEY: &[u8] = b"config";
pub const PREFIX_VIEW_KEY: &[u8] = b"viewkey";
pub const PREFIX_HISTORY: &[u8] = b"history";
pub const PREFIX_BATTLE_ID: &[u8] = b"battleids";
pub const PREFIX_TOURN_STATS: &[u8] = b"trnstat";
pub const PREFIX_ALL_STATS: &[u8] = b"allstat";
pub const PREFIX_PLAYERS: &[u8] = b"players";
pub const PREFIX_SEEN: &[u8] = b"seen";
pub const ADMIN_KEY: &[u8] = b"admin";
pub const BOTS_KEY: &[u8] = b"bots";
pub const LEADERBOARDS_KEY: &[u8] = b"ldrbds";
pub const IMPORT_FROM_KEY: &[u8] = b"import";
pub const EXPORT_CONFIG_KEY: &[u8] = b"export";
/// arena config
#[derive(Serialize, Deserialize)]
pub struct Config {
/// heroes waiting to fight
pub heroes: Vec<StoreWaitingHero>,
/// prng seed
pub prng_seed: Vec<u8>,
/// combined entropy strings supplied with the heroes
pub entropy: String,
/// current battle count in this arena
pub battle_cnt: u64,
/// battle count from previous arenas
pub previous_battles: u64,
/// viewing key used with the card contracts
pub viewing_key: String,
/// contract info of all the card versions
pub card_versions: Vec<StoreContractInfo>,
/// true if battles are halted
pub fight_halt: bool,
/// total number of players
pub player_cnt: u32,
/// list of new players that need to be added
pub new_players: Vec<CanonicalAddr>,
}
/// export config
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExportConfig {
/// new arena contract info
pub new_arena: StoreContractInfo,
/// next block to export
pub next: u32,
}
/// stored leaderboard entry
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Rank {
/// player's score
pub score: i32,
/// player's address
pub address: CanonicalAddr,
}
/// tournament data
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Tourney {
/// tournament start time
pub start: u64,
/// tournament leaderboard
pub leaderboard: Vec<Rank>,
}
/// leaderboards
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Leaderboards {
/// tournament leaderboard
pub tourney: Tourney,
/// all time leaderboard
pub all_time: Vec<Rank>,
}
/// tournament stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TourneyStats {
/// time of last update
pub last_seen: u64,
/// player's stats for this tournament
pub stats: StorePlayerStats,
}
/// stored player stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StorePlayerStats {
/// player's score
pub score: i32,
/// number of battles
pub battles: u32,
/// number of wins
pub wins: u32,
/// number of ties
pub ties: u32,
/// number of times took 3rd place in a 2-way tie
pub third_in_two_way_ties: u32,
/// number of losses
pub losses: u32,
}
impl Default for StorePlayerStats {
fn default() -> Self {
Self {
score: 0,
battles: 0,
wins: 0,
ties: 0,
third_in_two_way_ties: 0,
losses: 0,
}
}
}
impl StorePlayerStats {
/// Returns StdResult<PlayerStats> from converting a StorePlayerStats to a displayable
/// PlayerStats
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `address` - a reference to the address corresponding to these stats
pub fn into_humanized<A: Api>(
self,
api: &A,
address: &CanonicalAddr,
) -> StdResult<PlayerStats> {
let stats = PlayerStats {
score: self.score,
address: api.human_address(address)?,
battles: self.battles,
wins: self.wins,
ties: self.ties,
third_in_two_way_ties: self.third_in_two_way_ties,
losses: self.losses,
};
Ok(stats)
}
}
/// waiting hero's info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreWaitingHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's stats
pub stats: Stats,
}
/// hero info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's skills before the battle
pub pre_battle_skills: Vec<u8>,
/// hero's skills after the battle
pub post_battle_skills: Vec<u8>,
}
impl StoreHero {
/// Returns StdResult<Hero> from converting a StoreHero to a displayable Hero
///
/// # Arguments
///
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(self, versions: &[ContractInfo]) -> StdResult<Hero> {
let hero = Hero {
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
/// Returns StdResult<HeroDump> from converting a StoreHero to a displayable HeroDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(self, api: &A, versions: &[ContractInfo]) -> StdResult<HeroDump> {
let hero = HeroDump {
owner: api.human_address(&self.owner)?,
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
}
/// a hero's token info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreTokenInfo {
/// hero's token id
pub token_id: String,
/// index of the card contract version
pub version: u8,
}
/// battle info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreBattle {
/// battle id number
pub battle_number: u64,
/// number of seconds since epoch time 01/01/1970 at which the battle took place
pub timestamp: u64,
/// heroes that fought
pub heroes: Vec<StoreHero>,
/// skill used to determine the winner
pub skill_used: u8,
/// index of winning hero
pub winner: Option<u8>,
/// winning skill value
pub winning_skill_value: u8,
}
impl StoreBattle {
/// Returns StdResult<Battle> from converting a StoreBattle to a displayable Battle
///
/// # Arguments
///
/// * `address` - a reference to the address querying their battle history
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(
mut self,
address: &CanonicalAddr,
versions: &[ContractInfo],
) -> StdResult<Battle> {
if let Some(pos) = self.heroes.iter().position(|h| h.owner == *address) {
let winner = self.winner.map(|u| self.heroes[u as usize].name.clone());
let battle = Battle {
battle_number: self.battle_number,
timestamp: self.timestamp,
my_hero: self.heroes.swap_remove(pos).into_humanized(versions)?,
skill_used: self.skill_used,
winner,
winning_skill_value: self.winning_skill_value,
i_won: self.winner.map_or(false, |w| w as usize == pos),
};
Ok(battle)
} else {
Err(StdError::generic_err("Battle History corupted"))
}
}
/// Returns StdResult<BattleDump> from converting a StoreBattle to a displayable BattleDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(
mut self,
api: &A,
versions: &[ContractInfo],
) -> StdResult<BattleDump> {
let battle = BattleDump {
battle_number: self.battle_number,
timestamp: self.timestamp,
heroes: self
.heroes
.drain(..)
.map(|h| h.into_dump(api, versions))
.collect::<StdResult<Vec<HeroDump>>>()?,
skill_used: self.skill_used,
winner: self.winner,
winning_skill_value: self.winning_skill_value,
};
Ok(battle)
}
}
/// code hash and address of a contract
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreContractInfo {
/// contract's code hash string
pub code_hash: String,
/// contract's address
pub address: CanonicalAddr,
}
impl StoreContractInfo {
/// Returns StdResult<ContractInfo> from converting a StoreContractInfo to a displayable
/// ContractInfo
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
pub fn to_humanized<A: Api>(&self, api: &A) -> StdResult<ContractInfo> {
let info = ContractInfo {
address: api.human_address(&self.address)?,
code_hash: self.code_hash.clone(),
};
Ok(info)
}
}
/// Returns StdResult<()> after saving the battle id
///
/// # Arguments
///
/// * `storage` - a mutable reference to the storage this item should go to
/// * `battle_num` - the battle id to store
/// * `address` - a reference to the address for which to store this battle id
pub fn append_battle_for_addr<S: Storage>(
storage: &mut S,
battle_num: u64,
address: &CanonicalAddr,
) -> StdResult<()> {
let mut store = PrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
let mut store = AppendStoreMut::attach_or_create(&mut store)?;
store.push(&battle_num)
}
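// Hedged sketch (illustrative, not in the original file): the same two-level
// prefix can be reopened read-only, e.g. to count how many battles an address
// has recorded. `AppendStore::attach` returns None until the first push.
fn battle_count<S: ReadonlyStorage>(storage: &S, address: &CanonicalAddr) -> u32 {
    let store =
        ReadonlyPrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
    AppendStore::<u64, _>::attach(&store)
        .and_then(|result| result.ok())
        .map_or(0, |ids| ids.len())
}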
| /// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `storage` - a reference to the contract's storage
/// * `address` - a reference to the address whose battles to display
/// * `page` - page to start displaying
/// * `page_size` - number of battles per page
pub fn get_history<A: Api, S: ReadonlyStorage>(
api: &A,
storage: &S,
address: &CanonicalAddr,
page: u32,
page_size: u32,
) -> StdResult<Vec<Battle>> {
let id_store =
ReadonlyPrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
// Try to access the storage of battle ids for the account.
// If it doesn't exist yet, return an empty list of battles.
let id_store = if let Some(result) = AppendStore::<u64, _>::attach(&id_store) {
result?
} else {
return Ok(vec![]);
};
let config: Config = load(storage, CONFIG_KEY)?;
let versions = config
.card_versions
.iter()
.map(|v| v.to_humanized(api))
.collect::<StdResult<Vec<ContractInfo>>>()?;
// access battle storage
let his_store = ReadonlyPrefixedStorage::new(PREFIX_HISTORY, storage);
// Take `page_size` battles starting from the latest battle, skipping the
// `page * page_size` most recent battles first.
let battles: StdResult<Vec<Battle>> = id_store
.iter()
.rev()
.skip((page * page_size) as usize)
.take(page_size as usize)
.map(|id| {
id.map(|id| {
load(&his_store, &id.to_le_bytes())
.and_then(|b: StoreBattle| b.into_humanized(address, &versions))
})
.and_then(|x| x)
})
.collect();
battles
}
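// Hedged usage sketch (assumed caller, not in the original file): fetch the
// ten most recent battles, newest first; `page = 1` would return the ten
// before those, and so on.
fn recent_battles<A: Api, S: ReadonlyStorage>(
    api: &A,
    storage: &S,
    player: &CanonicalAddr,
) -> StdResult<Vec<Battle>> {
    get_history(api, storage, player, 0, 10)
}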
pub fn save<T: Serialize, S: Storage>(storage: &mut S, key: &[u8], value: &T) -> StdResult<()> {
storage.set(key, &Bincode2::serialize(value)?);
Ok(())
}
pub fn remove<S: Storage>(storage: &mut S, key: &[u8]) {
storage.remove(key);
}
pub fn load<T: DeserializeOwned, S: ReadonlyStorage>(storage: &S, key: &[u8]) -> StdResult<T> {
Bincode2::deserialize(
&storage
.get(key)
.ok_or_else(|| StdError::not_found(type_name::<T>()))?,
)
}
pub fn may_load<T: DeserializeOwned, S: ReadonlyStorage>(
storage: &S,
key: &[u8],
) -> StdResult<Option<T>> {
match storage.get(key) {
Some(value) => Bincode2::deserialize(&value).map(Some),
None => Ok(None),
}
} | /// Returns StdResult<Vec<Battle>> of the battles to display
///
/// # Arguments
/// | random_line_split |
state.rs | use std::any::type_name;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use cosmwasm_std::{Api, CanonicalAddr, ReadonlyStorage, StdError, StdResult, Storage};
use cosmwasm_storage::{PrefixedStorage, ReadonlyPrefixedStorage};
use secret_toolkit::{
serialization::{Bincode2, Serde},
storage::{AppendStore, AppendStoreMut},
};
use crate::msg::{Battle, BattleDump, ContractInfo, Hero, HeroDump, PlayerStats, TokenInfo};
use crate::stats::Stats;
pub const CONFIG_KEY: &[u8] = b"config";
pub const PREFIX_VIEW_KEY: &[u8] = b"viewkey";
pub const PREFIX_HISTORY: &[u8] = b"history";
pub const PREFIX_BATTLE_ID: &[u8] = b"battleids";
pub const PREFIX_TOURN_STATS: &[u8] = b"trnstat";
pub const PREFIX_ALL_STATS: &[u8] = b"allstat";
pub const PREFIX_PLAYERS: &[u8] = b"players";
pub const PREFIX_SEEN: &[u8] = b"seen";
pub const ADMIN_KEY: &[u8] = b"admin";
pub const BOTS_KEY: &[u8] = b"bots";
pub const LEADERBOARDS_KEY: &[u8] = b"ldrbds";
pub const IMPORT_FROM_KEY: &[u8] = b"import";
pub const EXPORT_CONFIG_KEY: &[u8] = b"export";
/// arena config
#[derive(Serialize, Deserialize)]
pub struct Config {
/// heroes waiting to fight
pub heroes: Vec<StoreWaitingHero>,
/// prng seed
pub prng_seed: Vec<u8>,
/// combined entropy strings supplied with the heroes
pub entropy: String,
/// current battle count in this arena
pub battle_cnt: u64,
/// battle count from previous arenas
pub previous_battles: u64,
/// viewing key used with the card contracts
pub viewing_key: String,
/// contract info of all the card versions
pub card_versions: Vec<StoreContractInfo>,
/// true if battles are halted
pub fight_halt: bool,
/// total number of players
pub player_cnt: u32,
/// list of new players that need to be added
pub new_players: Vec<CanonicalAddr>,
}
/// export config
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExportConfig {
/// new arena contract info
pub new_arena: StoreContractInfo,
/// next block to export
pub next: u32,
}
/// stored leaderboard entry
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Rank {
/// player's score
pub score: i32,
/// player's address
pub address: CanonicalAddr,
}
/// tournament data
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Tourney {
/// tournament start time
pub start: u64,
/// tournament leaderboard
pub leaderboard: Vec<Rank>,
}
/// leaderboards
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Leaderboards {
/// tournament leaderboard
pub tourney: Tourney,
/// all time leaderboard
pub all_time: Vec<Rank>,
}
/// tournament stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TourneyStats {
/// time of last update
pub last_seen: u64,
/// player's stats for this tournament
pub stats: StorePlayerStats,
}
/// stored player stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StorePlayerStats {
/// player's score
pub score: i32,
/// number of battles
pub battles: u32,
/// number of wins
pub wins: u32,
/// number of ties
pub ties: u32,
/// number of times the player took 3rd place in a 2-way tie
pub third_in_two_way_ties: u32,
/// number of losses
pub losses: u32,
}
impl Default for StorePlayerStats {
fn default() -> Self {
Self {
score: 0,
battles: 0,
wins: 0,
ties: 0,
third_in_two_way_ties: 0,
losses: 0,
}
}
}
impl StorePlayerStats {
/// Returns StdResult<PlayerStats> from converting a StorePlayerStats to a displayable
/// PlayerStats
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `address` - a reference to the address corresponding to these stats
pub fn into_humanized<A: Api>(
self,
api: &A,
address: &CanonicalAddr,
) -> StdResult<PlayerStats> {
let stats = PlayerStats {
score: self.score,
address: api.human_address(address)?,
battles: self.battles,
wins: self.wins,
ties: self.ties,
third_in_two_way_ties: self.third_in_two_way_ties,
losses: self.losses,
};
Ok(stats)
}
}
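// Hedged sketch (not in the original file): humanizing a whole stats listing
// just maps `into_humanized` over (address, stats) pairs; how those pairs are
// gathered from storage is left out here.
fn humanize_entries<A: Api>(
    api: &A,
    entries: Vec<(CanonicalAddr, StorePlayerStats)>,
) -> StdResult<Vec<PlayerStats>> {
    entries
        .into_iter()
        .map(|(addr, stats)| stats.into_humanized(api, &addr))
        .collect()
}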
/// waiting hero's info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreWaitingHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's stats
pub stats: Stats,
}
/// hero info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's skills before the battle
pub pre_battle_skills: Vec<u8>,
/// hero's skills after the battle
pub post_battle_skills: Vec<u8>,
}
impl StoreHero {
/// Returns StdResult<Hero> from converting a StoreHero to a displayable Hero
///
/// # Arguments
///
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(self, versions: &[ContractInfo]) -> StdResult<Hero> {
let hero = Hero {
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
/// Returns StdResult<HeroDump> from converting a StoreHero to a displayable HeroDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(self, api: &A, versions: &[ContractInfo]) -> StdResult<HeroDump> {
let hero = HeroDump {
owner: api.human_address(&self.owner)?,
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
}
/// a hero's token info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreTokenInfo {
/// hero's token id
pub token_id: String,
/// index of the card contract version
pub version: u8,
}
/// battle info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreBattle {
/// battle id number
pub battle_number: u64,
/// number of seconds since epoch time 01/01/1970 at which the battle took place
pub timestamp: u64,
/// heroes that fought
pub heroes: Vec<StoreHero>,
/// skill used to determine the winner
pub skill_used: u8,
/// index of winning hero
pub winner: Option<u8>,
/// winning skill value
pub winning_skill_value: u8,
}
impl StoreBattle {
/// Returns StdResult<Battle> from converting a StoreBattle to a displayable Battle
///
/// # Arguments
///
/// * `address` - a reference to the address querying their battle history
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(
mut self,
address: &CanonicalAddr,
versions: &[ContractInfo],
) -> StdResult<Battle> {
if let Some(pos) = self.heroes.iter().position(|h| h.owner == *address) {
let winner = self.winner.map(|u| self.heroes[u as usize].name.clone());
let battle = Battle {
battle_number: self.battle_number,
timestamp: self.timestamp,
my_hero: self.heroes.swap_remove(pos).into_humanized(versions)?,
skill_used: self.skill_used,
winner,
winning_skill_value: self.winning_skill_value,
i_won: self.winner.map_or(false, |w| w as usize == pos),
};
Ok(battle)
} else {
Err(StdError::generic_err("Battle History corupted"))
}
}
/// Returns StdResult<BattleDump> from converting a StoreBattle to a displayable BattleDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(
mut self,
api: &A,
versions: &[ContractInfo],
) -> StdResult<BattleDump> |
}
/// code hash and address of a contract
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreContractInfo {
/// contract's code hash string
pub code_hash: String,
/// contract's address
pub address: CanonicalAddr,
}
impl StoreContractInfo {
/// Returns StdResult<ContractInfo> from converting a StoreContractInfo to a displayable
/// ContractInfo
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
pub fn to_humanized<A: Api>(&self, api: &A) -> StdResult<ContractInfo> {
let info = ContractInfo {
address: api.human_address(&self.address)?,
code_hash: self.code_hash.clone(),
};
Ok(info)
}
}
/// Returns StdResult<()> after saving the battle id
///
/// # Arguments
///
/// * `storage` - a mutable reference to the storage this item should go to
/// * `battle_num` - the battle id to store
/// * `address` - a reference to the address for which to store this battle id
pub fn append_battle_for_addr<S: Storage>(
storage: &mut S,
battle_num: u64,
address: &CanonicalAddr,
) -> StdResult<()> {
let mut store = PrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
let mut store = AppendStoreMut::attach_or_create(&mut store)?;
store.push(&battle_num)
}
/// Returns StdResult<Vec<Battle>> of the battles to display
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `storage` - a reference to the contract's storage
/// * `address` - a reference to the address whose battles to display
/// * `page` - page to start displaying
/// * `page_size` - number of battles per page
pub fn get_history<A: Api, S: ReadonlyStorage>(
api: &A,
storage: &S,
address: &CanonicalAddr,
page: u32,
page_size: u32,
) -> StdResult<Vec<Battle>> {
let id_store =
ReadonlyPrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
// Try to access the storage of battle ids for the account.
// If it doesn't exist yet, return an empty list of battles.
let id_store = if let Some(result) = AppendStore::<u64, _>::attach(&id_store) {
result?
} else {
return Ok(vec![]);
};
let config: Config = load(storage, CONFIG_KEY)?;
let versions = config
.card_versions
.iter()
.map(|v| v.to_humanized(api))
.collect::<StdResult<Vec<ContractInfo>>>()?;
// access battle storage
let his_store = ReadonlyPrefixedStorage::new(PREFIX_HISTORY, storage);
// Take `page_size` battles starting from the latest battle, skipping the
// `page * page_size` most recent battles first.
let battles: StdResult<Vec<Battle>> = id_store
.iter()
.rev()
.skip((page * page_size) as usize)
.take(page_size as usize)
.map(|id| {
id.map(|id| {
load(&his_store, &id.to_le_bytes())
.and_then(|b: StoreBattle| b.into_humanized(address, &versions))
})
.and_then(|x| x)
})
.collect();
battles
}
pub fn save<T: Serialize, S: Storage>(storage: &mut S, key: &[u8], value: &T) -> StdResult<()> {
storage.set(key, &Bincode2::serialize(value)?);
Ok(())
}
pub fn remove<S: Storage>(storage: &mut S, key: &[u8]) {
storage.remove(key);
}
pub fn load<T: DeserializeOwned, S: ReadonlyStorage>(storage: &S, key: &[u8]) -> StdResult<T> {
Bincode2::deserialize(
&storage
.get(key)
.ok_or_else(|| StdError::not_found(type_name::<T>()))?,
)
}
pub fn may_load<T: DeserializeOwned, S: ReadonlyStorage>(
storage: &S,
key: &[u8],
) -> StdResult<Option<T>> {
match storage.get(key) {
Some(value) => Bincode2::deserialize(&value).map(Some),
None => Ok(None),
}
}
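// Hedged round-trip sketch: `save`/`load` are symmetric over Bincode2, and a
// missing key is `StdError::NotFound` from `load` but `Ok(None)` from
// `may_load`. The counter key below is illustrative only.
fn bump_counter<S: Storage>(storage: &mut S, key: &[u8]) -> StdResult<u64> {
    let next: u64 = may_load(storage, key)?.unwrap_or(0) + 1;
    save(storage, key, &next)?;
    Ok(next)
}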
| {
let battle = BattleDump {
battle_number: self.battle_number,
timestamp: self.timestamp,
heroes: self
.heroes
.drain(..)
.map(|h| h.into_dump(api, versions))
.collect::<StdResult<Vec<HeroDump>>>()?,
skill_used: self.skill_used,
winner: self.winner,
winning_skill_value: self.winning_skill_value,
};
Ok(battle)
} | identifier_body |
state.rs | use std::any::type_name;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use cosmwasm_std::{Api, CanonicalAddr, ReadonlyStorage, StdError, StdResult, Storage};
use cosmwasm_storage::{PrefixedStorage, ReadonlyPrefixedStorage};
use secret_toolkit::{
serialization::{Bincode2, Serde},
storage::{AppendStore, AppendStoreMut},
};
use crate::msg::{Battle, BattleDump, ContractInfo, Hero, HeroDump, PlayerStats, TokenInfo};
use crate::stats::Stats;
pub const CONFIG_KEY: &[u8] = b"config";
pub const PREFIX_VIEW_KEY: &[u8] = b"viewkey";
pub const PREFIX_HISTORY: &[u8] = b"history";
pub const PREFIX_BATTLE_ID: &[u8] = b"battleids";
pub const PREFIX_TOURN_STATS: &[u8] = b"trnstat";
pub const PREFIX_ALL_STATS: &[u8] = b"allstat";
pub const PREFIX_PLAYERS: &[u8] = b"players";
pub const PREFIX_SEEN: &[u8] = b"seen";
pub const ADMIN_KEY: &[u8] = b"admin";
pub const BOTS_KEY: &[u8] = b"bots";
pub const LEADERBOARDS_KEY: &[u8] = b"ldrbds";
pub const IMPORT_FROM_KEY: &[u8] = b"import";
pub const EXPORT_CONFIG_KEY: &[u8] = b"export";
/// arena config
#[derive(Serialize, Deserialize)]
pub struct Config {
/// heroes waiting to fight
pub heroes: Vec<StoreWaitingHero>,
/// prng seed
pub prng_seed: Vec<u8>,
/// combined entropy strings supplied with the heroes
pub entropy: String,
/// current battle count in this arena
pub battle_cnt: u64,
/// battle count from previous arenas
pub previous_battles: u64,
/// viewing key used with the card contracts
pub viewing_key: String,
/// contract info of all the card versions
pub card_versions: Vec<StoreContractInfo>,
/// true if battles are halted
pub fight_halt: bool,
/// total number of players
pub player_cnt: u32,
/// list of new players that need to be added
pub new_players: Vec<CanonicalAddr>,
}
/// export config
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct ExportConfig {
/// new arena contract info
pub new_arena: StoreContractInfo,
/// next block to export
pub next: u32,
}
/// stored leaderboard entry
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Rank {
/// player's score
pub score: i32,
/// player's address
pub address: CanonicalAddr,
}
/// tournament data
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Tourney {
/// tournament start time
pub start: u64,
/// tournament leaderboard
pub leaderboard: Vec<Rank>,
}
/// leaderboards
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Leaderboards {
/// tournament leaderboard
pub tourney: Tourney,
/// all time leaderboard
pub all_time: Vec<Rank>,
}
/// tournament stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct TourneyStats {
/// time of last update
pub last_seen: u64,
/// player's stats for this tournament
pub stats: StorePlayerStats,
}
/// stored player stats
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StorePlayerStats {
/// player's score
pub score: i32,
/// number of battles
pub battles: u32,
/// number of wins
pub wins: u32,
/// number of ties
pub ties: u32,
/// number of times the player took 3rd place in a 2-way tie
pub third_in_two_way_ties: u32,
/// number of losses
pub losses: u32,
}
impl Default for StorePlayerStats {
fn default() -> Self {
Self {
score: 0,
battles: 0,
wins: 0,
ties: 0,
third_in_two_way_ties: 0,
losses: 0,
}
}
}
impl StorePlayerStats {
/// Returns StdResult<PlayerStats> from converting a StorePlayerStats to a displayable
/// PlayerStats
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `address` - a reference to the address corresponding to these stats
pub fn into_humanized<A: Api>(
self,
api: &A,
address: &CanonicalAddr,
) -> StdResult<PlayerStats> {
let stats = PlayerStats {
score: self.score,
address: api.human_address(address)?,
battles: self.battles,
wins: self.wins,
ties: self.ties,
third_in_two_way_ties: self.third_in_two_way_ties,
losses: self.losses,
};
Ok(stats)
}
}
/// waiting hero's info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreWaitingHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's stats
pub stats: Stats,
}
/// hero info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreHero {
/// hero's owner
pub owner: CanonicalAddr,
/// name of the hero
pub name: String,
/// hero's token info
pub token_info: StoreTokenInfo,
/// hero's skills before the battle
pub pre_battle_skills: Vec<u8>,
/// hero's skills after the battle
pub post_battle_skills: Vec<u8>,
}
impl StoreHero {
/// Returns StdResult<Hero> from converting a StoreHero to a displayable Hero
///
/// # Arguments
///
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(self, versions: &[ContractInfo]) -> StdResult<Hero> {
let hero = Hero {
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
/// Returns StdResult<HeroDump> from converting a StoreHero to a displayable HeroDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(self, api: &A, versions: &[ContractInfo]) -> StdResult<HeroDump> {
let hero = HeroDump {
owner: api.human_address(&self.owner)?,
name: self.name,
token_info: TokenInfo {
token_id: self.token_info.token_id,
address: versions[self.token_info.version as usize].address.clone(),
},
pre_battle_skills: self.pre_battle_skills,
post_battle_skills: self.post_battle_skills,
};
Ok(hero)
}
}
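// Hedged sketch: `StoreTokenInfo.version` is an index into the arena's
// `card_versions` list, which is why humanizing needs the `versions` slice.
// A bounds-checked lookup like this (illustrative; assumes msg::ContractInfo
// derives Clone, as its uses above suggest) avoids panicking on bad data:
fn checked_version(versions: &[ContractInfo], version: u8) -> StdResult<ContractInfo> {
    versions
        .get(version as usize)
        .cloned()
        .ok_or_else(|| StdError::generic_err("unknown card contract version"))
}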
/// a hero's token info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreTokenInfo {
/// hero's token id
pub token_id: String,
/// index of the card contract version
pub version: u8,
}
/// battle info
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreBattle {
/// battle id number
pub battle_number: u64,
/// number of seconds since epoch time 01/01/1970 at which the battle took place
pub timestamp: u64,
/// heroes that fought
pub heroes: Vec<StoreHero>,
/// skill used to determine the winner
pub skill_used: u8,
/// index of winning hero
pub winner: Option<u8>,
/// winning skill value
pub winning_skill_value: u8,
}
impl StoreBattle {
/// Returns StdResult<Battle> from converting a StoreBattle to a displayable Battle
///
/// # Arguments
///
/// * `address` - a reference to the address querying their battle history
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_humanized(
mut self,
address: &CanonicalAddr,
versions: &[ContractInfo],
) -> StdResult<Battle> {
if let Some(pos) = self.heroes.iter().position(|h| h.owner == *address) {
let winner = self.winner.map(|u| self.heroes[u as usize].name.clone());
let battle = Battle {
battle_number: self.battle_number,
timestamp: self.timestamp,
my_hero: self.heroes.swap_remove(pos).into_humanized(versions)?,
skill_used: self.skill_used,
winner,
winning_skill_value: self.winning_skill_value,
i_won: self.winner.map_or(false, |w| w as usize == pos),
};
Ok(battle)
} else {
Err(StdError::generic_err("Battle History corupted"))
}
}
/// Returns StdResult<BattleDump> from converting a StoreBattle to a displayable BattleDump
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `versions` - a slice of ContractInfo of token contract versions
pub fn into_dump<A: Api>(
mut self,
api: &A,
versions: &[ContractInfo],
) -> StdResult<BattleDump> {
let battle = BattleDump {
battle_number: self.battle_number,
timestamp: self.timestamp,
heroes: self
.heroes
.drain(..)
.map(|h| h.into_dump(api, versions))
.collect::<StdResult<Vec<HeroDump>>>()?,
skill_used: self.skill_used,
winner: self.winner,
winning_skill_value: self.winning_skill_value,
};
Ok(battle)
}
}
/// code hash and address of a contract
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreContractInfo {
/// contract's code hash string
pub code_hash: String,
/// contract's address
pub address: CanonicalAddr,
}
impl StoreContractInfo {
/// Returns StdResult<ContractInfo> from converting a StoreContractInfo to a displayable
/// ContractInfo
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
pub fn to_humanized<A: Api>(&self, api: &A) -> StdResult<ContractInfo> {
let info = ContractInfo {
address: api.human_address(&self.address)?,
code_hash: self.code_hash.clone(),
};
Ok(info)
}
}
/// Returns StdResult<()> after saving the battle id
///
/// # Arguments
///
/// * `storage` - a mutable reference to the storage this item should go to
/// * `battle_num` - the battle id to store
/// * `address` - a reference to the address for which to store this battle id
pub fn append_battle_for_addr<S: Storage>(
storage: &mut S,
battle_num: u64,
address: &CanonicalAddr,
) -> StdResult<()> {
let mut store = PrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
let mut store = AppendStoreMut::attach_or_create(&mut store)?;
store.push(&battle_num)
}
/// Returns StdResult<Vec<Battle>> of the battles to display
///
/// # Arguments
///
/// * `api` - a reference to the Api used to convert human and canonical addresses
/// * `storage` - a reference to the contract's storage
/// * `address` - a reference to the address whose battles to display
/// * `page` - page to start displaying
/// * `page_size` - number of battles per page
pub fn get_history<A: Api, S: ReadonlyStorage>(
api: &A,
storage: &S,
address: &CanonicalAddr,
page: u32,
page_size: u32,
) -> StdResult<Vec<Battle>> {
let id_store =
ReadonlyPrefixedStorage::multilevel(&[PREFIX_BATTLE_ID, address.as_slice()], storage);
// Try to access the storage of battle ids for the account.
// If it doesn't exist yet, return an empty list of battles.
let id_store = if let Some(result) = AppendStore::<u64, _>::attach(&id_store) | else {
return Ok(vec![]);
};
let config: Config = load(storage, CONFIG_KEY)?;
let versions = config
.card_versions
.iter()
.map(|v| v.to_humanized(api))
.collect::<StdResult<Vec<ContractInfo>>>()?;
// access battle storage
let his_store = ReadonlyPrefixedStorage::new(PREFIX_HISTORY, storage);
// Take `page_size` battles starting from the latest battle, skipping the
// `page * page_size` most recent battles first.
let battles: StdResult<Vec<Battle>> = id_store
.iter()
.rev()
.skip((page * page_size) as usize)
.take(page_size as usize)
.map(|id| {
id.map(|id| {
load(&his_store, &id.to_le_bytes())
.and_then(|b: StoreBattle| b.into_humanized(address, &versions))
})
.and_then(|x| x)
})
.collect();
battles
}
pub fn save<T: Serialize, S: Storage>(storage: &mut S, key: &[u8], value: &T) -> StdResult<()> {
storage.set(key, &Bincode2::serialize(value)?);
Ok(())
}
pub fn remove<S: Storage>(storage: &mut S, key: &[u8]) {
storage.remove(key);
}
pub fn load<T: DeserializeOwned, S: ReadonlyStorage>(storage: &S, key: &[u8]) -> StdResult<T> {
Bincode2::deserialize(
&storage
.get(key)
.ok_or_else(|| StdError::not_found(type_name::<T>()))?,
)
}
pub fn may_load<T: DeserializeOwned, S: ReadonlyStorage>(
storage: &S,
key: &[u8],
) -> StdResult<Option<T>> {
match storage.get(key) {
Some(value) => Bincode2::deserialize(&value).map(Some),
None => Ok(None),
}
}
| {
result?
} | conditional_block |
testingfrog.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(clippy::all)]
//****************************************
// tracing test
//****************************************
// use axum::{routing::get, Router};
// use std::error::Error;
//
// use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry};
// use tracing_tree::HierarchicalLayer;
// use website::handlers;
//
// #[tokio::main]
// async fn main() -> Result<(), Box<dyn Error>> {
// Registry::default()
// .with(EnvFilter::from_default_env())
// .with(
// HierarchicalLayer::new(2)
// .with_targets(true)
// .with_bracketed_fields(true),
// )
// .init();
//
// let app = Router::new().route("/", get(handlers::root::get));
//
// axum::Server::bind(&"0.0.0.0:8000".parse().unwrap())
// .serve(app.into_make_service())
// .await?;
//
// Ok(())
// }
// ****************************************
// parsing and rendering test
// ****************************************
use nom::{
branch::alt,
bytes::complete::{tag, take_till, take_until},
character::complete::{newline, space0, space1},
combinator::map,
multi::separated_list0,
sequence::{delimited, pair, tuple},
IResult,
};
use std::{error::Error, fs};
use pulldown_cmark::{html, CodeBlockKind, Event, Parser, Tag};
use syntect::{
highlighting::ThemeSet,
html::{highlighted_html_for_file, highlighted_html_for_string},
parsing::SyntaxSet,
};
type BoxError = Box<dyn Error>;
type BoxResult<T> = Result<T, BoxError>;
#[derive(Debug)]
enum PostStatus {
Draft,
Published,
}
// a simple struct to hold a post, without parsing the markdown
// basically, simply parse the header before feeding it to content pipeline
struct Post<'input> {
title: &'input str,
tags: Vec<&'input str>,
status: PostStatus,
raw_content: &'input str,
}
fn post_title(input: &str) -> IResult<&str, &str> {
delimited(tag("title"), take_until("\n"), newline)(input)
}
fn post_tags(input: &str) -> IResult<&str, Vec<&str>> {
delimited(
pair(tag("tags:"), space0),
separated_list0(
tuple((space0, tag(","), space0)),
take_till(|c| c == ',' || c == '\n'),
),
newline,
)(input)
}
fn post_status(input: &str) -> IResult<&str, PostStatus> {
delimited(
tag("status:"),
delimited(
space1,
alt((
map(tag("published"), |_| PostStatus::Published),
map(tag("draft"), |_| PostStatus::Draft),
)),
space0,
),
newline,
)(input)
}
fn post_header(input: &str) -> IResult<&str, (&str, Vec<&str>, PostStatus)> {
delimited(
pair(tag("---"), newline),
tuple((post_title, post_tags, post_status)),
pair(tag("---"), newline),
)(input)
}
impl<'a> Post<'a> {
fn from_str(input: &'a str) -> Result<Post<'a>, Box<dyn Error>> {
let (remaining, (title, tags, status)) = post_header(input).map_err(|e| e.to_owned())?;
Ok(Self {
title,
tags,
status,
raw_content: remaining,
})
}
}
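// Hedged test sketch (input invented here): note that `post_title` matches the
// bare tag "title" with no colon, unlike `post_tags`/`post_status`, so any
// colon and leading space end up inside the parsed title.
#[cfg(test)]
mod post_header_tests {
    use super::*;

    #[test]
    fn parses_a_minimal_header() {
        let input = "---\ntitle Hello world\ntags: rust, web\nstatus: draft\n---\nbody";
        let post = Post::from_str(input).unwrap();
        assert_eq!(post.title, " Hello world");
        assert_eq!(post.tags, vec!["rust", "web"]);
        assert!(matches!(post.status, PostStatus::Draft));
        assert_eq!(post.raw_content, "body");
    }
}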
struct SyntectEvent<I> {
inner: I,
tok: Option<String>,
syntax_set: SyntaxSet,
}
impl<'a, I> SyntectEvent<I> {
fn new(inner: I) -> Self {
Self {
inner,
tok: None,
syntax_set: SyntaxSet::load_defaults_newlines(),
}
}
}
impl<'a, I> Iterator for SyntectEvent<I>
where
I: Iterator<Item = Event<'a>>,
{
type Item = Event<'a>;
fn next(&mut self) -> Option<Self::Item> {
match self.inner.next() {
None => None,
Some(ev) => match ev {
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(ref tok))) => {
self.tok = Some(tok.to_string());
Some(ev)
// self.next() // TODO check that, it's fishy, used to strip the <code> block
}
Event::Text(ref content) => {
if let Some(tok) = &self.tok {
let ts = ThemeSet::load_defaults();
let theme = &ts.themes["Solarized (light)"];
let s = self
.syntax_set
.find_syntax_by_token(&tok)
.unwrap_or_else(|| self.syntax_set.find_syntax_plain_text());
eprintln!("syntax found: {}", s.name);
match highlighted_html_for_string(content, &self.syntax_set, &s, &theme) {
Ok(res) => Some(Event::Html(res.into())),
Err(err) => {
eprintln!("error during html conversion: {:?}", err);
Some(ev)
}
}
} else {
Some(ev)
}
}
Event::End(Tag::CodeBlock(CodeBlockKind::Fenced(_))) => {
self.tok = None;
Some(ev)
}
_ => Some(ev),
},
}
}
}
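// Hedged refactor sketch (names assumed): `ThemeSet::load_defaults()` above
// runs on every text event inside a fenced block; loading the theme once next
// to the syntax set would avoid that repeated work.
struct CachedHighlighter {
    syntax_set: SyntaxSet,
    theme: syntect::highlighting::Theme,
}

impl CachedHighlighter {
    fn new() -> Self {
        let themes = ThemeSet::load_defaults();
        Self {
            syntax_set: SyntaxSet::load_defaults_newlines(),
            theme: themes.themes["Solarized (light)"].clone(),
        }
    }
}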
fn main() -> BoxResult<()> {
let fmt = time::format_description::parse("[year]-[month]-[day]")?;
let t = time::Date::parse("2022-08-01-coucou", &fmt)?;
println!("{}", t.format(&fmt)?);
return Ok(());
let raws = fs::read_dir("./blog/posts/")?
.into_iter()
.map(|d| d.and_then(|d| fs::read_to_string(d.path())))
.collect::<Result<Vec<_>, _>>()?;
let posts: Vec<Post> = raws
//.map(|d| d.and_then(|d| fs::read_to_string(d.path()).map_err(|e|
// Box::new(e) as Box<dyn Error>
// )))
.iter()
.map(|s| Post::from_str(s))
.collect::<BoxResult<Vec<_>>>()?;
// let posts2: Vec<Post> = fs::read_dir("./blog/posts/")?
// .into_iter()
// .map(|d| {
// d.and_then(|d| fs::read_to_string(d.path()))
// .map_err(|e| Box::new(e) as Box<dyn Error>)
// .as_ref().map(|x| Post::from_str(x).unwrap())
// .map_err(|e| todo!())
// })
// .collect::<BoxResult<Vec<_>>>()?;
let blah = vec![fs::read_to_string(
"./blog/posts/2020-03-31-quick-static-hosting.md",
)?];
let _p = blah
.iter()
.map(|s| Post::from_str(s).unwrap())
.collect::<Vec<_>>();
let raw = include_str!("test.md");
let post = Post::from_str(raw)?;
let parser = Parser::new(post.raw_content);
// let ts = ThemeSet::load_defaults();
// let ss = SyntaxSet::load_defaults_newlines();
// let theme = &ts.themes["Solarized (light)"];
// for event in &parser {
// println!("{:?}", event);
// // if let Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(tok))) = event {
// // println!("!!!!!!!!!!!!!! got a token {tok:?}");
// // let syn = ss.find_syntax_by_token(&tok).as_ref().map(|x| &*x.name);
// // println!("syntax? {:?}", syn);
// // }
// }
let events = SyntectEvent::new(parser);
let mut html_output = String::new();
html::push_html(&mut html_output, events);
println!("{}", html_output);
// println!("title: {}", post.title);
// println!("tags: {:?}", post.tags);
// println!("status: {:?}", post.status);
// for sr in ss.syntaxes() {
// println!("{} - {:?}", sr.name, sr.file_extensions);
// }
// let ts = ThemeSet::load_defaults();
// let theme = &ts.themes["Solarized (light)"];
// let html = highlighted_html_for_file("src/bin/geekingfrog.rs", &ss, theme).unwrap();
// println!("{}", html);
Ok(())
}
// //****************************************
// // Axum test
// //****************************************
// use axum::{
// extract::{
// ws::{Message, WebSocket},
// State, WebSocketUpgrade,
// },
// http::StatusCode,
// response::{Html, IntoResponse, Response},
// routing::{get, get_service},
// BoxError, Router,
// };
// use notify::{watcher, RecursiveMode, Watcher, raw_watcher};
// use parking_lot::RwLock;
// use std::{
// net::SocketAddr,
// sync::{mpsc, Arc},
// time::Duration,
// };
// use tera::Tera;
// use tokio::sync::watch::{self, Receiver, Sender};
// use tower::ServiceBuilder;
// use tower_http::{services::ServeDir, trace::TraceLayer};
//
// #[derive(Clone)]
// struct AppState {
// template: Arc<RwLock<Tera>>,
// refresh_chan: Receiver<()>,
// }
//
// #[tokio::main]
// async fn main() -> Result<(), BoxError> {
// tracing_subscriber::fmt::init();
//
// let tera = Arc::new(RwLock::new(
// Tera::new("templates/**/*.html").expect("can get tera"),
// ));
// let (refresh_tx, refresh_rx) = watch::channel(());
//
// // force a new value from the initial one so that calling rx.watch
// // is sure to return something.
// refresh_tx.send(())?;
//
// let app_state = AppState {
// template: tera.clone(),
// refresh_chan: refresh_rx,
// };
//
// let service = ServiceBuilder::new().layer(TraceLayer::new_for_http());
// //.layer(ServeDir::new("templates"));
//
// let app = Router::with_state(app_state)
// .layer(service)
// .route("/", get(root))
// .route("/ws/autorefresh", get(autorefresh_handler))
// .nest(
// "/static",
// get_service(ServeDir::new("static")).handle_error(|err: std::io::Error| async move {
// tracing::info!("Error serving static staff: {err:?}");
// (StatusCode::INTERNAL_SERVER_ERROR, format!("{err:?}"))
// }),
// );
//
// let addr = SocketAddr::from(([127, 0, 0, 1], 8888));
//
// tokio::try_join!(
// async {
// tokio::task::spawn_blocking(move || watch_templates_change(tera, refresh_tx)).await?
// },
// async {
// tracing::debug!("Listening on {addr}");
// axum::Server::bind(&addr)
// .serve(app.into_make_service())
// .await?;
// Ok(())
// }
// )?;
//
// Ok(())
// }
//
// async fn root(State(state): State<AppState>) -> Result<Html<String>, AppError> {
// Ok(state
// .template
// .read()
// .render("index.html", &tera::Context::new())?
// .into())
// }
//
// async fn autorefresh_handler(
// ws: WebSocketUpgrade,
// State(state): State<AppState>,
// ) -> impl IntoResponse {
// tracing::debug!("got a websocket upgrade request");
// ws.on_upgrade(|socket| handle_socket(socket, state.refresh_chan))
// }
//
// async fn handle_socket(mut socket: WebSocket, mut refresh_tx: Receiver<()>) {
// // There's this weird problem, if a watched file has changed at some point
// // there will be a new value on the refresh_rx channel, and calling
// // `changed` on it will return immediately, even if the change has happened
// // before this call. So always ignore the first change on the channel.
// // The sender will always send a new value after channel creation to avoid
// // a different behavior between pages loaded before and after a change
// // to a watched file.
// let mut has_seen_one_change = false;
// loop {
// tokio::select! {
// x = refresh_tx.changed() => {
// tracing::debug!("refresh event!");
// if!has_seen_one_change {
// has_seen_one_change = true;
// continue
// }
//
// match x {
// Ok(_) => if socket.send(Message::Text("refresh".to_string())).await.is_err() {
// tracing::debug!("cannot send stuff, socket probably disconnected");
// break;
// },
// Err(err) => {
// tracing::error!("Cannot read refresh chan??? {err:?}");
// break
// },
// }
// }
// msg = socket.recv() => {
// match msg {
// Some(_) => {
// tracing::debug!("received a websocket message, don't care");
// },
// None => {
// tracing::debug!("websocket disconnected");
// break
// },
// }
// }
// else => break
// }
// }
// }
//
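// // Hedged alternative sketch: on tokio 1.x, `Receiver::borrow_and_update()`
// // marks the current value as seen, so draining it once before the loop
// // could replace the `has_seen_one_change` flag above:
// //     let _ = refresh_tx.borrow_and_update();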
// #[derive(thiserror::Error, Debug)]
// enum AppError {
// #[error("Template error")]
// TemplateError(#[from] tera::Error),
// }
//
// impl IntoResponse for AppError {
// fn into_response(self) -> Response {
// let res = match self {
// AppError::TemplateError(err) => (
// StatusCode::INTERNAL_SERVER_ERROR,
// format!("Templating error: {err}"),
// ),
// };
// res.into_response()
// }
// }
//
// fn watch_templates_change(tera: Arc<RwLock<Tera>>, refresh_tx: Sender<()>) -> Result<(), BoxError> {
// let (tx, rx) = std::sync::mpsc::channel();
// let rx = Debounced {
// rx,
// d: Duration::from_millis(300),
// };
//
// // the default watcher is debounced, but will always send the first event
// // immediately, and then debounce any further events. But that means a regular
// // update actually triggers many events, which are debounced to 2 events.
// // So use the raw_watcher and manually debounce
// let mut watcher = raw_watcher(tx)?;
// watcher.watch("templates", RecursiveMode::Recursive)?;
// loop {
// match rx.recv() {
// Ok(ev) => {
// tracing::info!("debounced event {ev:?}");
// tera.write().full_reload()?;
// refresh_tx.send(())?;
// }
// Err(_timeout_error) => (),
// }
// }
// }
//
// /// wrap a Receiver<T> such that if many T are received between the given Duration
// /// then only the latest one will be kept and returned when calling recv
// struct Debounced<T> {
// rx: mpsc::Receiver<T>,
// d: Duration,
// } | // loop {
// match prev {
// Some(v) => match self.rx.recv_timeout(self.d) {
// Ok(newval) => {
// prev = Some(newval);
// continue;
// }
// Err(_) => break Ok(v),
// },
// None => match self.rx.recv() {
// Ok(val) => {
// prev = Some(val);
// continue;
// }
// Err(err) => break Err(err),
// },
// }
// }
// }
// } | //
// impl<T> Debounced<T> {
// fn recv(&self) -> Result<T, mpsc::RecvError> {
// let mut prev = None;
// | random_line_split |
testingfrog.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(clippy::all)]
//****************************************
// tracing test
//****************************************
// use axum::{routing::get, Router};
// use std::error::Error;
//
// use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry};
// use tracing_tree::HierarchicalLayer;
// use website::handlers;
//
// #[tokio::main]
// async fn main() -> Result<(), Box<dyn Error>> {
// Registry::default()
// .with(EnvFilter::from_default_env())
// .with(
// HierarchicalLayer::new(2)
// .with_targets(true)
// .with_bracketed_fields(true),
// )
// .init();
//
// let app = Router::new().route("/", get(handlers::root::get));
//
// axum::Server::bind(&"0.0.0.0:8000".parse().unwrap())
// .serve(app.into_make_service())
// .await?;
//
// Ok(())
// }
// ****************************************
// parsing and rendering test
// ****************************************
use nom::{
branch::alt,
bytes::complete::{tag, take_till, take_until},
character::complete::{newline, space0, space1},
combinator::map,
multi::separated_list0,
sequence::{delimited, pair, tuple},
IResult,
};
use std::{error::Error, fs};
use pulldown_cmark::{html, CodeBlockKind, Event, Parser, Tag};
use syntect::{
highlighting::ThemeSet,
html::{highlighted_html_for_file, highlighted_html_for_string},
parsing::SyntaxSet,
};
type BoxError = Box<dyn Error>;
type BoxResult<T> = Result<T, BoxError>;
#[derive(Debug)]
enum PostStatus {
Draft,
Published,
}
// a simple struct to hold a post, without parsing the markdown
// basically, simply parse the header before feeding it to content pipeline
struct Post<'input> {
title: &'input str,
tags: Vec<&'input str>,
status: PostStatus,
raw_content: &'input str,
}
fn post_title(input: &str) -> IResult<&str, &str> {
delimited(tag("title"), take_until("\n"), newline)(input)
}
fn post_tags(input: &str) -> IResult<&str, Vec<&str>> {
delimited(
pair(tag("tags:"), space0),
separated_list0(
tuple((space0, tag(","), space0)),
take_till(|c| c == ',' || c == '\n'),
),
newline,
)(input)
}
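// Hedged test sketch (input invented here): `take_till` is greedy, so spaces
// after a comma are eaten by the separator while spaces before a comma would
// stay attached to the preceding tag.
#[test]
fn post_tags_splits_on_commas() {
    let (rest, tags) = post_tags("tags: foo bar, baz\nrest").unwrap();
    assert_eq!(tags, vec!["foo bar", "baz"]);
    assert_eq!(rest, "rest");
}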
fn post_status(input: &str) -> IResult<&str, PostStatus> |
fn post_header(input: &str) -> IResult<&str, (&str, Vec<&str>, PostStatus)> {
delimited(
pair(tag("---"), newline),
tuple((post_title, post_tags, post_status)),
pair(tag("---"), newline),
)(input)
}
impl<'a> Post<'a> {
fn from_str(input: &'a str) -> Result<Post<'a>, Box<dyn Error>> {
let (remaining, (title, tags, status)) = post_header(input).map_err(|e| e.to_owned())?;
Ok(Self {
title,
tags,
status,
raw_content: remaining,
})
}
}
struct SyntectEvent<I> {
inner: I,
tok: Option<String>,
syntax_set: SyntaxSet,
}
impl<'a, I> SyntectEvent<I> {
fn new(inner: I) -> Self {
Self {
inner,
tok: None,
syntax_set: SyntaxSet::load_defaults_newlines(),
}
}
}
impl<'a, I> Iterator for SyntectEvent<I>
where
I: Iterator<Item = Event<'a>>,
{
type Item = Event<'a>;
fn next(&mut self) -> Option<Self::Item> {
match self.inner.next() {
None => None,
Some(ev) => match ev {
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(ref tok))) => {
self.tok = Some(tok.to_string());
Some(ev)
// self.next() // TODO check that, it's fishy, used to strip the <code> block
}
Event::Text(ref content) => {
if let Some(tok) = &self.tok {
let ts = ThemeSet::load_defaults();
let theme = &ts.themes["Solarized (light)"];
let s = self
.syntax_set
.find_syntax_by_token(&tok)
.unwrap_or_else(|| self.syntax_set.find_syntax_plain_text());
eprintln!("syntax found: {}", s.name);
match highlighted_html_for_string(content, &self.syntax_set, &s, &theme) {
Ok(res) => Some(Event::Html(res.into())),
Err(err) => {
eprintln!("error during html conversion: {:?}", err);
Some(ev)
}
}
} else {
Some(ev)
}
}
Event::End(Tag::CodeBlock(CodeBlockKind::Fenced(_))) => {
self.tok = None;
Some(ev)
}
_ => Some(ev),
},
}
}
}
fn main() -> BoxResult<()> {
let fmt = time::format_description::parse("[year]-[month]-[day]")?;
let t = time::Date::parse("2022-08-01-coucou", &fmt)?;
println!("{}", t.format(&fmt)?);
return Ok(());
let raws = fs::read_dir("./blog/posts/")?
.into_iter()
.map(|d| d.and_then(|d| fs::read_to_string(d.path())))
.collect::<Result<Vec<_>, _>>()?;
let posts: Vec<Post> = raws
//.map(|d| d.and_then(|d| fs::read_to_string(d.path()).map_err(|e|
// Box::new(e) as Box<dyn Error>
// )))
.iter()
.map(|s| Post::from_str(s))
.collect::<BoxResult<Vec<_>>>()?;
// let posts2: Vec<Post> = fs::read_dir("./blog/posts/")?
// .into_iter()
// .map(|d| {
// d.and_then(|d| fs::read_to_string(d.path()))
// .map_err(|e| Box::new(e) as Box<dyn Error>)
// .as_ref().map(|x| Post::from_str(x).unwrap())
// .map_err(|e| todo!())
// })
// .collect::<BoxResult<Vec<_>>>()?;
let blah = vec![fs::read_to_string(
"./blog/posts/2020-03-31-quick-static-hosting.md",
)?];
let _p = blah
.iter()
.map(|s| Post::from_str(s).unwrap())
.collect::<Vec<_>>();
let raw = include_str!("test.md");
let post = Post::from_str(raw)?;
let parser = Parser::new(post.raw_content);
// let ts = ThemeSet::load_defaults();
// let ss = SyntaxSet::load_defaults_newlines();
// let theme = &ts.themes["Solarized (light)"];
// for event in &parser {
// println!("{:?}", event);
// // if let Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(tok))) = event {
// // println!("!!!!!!!!!!!!!! got a token {tok:?}");
// // let syn = ss.find_syntax_by_token(&tok).as_ref().map(|x| &*x.name);
// // println!("syntax? {:?}", syn);
// // }
// }
let events = SyntectEvent::new(parser);
let mut html_output = String::new();
html::push_html(&mut html_output, events);
println!("{}", html_output);
// println!("title: {}", post.title);
// println!("tags: {:?}", post.tags);
// println!("status: {:?}", post.status);
// for sr in ss.syntaxes() {
// println!("{} - {:?}", sr.name, sr.file_extensions);
// }
// let ts = ThemeSet::load_defaults();
// let theme = &ts.themes["Solarized (light)"];
// let html = highlighted_html_for_file("src/bin/geekingfrog.rs", &ss, theme).unwrap();
// println!("{}", html);
Ok(())
}
// //****************************************
// // Axum test
// //****************************************
// use axum::{
// extract::{
// ws::{Message, WebSocket},
// State, WebSocketUpgrade,
// },
// http::StatusCode,
// response::{Html, IntoResponse, Response},
// routing::{get, get_service},
// BoxError, Router,
// };
// use notify::{watcher, RecursiveMode, Watcher, raw_watcher};
// use parking_lot::RwLock;
// use std::{
// net::SocketAddr,
// sync::{mpsc, Arc},
// time::Duration,
// };
// use tera::Tera;
// use tokio::sync::watch::{self, Receiver, Sender};
// use tower::ServiceBuilder;
// use tower_http::{services::ServeDir, trace::TraceLayer};
//
// #[derive(Clone)]
// struct AppState {
// template: Arc<RwLock<Tera>>,
// refresh_chan: Receiver<()>,
// }
//
// #[tokio::main]
// async fn main() -> Result<(), BoxError> {
// tracing_subscriber::fmt::init();
//
// let tera = Arc::new(RwLock::new(
// Tera::new("templates/**/*.html").expect("can get tera"),
// ));
// let (refresh_tx, refresh_rx) = watch::channel(());
//
// // force a new value from the initial one so that calling rx.watch
// // is sure to return something.
// refresh_tx.send(())?;
//
// let app_state = AppState {
// template: tera.clone(),
// refresh_chan: refresh_rx,
// };
//
// let service = ServiceBuilder::new().layer(TraceLayer::new_for_http());
// //.layer(ServeDir::new("templates"));
//
// let app = Router::with_state(app_state)
// .layer(service)
// .route("/", get(root))
// .route("/ws/autorefresh", get(autorefresh_handler))
// .nest(
// "/static",
// get_service(ServeDir::new("static")).handle_error(|err: std::io::Error| async move {
// tracing::info!("Error serving static staff: {err:?}");
// (StatusCode::INTERNAL_SERVER_ERROR, format!("{err:?}"))
// }),
// );
//
// let addr = SocketAddr::from(([127, 0, 0, 1], 8888));
//
// tokio::try_join!(
// async {
// tokio::task::spawn_blocking(move || watch_templates_change(tera, refresh_tx)).await?
// },
// async {
// tracing::debug!("Listening on {addr}");
// axum::Server::bind(&addr)
// .serve(app.into_make_service())
// .await?;
// Ok(())
// }
// )?;
//
// Ok(())
// }
//
// async fn root(State(state): State<AppState>) -> Result<Html<String>, AppError> {
// Ok(state
// .template
// .read()
// .render("index.html", &tera::Context::new())?
// .into())
// }
//
// async fn autorefresh_handler(
// ws: WebSocketUpgrade,
// State(state): State<AppState>,
// ) -> impl IntoResponse {
// tracing::debug!("got a websocket upgrade request");
// ws.on_upgrade(|socket| handle_socket(socket, state.refresh_chan))
// }
//
// async fn handle_socket(mut socket: WebSocket, mut refresh_tx: Receiver<()>) {
// // There's this weird problem, if a watched file has changed at some point
// // there will be a new value on the refresh_rx channel, and calling
// // `changed` on it will return immediately, even if the change has happened
// // before this call. So always ignore the first change on the channel.
// // The sender will always send a new value after channel creation to avoid
// // a different behavior between pages loaded before and after a change
// // to a watched file.
// let mut has_seen_one_change = false;
// loop {
// tokio::select! {
// x = refresh_tx.changed() => {
// tracing::debug!("refresh event!");
// if!has_seen_one_change {
// has_seen_one_change = true;
// continue
// }
//
// match x {
// Ok(_) => if socket.send(Message::Text("refresh".to_string())).await.is_err() {
// tracing::debug!("cannot send stuff, socket probably disconnected");
// break;
// },
// Err(err) => {
// tracing::error!("Cannot read refresh chan??? {err:?}");
// break
// },
// }
// }
// msg = socket.recv() => {
// match msg {
// Some(_) => {
// tracing::debug!("received a websocket message, don't care");
// },
// None => {
// tracing::debug!("websocket disconnected");
// break
// },
// }
// }
// else => break
// }
// }
// }
//
// #[derive(thiserror::Error, Debug)]
// enum AppError {
// #[error("Template error")]
// TemplateError(#[from] tera::Error),
// }
//
// impl IntoResponse for AppError {
// fn into_response(self) -> Response {
// let res = match self {
// AppError::TemplateError(err) => (
// StatusCode::INTERNAL_SERVER_ERROR,
// format!("Templating error: {err}"),
// ),
// };
// res.into_response()
// }
// }
//
// fn watch_templates_change(tera: Arc<RwLock<Tera>>, refresh_tx: Sender<()>) -> Result<(), BoxError> {
// let (tx, rx) = std::sync::mpsc::channel();
// let rx = Debounced {
// rx,
// d: Duration::from_millis(300),
// };
//
// // the default watcher is debounced, but will always send the first event
// // immediately, and then debounce any further events. But that means a regular
// // update actually triggers many events, which are debounced to 2 events.
// // So use the raw_watcher and manually debounce
// let mut watcher = raw_watcher(tx)?;
// watcher.watch("templates", RecursiveMode::Recursive)?;
// loop {
// match rx.recv() {
// Ok(ev) => {
// tracing::info!("debounced event {ev:?}");
// tera.write().full_reload()?;
// refresh_tx.send(())?;
// }
// Err(_timeout_error) => (),
// }
// }
// }
//
// /// wrap a Receiver<T> such that if many T are received between the given Duration
// /// then only the latest one will be kept and returned when calling recv
// struct Debounced<T> {
// rx: mpsc::Receiver<T>,
// d: Duration,
// }
//
// impl<T> Debounced<T> {
// fn recv(&self) -> Result<T, mpsc::RecvError> {
// let mut prev = None;
//
// loop {
// match prev {
// Some(v) => match self.rx.recv_timeout(self.d) {
// Ok(newval) => {
// prev = Some(newval);
// continue;
// }
// Err(_) => break Ok(v),
// },
// None => match self.rx.recv() {
// Ok(val) => {
// prev = Some(val);
// continue;
// }
// Err(err) => break Err(err),
// },
// }
// }
// }
// }
| {
delimited(
tag("status:"),
delimited(
space1,
alt((
map(tag("published"), |_| PostStatus::Published),
map(tag("draft"), |_| PostStatus::Draft),
)),
space0,
),
newline,
)(input)
} | identifier_body |
testingfrog.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(clippy::all)]
//****************************************
// tracing test
//****************************************
// use axum::{routing::get, Router};
// use std::error::Error;
//
// use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Registry};
// use tracing_tree::HierarchicalLayer;
// use website::handlers;
//
// #[tokio::main]
// async fn main() -> Result<(), Box<dyn Error>> {
// Registry::default()
// .with(EnvFilter::from_default_env())
// .with(
// HierarchicalLayer::new(2)
// .with_targets(true)
// .with_bracketed_fields(true),
// )
// .init();
//
// let app = Router::new().route("/", get(handlers::root::get));
//
// axum::Server::bind(&"0.0.0.0:8000".parse().unwrap())
// .serve(app.into_make_service())
// .await?;
//
// Ok(())
// }
// ****************************************
// parsing and rendering test
// ****************************************
use nom::{
branch::alt,
bytes::complete::{tag, take_till, take_until},
character::complete::{newline, space0, space1},
combinator::map,
multi::separated_list0,
sequence::{delimited, pair, tuple},
IResult,
};
use std::{error::Error, fs};
use pulldown_cmark::{html, CodeBlockKind, Event, Parser, Tag};
use syntect::{
highlighting::ThemeSet,
html::{highlighted_html_for_file, highlighted_html_for_string},
parsing::SyntaxSet,
};
type BoxError = Box<dyn Error>;
type BoxResult<T> = Result<T, BoxError>;
#[derive(Debug)]
enum PostStatus {
Draft,
Published,
}
// a simple struct to hold a post, without parsing the markdown
// basically, simply parse the header before feeding it to content pipeline
struct Post<'input> {
title: &'input str,
tags: Vec<&'input str>,
status: PostStatus,
raw_content: &'input str,
}
fn post_title(input: &str) -> IResult<&str, &str> {
delimited(tag("title"), take_until("\n"), newline)(input)
}
fn post_tags(input: &str) -> IResult<&str, Vec<&str>> {
delimited(
pair(tag("tags:"), space0),
separated_list0(
tuple((space0, tag(","), space0)),
take_till(|c| c == ',' || c == '\n'),
),
newline,
)(input)
}
fn | (input: &str) -> IResult<&str, PostStatus> {
delimited(
tag("status:"),
delimited(
space1,
alt((
map(tag("published"), |_| PostStatus::Published),
map(tag("draft"), |_| PostStatus::Draft),
)),
space0,
),
newline,
)(input)
}
fn post_header(input: &str) -> IResult<&str, (&str, Vec<&str>, PostStatus)> {
delimited(
pair(tag("---"), newline),
tuple((post_title, post_tags, post_status)),
pair(tag("---"), newline),
)(input)
}
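// Hedged test sketch (input invented here): `tuple` fixes the header field
// order to title, then tags, then status; a reordered header fails to parse.
#[test]
fn post_header_requires_fixed_order() {
    let reordered = "---\nstatus: draft\ntitle x\ntags: a\n---\n";
    assert!(post_header(reordered).is_err());
}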
impl<'a> Post<'a> {
fn from_str(input: &'a str) -> Result<Post<'a>, Box<dyn Error>> {
let (remaining, (title, tags, status)) = post_header(input).map_err(|e| e.to_owned())?;
Ok(Self {
title,
tags,
status,
raw_content: remaining,
})
}
}
struct SyntectEvent<I> {
inner: I,
tok: Option<String>,
syntax_set: SyntaxSet,
}
impl<'a, I> SyntectEvent<I> {
fn new(inner: I) -> Self {
Self {
inner,
tok: None,
syntax_set: SyntaxSet::load_defaults_newlines(),
}
}
}
impl<'a, I> Iterator for SyntectEvent<I>
where
I: Iterator<Item = Event<'a>>,
{
type Item = Event<'a>;
fn next(&mut self) -> Option<Self::Item> {
match self.inner.next() {
None => None,
Some(ev) => match ev {
Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(ref tok))) => {
self.tok = Some(tok.to_string());
Some(ev)
// self.next() // TODO check that, it's fishy, used to strip the <code> block
}
Event::Text(ref content) => {
if let Some(tok) = &self.tok {
let ts = ThemeSet::load_defaults();
let theme = &ts.themes["Solarized (light)"];
let s = self
.syntax_set
.find_syntax_by_token(&tok)
.unwrap_or_else(|| self.syntax_set.find_syntax_plain_text());
eprintln!("syntax found: {}", s.name);
match highlighted_html_for_string(content, &self.syntax_set, &s, &theme) {
Ok(res) => Some(Event::Html(res.into())),
Err(err) => {
eprintln!("error during html conversion: {:?}", err);
Some(ev)
}
}
} else {
Some(ev)
}
}
Event::End(Tag::CodeBlock(CodeBlockKind::Fenced(_))) => {
self.tok = None;
Some(ev)
}
_ => Some(ev),
},
}
}
}
fn main() -> BoxResult<()> {
    // Scratch experiment with date parsing. Note the early `return` below:
    // everything after it in this function is unreachable dead code.
    let fmt = time::format_description::parse("[year]-[month]-[day]")?;
    let t = time::Date::parse("2022-08-01-coucou", &fmt)?;
    println!("{}", t.format(&fmt)?);
    return Ok(());
let raws = fs::read_dir("./blog/posts/")?
.into_iter()
.map(|d| d.and_then(|d| fs::read_to_string(d.path())))
.collect::<Result<Vec<_>, _>>()?;
let posts: Vec<Post> = raws
//.map(|d| d.and_then(|d| fs::read_to_string(d.path()).map_err(|e|
// Box::new(e) as Box<dyn Error>
// )))
.iter()
.map(|s| Post::from_str(s))
.collect::<BoxResult<Vec<_>>>()?;
// let posts2: Vec<Post> = fs::read_dir("./blog/posts/")?
// .into_iter()
// .map(|d| {
// d.and_then(|d| fs::read_to_string(d.path()))
// .map_err(|e| Box::new(e) as Box<dyn Error>)
// .as_ref().map(|x| Post::from_str(x).unwrap())
// .map_err(|e| todo!())
// })
// .collect::<BoxResult<Vec<_>>>()?;
let blah = vec![fs::read_to_string(
"./blog/posts/2020-03-31-quick-static-hosting.md",
)?];
let _p = blah
.iter()
.map(|s| Post::from_str(s).unwrap())
.collect::<Vec<_>>();
let raw = include_str!("test.md");
let post = Post::from_str(raw)?;
let parser = Parser::new(post.raw_content);
// let ts = ThemeSet::load_defaults();
// let ss = SyntaxSet::load_defaults_newlines();
// let theme = &ts.themes["Solarized (light)"];
// for event in &parser {
// println!("{:?}", event);
// // if let Event::Start(Tag::CodeBlock(CodeBlockKind::Fenced(tok))) = event {
// // println!("!!!!!!!!!!!!!! got a token {tok:?}");
// // let syn = ss.find_syntax_by_token(&tok).as_ref().map(|x| &*x.name);
// // println!("syntax? {:?}", syn);
// // }
// }
let events = SyntectEvent::new(parser);
let mut html_output = String::new();
html::push_html(&mut html_output, events);
println!("{}", html_output);
// println!("title: {}", post.title);
// println!("tags: {:?}", post.tags);
// println!("status: {:?}", post.status);
// for sr in ss.syntaxes() {
// println!("{} - {:?}", sr.name, sr.file_extensions);
// }
// let ts = ThemeSet::load_defaults();
// let theme = &ts.themes["Solarized (light)"];
// let html = highlighted_html_for_file("src/bin/geekingfrog.rs", &ss, theme).unwrap();
// println!("{}", html);
Ok(())
}
// //****************************************
// // Axum test
// //****************************************
// use axum::{
// extract::{
// ws::{Message, WebSocket},
// State, WebSocketUpgrade,
// },
// http::StatusCode,
// response::{Html, IntoResponse, Response},
// routing::{get, get_service},
// BoxError, Router,
// };
// use notify::{watcher, RecursiveMode, Watcher, raw_watcher};
// use parking_lot::RwLock;
// use std::{
// net::SocketAddr,
// sync::{mpsc, Arc},
// time::Duration,
// };
// use tera::Tera;
// use tokio::sync::watch::{self, Receiver, Sender};
// use tower::ServiceBuilder;
// use tower_http::{services::ServeDir, trace::TraceLayer};
//
// #[derive(Clone)]
// struct AppState {
// template: Arc<RwLock<Tera>>,
// refresh_chan: Receiver<()>,
// }
//
// #[tokio::main]
// async fn main() -> Result<(), BoxError> {
// tracing_subscriber::fmt::init();
//
// let tera = Arc::new(RwLock::new(
// Tera::new("templates/**/*.html").expect("can get tera"),
// ));
// let (refresh_tx, refresh_rx) = watch::channel(());
//
//     // Force a new value past the initial one so that calling rx.changed()
//     // is guaranteed to return something.
// refresh_tx.send(())?;
//
// let app_state = AppState {
// template: tera.clone(),
// refresh_chan: refresh_rx,
// };
//
// let service = ServiceBuilder::new().layer(TraceLayer::new_for_http());
// //.layer(ServeDir::new("templates"));
//
// let app = Router::with_state(app_state)
// .layer(service)
// .route("/", get(root))
// .route("/ws/autorefresh", get(autorefresh_handler))
// .nest(
// "/static",
// get_service(ServeDir::new("static")).handle_error(|err: std::io::Error| async move {
// tracing::info!("Error serving static staff: {err:?}");
// (StatusCode::INTERNAL_SERVER_ERROR, format!("{err:?}"))
// }),
// );
//
// let addr = SocketAddr::from(([127, 0, 0, 1], 8888));
//
// tokio::try_join!(
// async {
// tokio::task::spawn_blocking(move || watch_templates_change(tera, refresh_tx)).await?
// },
// async {
// tracing::debug!("Listening on {addr}");
// axum::Server::bind(&addr)
// .serve(app.into_make_service())
// .await?;
// Ok(())
// }
// )?;
//
// Ok(())
// }
//
// async fn root(State(state): State<AppState>) -> Result<Html<String>, AppError> {
// Ok(state
// .template
// .read()
// .render("index.html", &tera::Context::new())?
// .into())
// }
//
// async fn autorefresh_handler(
// ws: WebSocketUpgrade,
// State(state): State<AppState>,
// ) -> impl IntoResponse {
// tracing::debug!("got a websocket upgrade request");
// ws.on_upgrade(|socket| handle_socket(socket, state.refresh_chan))
// }
//
// async fn handle_socket(mut socket: WebSocket, mut refresh_tx: Receiver<()>) {
// // There's this weird problem, if a watched file has changed at some point
// // there will be a new value on the refresh_rx channel, and calling
// // `changed` on it will return immediately, even if the change has happened
// // before this call. So always ignore the first change on the channel.
// // The sender will always send a new value after channel creation to avoid
// // a different behavior between pages loaded before and after a change
// // to a watched file.
// let mut has_seen_one_change = false;
// loop {
// tokio::select! {
// x = refresh_tx.changed() => {
// tracing::debug!("refresh event!");
//                 if !has_seen_one_change {
// has_seen_one_change = true;
// continue
// }
//
// match x {
// Ok(_) => if socket.send(Message::Text("refresh".to_string())).await.is_err() {
// tracing::debug!("cannot send stuff, socket probably disconnected");
// break;
// },
// Err(err) => {
// tracing::error!("Cannot read refresh chan??? {err:?}");
// break
// },
// }
// }
// msg = socket.recv() => {
// match msg {
// Some(_) => {
// tracing::debug!("received a websocket message, don't care");
// },
// None => {
// tracing::debug!("websocket disconnected");
// break
// },
// }
// }
// else => break
// }
// }
// }
//
// #[derive(thiserror::Error, Debug)]
// enum AppError {
// #[error("Template error")]
// TemplateError(#[from] tera::Error),
// }
//
// impl IntoResponse for AppError {
// fn into_response(self) -> Response {
// let res = match self {
// AppError::TemplateError(err) => (
// StatusCode::INTERNAL_SERVER_ERROR,
// format!("Templating error: {err}"),
// ),
// };
// res.into_response()
// }
// }
//
// fn watch_templates_change(tera: Arc<RwLock<Tera>>, refresh_tx: Sender<()>) -> Result<(), BoxError> {
// let (tx, rx) = std::sync::mpsc::channel();
// let rx = Debounced {
// rx,
// d: Duration::from_millis(300),
// };
//
// // the default watcher is debounced, but will always send the first event
//     // immediately, and then debounce any further events. But that means a regular
// // update actually triggers many events, which are debounced to 2 events.
// // So use the raw_watcher and manually debounce
// let mut watcher = raw_watcher(tx)?;
// watcher.watch("templates", RecursiveMode::Recursive)?;
// loop {
// match rx.recv() {
// Ok(ev) => {
// tracing::info!("debounced event {ev:?}");
// tera.write().full_reload()?;
// refresh_tx.send(())?;
// }
// Err(_timeout_error) => (),
// }
// }
// }
//
// /// wrap a Receiver<T> such that if many T are received between the given Duration
// /// then only the latest one will be kept and returned when calling recv
// struct Debounced<T> {
// rx: mpsc::Receiver<T>,
// d: Duration,
// }
//
// impl<T> Debounced<T> {
// fn recv(&self) -> Result<T, mpsc::RecvError> {
// let mut prev = None;
//
// loop {
// match prev {
// Some(v) => match self.rx.recv_timeout(self.d) {
// Ok(newval) => {
// prev = Some(newval);
// continue;
// }
// Err(_) => break Ok(v),
// },
// None => match self.rx.recv() {
// Ok(val) => {
// prev = Some(val);
// continue;
// }
// Err(err) => break Err(err),
// },
// }
// }
// }
// }
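//
// // Hypothetical usage sketch for `Debounced` (not in the original code):
// // values sent less than `d` apart collapse into the most recent one.
// //
// // let (tx, rx) = mpsc::channel();
// // let rx = Debounced { rx, d: Duration::from_millis(300) };
// // tx.send(1).unwrap();
// // tx.send(2).unwrap(); // arrives within 300ms of the first send
// // assert_eq!(rx.recv().unwrap(), 2); // only the newest value is returned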
| post_status | identifier_name |
sync.rs | //! This module supports synchronizing a UPM database with a copy on a remote repository. The
//! remote repository should be an HTTP or HTTPS server supporting the "download", "upload", and
//! "delete" primitives of the UPM sync protocol.
use reqwest::multipart;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::str;
use std::time::Duration;
use backup;
use database::Database;
use error::UpmError;
/// The UPM sync protocol's delete command. This is appended to the repository URL.
const DELETE_CMD: &'static str = "deletefile.php";
/// The UPM sync protocol's upload command. This is appended to the repository URL.
const UPLOAD_CMD: &'static str = "upload.php";
/// This field name is used for the database file when uploading.
const UPM_UPLOAD_FIELD_NAME: &'static str = "userfile";
/// Abort the operation if the server doesn't respond for this time interval.
const TIMEOUT_SECS: u64 = 10;
/// The UPM sync protocol returns an HTTP body of "OK" if the request was successful, otherwise it
/// returns one of these error codes: FILE_DOESNT_EXIST, FILE_WASNT_DELETED, FILE_ALREADY_EXISTS,
/// FILE_WASNT_MOVED, FILE_WASNT_UPLOADED
const UPM_SUCCESS: &'static str = "OK";
/// UPM sync protocol responses should never be longer than this size.
const UPM_MAX_RESPONSE_CODE_LENGTH: usize = 64;
/// The MIME type used when uploading a database.
const DATABASE_MIME_TYPE: &'static str = "application/octet-stream";
impl From<reqwest::Error> for UpmError {
/// Convert a reqwest error into a `UpmError`.
fn from(err: reqwest::Error) -> UpmError {
UpmError::Sync(format!("{}", err))
}
}
/// A successful sync will result in one of these three conditions.
pub enum SyncResult {
/// The remote repository's copy of the database was replaced with the local copy.
RemoteSynced,
/// The local database was replaced with the remote repository's copy.
LocalSynced,
/// Neither the local database nor the remote database was changed, since they were both the
/// same revision.
NeitherSynced,
}
/// Provide basic access to the remote repository.
struct Repository {
url: String,
http_username: String,
http_password: String,
client: reqwest::Client,
}
impl Repository {
/// Create a new `Repository` struct with the provided URL and credentials.
fn new(url: &str, http_username: &str, http_password: &str) -> Result<Repository, UpmError> {
// Create a new reqwest client.
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(TIMEOUT_SECS))
.build()?;
Ok(Repository {
url: String::from(url),
http_username: String::from(http_username),
http_password: String::from(http_password),
client,
})
}
//
// Provide the three operations of the UPM sync protocol:
// Download, delete, and upload.
//
/// Download the remote database with the provided name. The database is returned in raw form
/// as a byte buffer.
fn download(&mut self, database_name: &str) -> Result<Vec<u8>, UpmError> {
let url = self.make_url(database_name);
// Send request
let mut response = self
.client
.get(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.send()?;
// Process response
        if !response.status().is_success() {
return match response.status() {
reqwest::StatusCode::NOT_FOUND => Err(UpmError::SyncDatabaseNotFound),
_ => Err(UpmError::Sync(format!("{}", response.status()))),
};
}
let mut data: Vec<u8> = Vec::new();
response.read_to_end(&mut data)?;
Ok(data)
}
/// Delete the specified database from the remote repository.
fn delete(&mut self, database_name: &str) -> Result<(), UpmError> {
let url = self.make_url(DELETE_CMD);
// Send request
let mut response = self
.client
.post(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.form(&[("fileToDelete", database_name)])
.send()?;
| self.check_response(&mut response)?;
Ok(())
}
/// Upload the provided database to the remote repository. The database is provided in raw
/// form as a byte buffer.
fn upload(&mut self, database_name: &str, database_bytes: Vec<u8>) -> Result<(), UpmError> {
let url: String = self.make_url(UPLOAD_CMD);
        // Thanks to Sean (seanmonstar) for helping translate this code to
        // reqwest's multipart API.
let part = multipart::Part::bytes(database_bytes.clone())
.file_name(database_name.to_string())
.mime_str(DATABASE_MIME_TYPE)?;
let form = multipart::Form::new().part(UPM_UPLOAD_FIELD_NAME, part);
// Send request
let mut response = self.client.post(&url).multipart(form).send()?;
// Process response
self.check_response(&mut response)?;
Ok(())
}
/// Construct a URL by appending the provided string to the repository URL, adding a separating
/// slash character if needed.
fn make_url(&self, path_component: &str) -> String {
if self.url.ends_with('/') {
format!("{}{}", self.url, path_component)
} else {
format!("{}/{}", self.url, path_component)
}
}
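    // e.g. with `url = "https://example.com/repo"`, `make_url("upload.php")`
    // yields "https://example.com/repo/upload.php"; a trailing slash on the
    // base URL is not doubled.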
/// Confirm that the HTTP response was successful and valid.
fn check_response(&self, response: &mut reqwest::Response) -> Result<(), UpmError> {
        if !response.status().is_success() {
return Err(UpmError::Sync(format!("{}", response.status())));
}
let mut response_code = String::new();
response.read_to_string(&mut response_code)?;
if response_code.len() > UPM_MAX_RESPONSE_CODE_LENGTH {
return Err(UpmError::Sync(format!(
"Unexpected response from server ({} bytes)",
response_code.len()
)));
}
        if response_code != UPM_SUCCESS {
return Err(UpmError::Sync(format!("Server error: {}", response_code)));
}
Ok(())
}
}
/// Download a database from the remote repository without performing any sync operation with a
/// local database. This is useful when downloading an existing remote database for the first
/// time.
pub fn download<P: AsRef<Path>>(
repo_url: &str,
repo_username: &str,
repo_password: &str,
database_filename: P,
) -> Result<Vec<u8>, UpmError> {
let mut repo = Repository::new(repo_url, repo_username, repo_password)?;
let name = Database::path_to_name(&database_filename)?;
repo.download(&name)
}
/// Synchronize the local and remote databases using the UPM sync protocol. If an optional remote
/// password is provided, it will be used when decrypting the remote database; otherwise, the
/// password of the local database will be used. Returns a `SyncResult`; on
/// `SyncResult::LocalSynced`, the caller must reload the local database.
///
/// The sync logic is as follows:
///
/// 1. Download the current remote database from the provided URL.
/// - Attempt to decrypt this database with the master password.
/// - If decryption fails, return
/// [`UpmError::BadPassword`](../error/enum.UpmError.html#variant.BadPassword). (The caller
/// may wish to prompt the user for the remote password, then try again.)
/// 2. Take action based on the revisions of the local and remote database:
/// - If the local revision is greater than the remote revision, upload the local database to
/// the remote repository (overwriting the pre-existing remote database).
/// - If the local revision is less than the remote revision, replace the local database with
/// the remote database (overwriting the pre-existing local database).
/// - If the local revision is the same as the remote revision, then do nothing.
/// 3. The caller may wish to mimic the behavior of the UPM Java application by considering the
/// local database to be dirty if it has not been synced in 5 minutes.
///
/// NOTE: It is theoretically possible for two UPM clients to revision the database separately
/// before syncing, and result in a situation where one will "win" and the other will have its
/// changes silently lost. The caller should exercise the appropriate level of paranoia to
/// mitigate this risk. For example, prompting for sync before the user begins making a
/// modification, and marking the database as dirty after 5 minutes.
pub fn sync(database: &Database, remote_password: Option<&str>) -> Result<SyncResult, UpmError> {
// Collect all the facts.
if database.sync_url.is_empty() {
return Err(UpmError::NoSyncURL);
}
if database.sync_credentials.is_empty() {
return Err(UpmError::NoSyncCredentials);
}
let sync_account = match database.account(&database.sync_credentials) {
Some(a) => a,
None => return Err(UpmError::NoSyncCredentials),
};
let database_filename = match database.path() {
Some(f) => f,
None => return Err(UpmError::NoDatabaseFilename),
};
let database_name = match database.name() {
Some(n) => n,
None => return Err(UpmError::NoDatabaseFilename),
};
let local_password = match database.password() {
Some(p) => p,
None => return Err(UpmError::NoDatabasePassword),
};
let remote_password = match remote_password {
Some(p) => p,
None => local_password,
};
// 1. Download the remote database.
// If the remote database has a different password than the local
// database, we will return UpmError::BadPassword and the caller can
// prompt the user for the remote password, and call this function
// again with Some(remote_password).
let mut repo = Repository::new(
&database.sync_url,
&sync_account.user,
&sync_account.password,
)?;
let remote_exists;
let mut remote_database = match repo.download(database_name) {
Ok(bytes) => {
remote_exists = true;
Database::load_from_bytes(&bytes, remote_password)?
}
Err(UpmError::SyncDatabaseNotFound) => {
// No remote database with that name exists, so this must be a fresh sync.
// We'll use a stub database with revision 0.
remote_exists = false;
Database::new()
}
Err(e) => return Err(e),
};
// 2. Copy databases as needed.
if database.sync_revision > remote_database.sync_revision {
// Copy the local database to the remote.
// First, upload a backup copy in case something goes wrong between delete() and upload().
if super::PARANOID_BACKUPS {
let backup_database_path =
backup::generate_backup_filename(&PathBuf::from(database_name))?;
let backup_database_name = backup_database_path.to_str();
if let Some(backup_database_name) = backup_database_name {
repo.upload(
backup_database_name,
database.save_to_bytes(remote_password)?,
)?;
}
}
// Delete the existing remote database, if it exists.
if remote_exists {
repo.delete(&database_name)?;
}
// Upload the local database to the remote. Make sure to re-encrypt with the local
// password, in case it has been changed recently.
repo.upload(database_name, database.save_to_bytes(local_password)?)?;
Ok(SyncResult::RemoteSynced)
} else if database.sync_revision < remote_database.sync_revision {
// Replace the local database with the remote database
remote_database.set_path(&database_filename)?;
remote_database.save()?;
// The caller should reload the local database when it receives this result.
Ok(SyncResult::LocalSynced)
} else {
// Revisions are the same -- do nothing.
Ok(SyncResult::NeitherSynced)
}
} | // Process response | random_line_split |
sync.rs | //! This module supports synchronizing a UPM database with a copy on a remote repository. The
//! remote repository should be an HTTP or HTTPS server supporting the "download", "upload", and
//! "delete" primitives of the UPM sync protocol.
use reqwest::multipart;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::str;
use std::time::Duration;
use backup;
use database::Database;
use error::UpmError;
/// The UPM sync protocol's delete command. This is appended to the repository URL.
const DELETE_CMD: &'static str = "deletefile.php";
/// The UPM sync protocol's upload command. This is appended to the repository URL.
const UPLOAD_CMD: &'static str = "upload.php";
/// This field name is used for the database file when uploading.
const UPM_UPLOAD_FIELD_NAME: &'static str = "userfile";
/// Abort the operation if the server doesn't respond for this time interval.
const TIMEOUT_SECS: u64 = 10;
/// The UPM sync protocol returns an HTTP body of "OK" if the request was successful, otherwise it
/// returns one of these error codes: FILE_DOESNT_EXIST, FILE_WASNT_DELETED, FILE_ALREADY_EXISTS,
/// FILE_WASNT_MOVED, FILE_WASNT_UPLOADED
const UPM_SUCCESS: &'static str = "OK";
/// UPM sync protocol responses should never be longer than this size.
const UPM_MAX_RESPONSE_CODE_LENGTH: usize = 64;
/// The MIME type used when uploading a database.
const DATABASE_MIME_TYPE: &'static str = "application/octet-stream";
impl From<reqwest::Error> for UpmError {
/// Convert a reqwest error into a `UpmError`.
fn from(err: reqwest::Error) -> UpmError {
UpmError::Sync(format!("{}", err))
}
}
/// A successful sync will result in one of these three conditions.
pub enum SyncResult {
/// The remote repository's copy of the database was replaced with the local copy.
RemoteSynced,
/// The local database was replaced with the remote repository's copy.
LocalSynced,
/// Neither the local database nor the remote database was changed, since they were both the
/// same revision.
NeitherSynced,
}
/// Provide basic access to the remote repository.
struct Repository {
url: String,
http_username: String,
http_password: String,
client: reqwest::Client,
}
impl Repository {
/// Create a new `Repository` struct with the provided URL and credentials.
fn new(url: &str, http_username: &str, http_password: &str) -> Result<Repository, UpmError> {
// Create a new reqwest client.
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(TIMEOUT_SECS))
.build()?;
Ok(Repository {
url: String::from(url),
http_username: String::from(http_username),
http_password: String::from(http_password),
client,
})
}
//
// Provide the three operations of the UPM sync protocol:
// Download, delete, and upload.
//
/// Download the remote database with the provided name. The database is returned in raw form
/// as a byte buffer.
fn download(&mut self, database_name: &str) -> Result<Vec<u8>, UpmError> {
let url = self.make_url(database_name);
// Send request
let mut response = self
.client
.get(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.send()?;
// Process response
        if !response.status().is_success() {
return match response.status() {
reqwest::StatusCode::NOT_FOUND => Err(UpmError::SyncDatabaseNotFound),
_ => Err(UpmError::Sync(format!("{}", response.status()))),
};
}
let mut data: Vec<u8> = Vec::new();
response.read_to_end(&mut data)?;
Ok(data)
}
/// Delete the specified database from the remote repository.
fn delete(&mut self, database_name: &str) -> Result<(), UpmError> {
let url = self.make_url(DELETE_CMD);
// Send request
let mut response = self
.client
.post(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.form(&[("fileToDelete", database_name)])
.send()?;
// Process response
self.check_response(&mut response)?;
Ok(())
}
/// Upload the provided database to the remote repository. The database is provided in raw
/// form as a byte buffer.
fn upload(&mut self, database_name: &str, database_bytes: Vec<u8>) -> Result<(), UpmError> {
let url: String = self.make_url(UPLOAD_CMD);
        // Thanks to Sean (seanmonstar) for helping translate this code to
        // reqwest's multipart API.
let part = multipart::Part::bytes(database_bytes.clone())
.file_name(database_name.to_string())
.mime_str(DATABASE_MIME_TYPE)?;
let form = multipart::Form::new().part(UPM_UPLOAD_FIELD_NAME, part);
// Send request
let mut response = self.client.post(&url).multipart(form).send()?;
// Process response
self.check_response(&mut response)?;
Ok(())
}
/// Construct a URL by appending the provided string to the repository URL, adding a separating
/// slash character if needed.
fn make_url(&self, path_component: &str) -> String {
if self.url.ends_with('/') {
format!("{}{}", self.url, path_component)
} else {
format!("{}/{}", self.url, path_component)
}
}
/// Confirm that the HTTP response was successful and valid.
fn | (&self, response: &mut reqwest::Response) -> Result<(), UpmError> {
        if !response.status().is_success() {
return Err(UpmError::Sync(format!("{}", response.status())));
}
let mut response_code = String::new();
response.read_to_string(&mut response_code)?;
if response_code.len() > UPM_MAX_RESPONSE_CODE_LENGTH {
return Err(UpmError::Sync(format!(
"Unexpected response from server ({} bytes)",
response_code.len()
)));
}
        if response_code != UPM_SUCCESS {
return Err(UpmError::Sync(format!("Server error: {}", response_code)));
}
Ok(())
}
}
/// Download a database from the remote repository without performing any sync operation with a
/// local database. This is useful when downloading an existing remote database for the first
/// time.
pub fn download<P: AsRef<Path>>(
repo_url: &str,
repo_username: &str,
repo_password: &str,
database_filename: P,
) -> Result<Vec<u8>, UpmError> {
let mut repo = Repository::new(repo_url, repo_username, repo_password)?;
let name = Database::path_to_name(&database_filename)?;
repo.download(&name)
}
/// Synchronize the local and remote databases using the UPM sync protocol. If an optional remote
/// password is provided, it will be used when decrypting the remote database; otherwise, the
/// password of the local database will be used. Returns a `SyncResult`; on
/// `SyncResult::LocalSynced`, the caller must reload the local database.
///
/// The sync logic is as follows:
///
/// 1. Download the current remote database from the provided URL.
/// - Attempt to decrypt this database with the master password.
/// - If decryption fails, return
/// [`UpmError::BadPassword`](../error/enum.UpmError.html#variant.BadPassword). (The caller
/// may wish to prompt the user for the remote password, then try again.)
/// 2. Take action based on the revisions of the local and remote database:
/// - If the local revision is greater than the remote revision, upload the local database to
/// the remote repository (overwriting the pre-existing remote database).
/// - If the local revision is less than the remote revision, replace the local database with
/// the remote database (overwriting the pre-existing local database).
/// - If the local revision is the same as the remote revision, then do nothing.
/// 3. The caller may wish to mimic the behavior of the UPM Java application by considering the
/// local database to be dirty if it has not been synced in 5 minutes.
///
/// NOTE: It is theoretically possible for two UPM clients to revision the database separately
/// before syncing, and result in a situation where one will "win" and the other will have its
/// changes silently lost. The caller should exercise the appropriate level of paranoia to
/// mitigate this risk. For example, prompting for sync before the user begins making a
/// modification, and marking the database as dirty after 5 minutes.
pub fn sync(database: &Database, remote_password: Option<&str>) -> Result<SyncResult, UpmError> {
// Collect all the facts.
if database.sync_url.is_empty() {
return Err(UpmError::NoSyncURL);
}
if database.sync_credentials.is_empty() {
return Err(UpmError::NoSyncCredentials);
}
let sync_account = match database.account(&database.sync_credentials) {
Some(a) => a,
None => return Err(UpmError::NoSyncCredentials),
};
let database_filename = match database.path() {
Some(f) => f,
None => return Err(UpmError::NoDatabaseFilename),
};
let database_name = match database.name() {
Some(n) => n,
None => return Err(UpmError::NoDatabaseFilename),
};
let local_password = match database.password() {
Some(p) => p,
None => return Err(UpmError::NoDatabasePassword),
};
let remote_password = match remote_password {
Some(p) => p,
None => local_password,
};
// 1. Download the remote database.
// If the remote database has a different password than the local
// database, we will return UpmError::BadPassword and the caller can
// prompt the user for the remote password, and call this function
// again with Some(remote_password).
let mut repo = Repository::new(
&database.sync_url,
&sync_account.user,
&sync_account.password,
)?;
let remote_exists;
let mut remote_database = match repo.download(database_name) {
Ok(bytes) => {
remote_exists = true;
Database::load_from_bytes(&bytes, remote_password)?
}
Err(UpmError::SyncDatabaseNotFound) => {
// No remote database with that name exists, so this must be a fresh sync.
// We'll use a stub database with revision 0.
remote_exists = false;
Database::new()
}
Err(e) => return Err(e),
};
// 2. Copy databases as needed.
if database.sync_revision > remote_database.sync_revision {
// Copy the local database to the remote.
// First, upload a backup copy in case something goes wrong between delete() and upload().
if super::PARANOID_BACKUPS {
let backup_database_path =
backup::generate_backup_filename(&PathBuf::from(database_name))?;
let backup_database_name = backup_database_path.to_str();
if let Some(backup_database_name) = backup_database_name {
repo.upload(
backup_database_name,
database.save_to_bytes(remote_password)?,
)?;
}
}
// Delete the existing remote database, if it exists.
if remote_exists {
repo.delete(&database_name)?;
}
// Upload the local database to the remote. Make sure to re-encrypt with the local
// password, in case it has been changed recently.
repo.upload(database_name, database.save_to_bytes(local_password)?)?;
Ok(SyncResult::RemoteSynced)
} else if database.sync_revision < remote_database.sync_revision {
// Replace the local database with the remote database
remote_database.set_path(&database_filename)?;
remote_database.save()?;
// The caller should reload the local database when it receives this result.
Ok(SyncResult::LocalSynced)
} else {
// Revisions are the same -- do nothing.
Ok(SyncResult::NeitherSynced)
}
}
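// A hypothetical caller sketch (not part of the original module; it assumes
// `UpmError::BadPassword` is a unit variant, as the docs above suggest):
// drive `sync`, retry once with a user-supplied remote password on a
// bad-password error, and report whether the local copy must be reloaded.
#[allow(dead_code)]
fn sync_and_check_reload(database: &Database) -> Result<bool, UpmError> {
    let result = match sync(database, None) {
        // The remote copy is encrypted with a different password; in a real
        // UI this value would come from prompting the user.
        Err(UpmError::BadPassword) => {
            sync(database, Some("hypothetical-remote-password"))?
        }
        other => other?,
    };
    match result {
        // Only this case requires the caller to reload its in-memory copy.
        SyncResult::LocalSynced => Ok(true),
        _ => Ok(false),
    }
}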
| check_response | identifier_name |
sync.rs | //! This module supports synchronizing a UPM database with a copy on a remote repository. The
//! remote repository should be an HTTP or HTTPS server supporting the "download", "upload", and
//! "delete" primitives of the UPM sync protocol.
use reqwest::multipart;
use std::io::Read;
use std::path::{Path, PathBuf};
use std::str;
use std::time::Duration;
use backup;
use database::Database;
use error::UpmError;
/// The UPM sync protocol's delete command. This is appended to the repository URL.
const DELETE_CMD: &'static str = "deletefile.php";
/// The UPM sync protocol's upload command. This is appended to the repository URL.
const UPLOAD_CMD: &'static str = "upload.php";
/// This field name is used for the database file when uploading.
const UPM_UPLOAD_FIELD_NAME: &'static str = "userfile";
/// Abort the operation if the server doesn't respond for this time interval.
const TIMEOUT_SECS: u64 = 10;
/// The UPM sync protocol returns an HTTP body of "OK" if the request was successful, otherwise it
/// returns one of these error codes: FILE_DOESNT_EXIST, FILE_WASNT_DELETED, FILE_ALREADY_EXISTS,
/// FILE_WASNT_MOVED, FILE_WASNT_UPLOADED
const UPM_SUCCESS: &'static str = "OK";
/// UPM sync protocol responses should never be longer than this size.
const UPM_MAX_RESPONSE_CODE_LENGTH: usize = 64;
/// The MIME type used when uploading a database.
const DATABASE_MIME_TYPE: &'static str = "application/octet-stream";
impl From<reqwest::Error> for UpmError {
/// Convert a reqwest error into a `UpmError`.
fn from(err: reqwest::Error) -> UpmError {
UpmError::Sync(format!("{}", err))
}
}
/// A successful sync will result in one of these three conditions.
pub enum SyncResult {
/// The remote repository's copy of the database was replaced with the local copy.
RemoteSynced,
/// The local database was replaced with the remote repository's copy.
LocalSynced,
/// Neither the local database nor the remote database was changed, since they were both the
/// same revision.
NeitherSynced,
}
/// Provide basic access to the remote repository.
struct Repository {
url: String,
http_username: String,
http_password: String,
client: reqwest::Client,
}
impl Repository {
/// Create a new `Repository` struct with the provided URL and credentials.
fn new(url: &str, http_username: &str, http_password: &str) -> Result<Repository, UpmError> {
// Create a new reqwest client.
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(TIMEOUT_SECS))
.build()?;
Ok(Repository {
url: String::from(url),
http_username: String::from(http_username),
http_password: String::from(http_password),
client,
})
}
//
// Provide the three operations of the UPM sync protocol:
// Download, delete, and upload.
//
/// Download the remote database with the provided name. The database is returned in raw form
/// as a byte buffer.
fn download(&mut self, database_name: &str) -> Result<Vec<u8>, UpmError> {
let url = self.make_url(database_name);
// Send request
let mut response = self
.client
.get(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.send()?;
// Process response
        if !response.status().is_success() {
return match response.status() {
reqwest::StatusCode::NOT_FOUND => Err(UpmError::SyncDatabaseNotFound),
_ => Err(UpmError::Sync(format!("{}", response.status()))),
};
}
let mut data: Vec<u8> = Vec::new();
response.read_to_end(&mut data)?;
Ok(data)
}
/// Delete the specified database from the remote repository.
fn delete(&mut self, database_name: &str) -> Result<(), UpmError> {
let url = self.make_url(DELETE_CMD);
// Send request
let mut response = self
.client
.post(&url)
.basic_auth(self.http_username.clone(), Some(self.http_password.clone()))
.form(&[("fileToDelete", database_name)])
.send()?;
// Process response
self.check_response(&mut response)?;
Ok(())
}
/// Upload the provided database to the remote repository. The database is provided in raw
/// form as a byte buffer.
fn upload(&mut self, database_name: &str, database_bytes: Vec<u8>) -> Result<(), UpmError> {
let url: String = self.make_url(UPLOAD_CMD);
        // Thanks to Sean (seanmonstar) for helping translate this code to
        // reqwest's multipart API.
let part = multipart::Part::bytes(database_bytes.clone())
.file_name(database_name.to_string())
.mime_str(DATABASE_MIME_TYPE)?;
let form = multipart::Form::new().part(UPM_UPLOAD_FIELD_NAME, part);
// Send request
let mut response = self.client.post(&url).multipart(form).send()?;
// Process response
self.check_response(&mut response)?;
Ok(())
}
/// Construct a URL by appending the provided string to the repository URL, adding a separating
/// slash character if needed.
fn make_url(&self, path_component: &str) -> String {
if self.url.ends_with('/') {
format!("{}{}", self.url, path_component)
} else {
format!("{}/{}", self.url, path_component)
}
}
/// Confirm that the HTTP response was successful and valid.
fn check_response(&self, response: &mut reqwest::Response) -> Result<(), UpmError> {
        if !response.status().is_success() {
return Err(UpmError::Sync(format!("{}", response.status())));
}
let mut response_code = String::new();
response.read_to_string(&mut response_code)?;
if response_code.len() > UPM_MAX_RESPONSE_CODE_LENGTH {
return Err(UpmError::Sync(format!(
"Unexpected response from server ({} bytes)",
response_code.len()
)));
}
        if response_code != UPM_SUCCESS {
return Err(UpmError::Sync(format!("Server error: {}", response_code)));
}
Ok(())
}
}
/// Download a database from the remote repository without performing any sync operation with a
/// local database. This is useful when downloading an existing remote database for the first
/// time.
pub fn download<P: AsRef<Path>>(
repo_url: &str,
repo_username: &str,
repo_password: &str,
database_filename: P,
) -> Result<Vec<u8>, UpmError> {
let mut repo = Repository::new(repo_url, repo_username, repo_password)?;
let name = Database::path_to_name(&database_filename)?;
repo.download(&name)
}
/// Synchronize the local and remote databases using the UPM sync protocol. If an optional remote
/// password is provided, it will be used when decrypting the remote database; otherwise, the
/// password of the local database will be used. Returns a `SyncResult`; on
/// `SyncResult::LocalSynced`, the caller must reload the local database.
///
/// The sync logic is as follows:
///
/// 1. Download the current remote database from the provided URL.
/// - Attempt to decrypt this database with the master password.
/// - If decryption fails, return
/// [`UpmError::BadPassword`](../error/enum.UpmError.html#variant.BadPassword). (The caller
/// may wish to prompt the user for the remote password, then try again.)
/// 2. Take action based on the revisions of the local and remote database:
/// - If the local revision is greater than the remote revision, upload the local database to
/// the remote repository (overwriting the pre-existing remote database).
/// - If the local revision is less than the remote revision, replace the local database with
/// the remote database (overwriting the pre-existing local database).
/// - If the local revision is the same as the remote revision, then do nothing.
/// 3. The caller may wish to mimic the behavior of the UPM Java application by considering the
/// local database to be dirty if it has not been synced in 5 minutes.
///
/// NOTE: It is theoretically possible for two UPM clients to revision the database separately
/// before syncing, and result in a situation where one will "win" and the other will have its
/// changes silently lost. The caller should exercise the appropriate level of paranoia to
/// mitigate this risk. For example, prompting for sync before the user begins making a
/// modification, and marking the database as dirty after 5 minutes.
pub fn sync(database: &Database, remote_password: Option<&str>) -> Result<SyncResult, UpmError> |
let local_password = match database.password() {
Some(p) => p,
None => return Err(UpmError::NoDatabasePassword),
};
let remote_password = match remote_password {
Some(p) => p,
None => local_password,
};
// 1. Download the remote database.
// If the remote database has a different password than the local
// database, we will return UpmError::BadPassword and the caller can
// prompt the user for the remote password, and call this function
// again with Some(remote_password).
let mut repo = Repository::new(
&database.sync_url,
&sync_account.user,
&sync_account.password,
)?;
let remote_exists;
let mut remote_database = match repo.download(database_name) {
Ok(bytes) => {
remote_exists = true;
Database::load_from_bytes(&bytes, remote_password)?
}
Err(UpmError::SyncDatabaseNotFound) => {
// No remote database with that name exists, so this must be a fresh sync.
// We'll use a stub database with revision 0.
remote_exists = false;
Database::new()
}
Err(e) => return Err(e),
};
// 2. Copy databases as needed.
if database.sync_revision > remote_database.sync_revision {
// Copy the local database to the remote.
// First, upload a backup copy in case something goes wrong between delete() and upload().
if super::PARANOID_BACKUPS {
let backup_database_path =
backup::generate_backup_filename(&PathBuf::from(database_name))?;
let backup_database_name = backup_database_path.to_str();
if let Some(backup_database_name) = backup_database_name {
repo.upload(
backup_database_name,
database.save_to_bytes(remote_password)?,
)?;
}
}
// Delete the existing remote database, if it exists.
if remote_exists {
repo.delete(&database_name)?;
}
// Upload the local database to the remote. Make sure to re-encrypt with the local
// password, in case it has been changed recently.
repo.upload(database_name, database.save_to_bytes(local_password)?)?;
Ok(SyncResult::RemoteSynced)
} else if database.sync_revision < remote_database.sync_revision {
// Replace the local database with the remote database
remote_database.set_path(&database_filename)?;
remote_database.save()?;
// The caller should reload the local database when it receives this result.
Ok(SyncResult::LocalSynced)
} else {
// Revisions are the same -- do nothing.
Ok(SyncResult::NeitherSynced)
}
}
| {
// Collect all the facts.
if database.sync_url.is_empty() {
return Err(UpmError::NoSyncURL);
}
if database.sync_credentials.is_empty() {
return Err(UpmError::NoSyncCredentials);
}
let sync_account = match database.account(&database.sync_credentials) {
Some(a) => a,
None => return Err(UpmError::NoSyncCredentials),
};
let database_filename = match database.path() {
Some(f) => f,
None => return Err(UpmError::NoDatabaseFilename),
};
let database_name = match database.name() {
Some(n) => n,
None => return Err(UpmError::NoDatabaseFilename),
}; | identifier_body |
model.rs |
false => None,
}
}
}
#[allow(dead_code)]
pub struct DxModel {
// Window
aspect_ratio: f32,
// D3D12 Targets
device: ComRc<ID3D12Device>,
command_queue: ComRc<ID3D12CommandQueue>,
swap_chain: ComRc<IDXGISwapChain3>,
dc_dev: ComRc<IDCompositionDevice>,
dc_target: ComRc<IDCompositionTarget>,
dc_visual: ComRc<IDCompositionVisual>,
frame_index: u32,
rtv_heap: ComRc<ID3D12DescriptorHeap>,
srv_heap: ComRc<ID3D12DescriptorHeap>,
rtv_descriptor_size: u32,
render_targets: Vec<ComRc<ID3D12Resource>>,
command_allocator: ComRc<ID3D12CommandAllocator>,
// D3D12 Assets
root_signature: ComRc<ID3D12RootSignature>,
pipeline_state: ComRc<ID3D12PipelineState>,
command_list: ComRc<ID3D12GraphicsCommandList>,
// App resources.
vertex_buffer: ComRc<ID3D12Resource>,
vertex_buffer_view: D3D12_VERTEX_BUFFER_VIEW,
index_buffer: ComRc<ID3D12Resource>,
index_buffer_view: D3D12_INDEX_BUFFER_VIEW,
texture: ComRc<ID3D12Resource>,
// Synchronization objects.
fence: ComRc<ID3D12Fence>,
fence_value: u64,
fence_event: HANDLE,
// Pipeline objects.
rotation_radians: f32,
viewport: D3D12_VIEWPORT,
scissor_rect: D3D12_RECT,
}
impl DxModel {
pub fn new(window: &Window) -> Result<DxModel, HRESULT> {
// window params
let size = window.inner_size();
println!("inner_size={:?}", size);
let hwnd = window.raw_window_handle();
let aspect_ratio = (size.width as f32) / (size.height as f32);
let viewport = D3D12_VIEWPORT {
Width: size.width as _,
Height: size.height as _,
MaxDepth: 1.0_f32,
..unsafe { mem::zeroed() }
};
let scissor_rect = D3D12_RECT {
right: size.width as _,
bottom: size.height as _,
..unsafe { mem::zeroed() }
};
// Enable the D3D12 debug layer.
#[cfg(build = "debug")]
{
let debugController = d3d12_get_debug_interface::<ID3D12Debug>()?;
unsafe { debugController.EnableDebugLayer() }
}
let factory = create_dxgi_factory1::<IDXGIFactory4>()?;
        // Create the D3D12 device. If no hardware device can be obtained,
        // fall back to a WARP (software) device.
let device = factory.d3d12_create_best_device()?;
        // Create the command queue.
let command_queue = {
let desc = D3D12_COMMAND_QUEUE_DESC {
Flags: D3D12_COMMAND_QUEUE_FLAG_NONE,
Type: D3D12_COMMAND_LIST_TYPE_DIRECT,
NodeMask: 0,
Priority: 0,
};
device.create_command_queue::<ID3D12CommandQueue>(&desc)?
};
        // Create the swap chain.
let swap_chain = {
let desc = DXGI_SWAP_CHAIN_DESC1 {
BufferCount: FRAME_COUNT,
Width: size.width,
Height: size.height,
Format: DXGI_FORMAT_R8G8B8A8_UNORM,
BufferUsage: DXGI_USAGE_RENDER_TARGET_OUTPUT,
SwapEffect: DXGI_SWAP_EFFECT_FLIP_DISCARD,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
AlphaMode: DXGI_ALPHA_MODE_PREMULTIPLIED,
Flags: 0,
Scaling: 0,
Stereo: 0,
};
factory
.create_swap_chain_for_composition(&command_queue, &desc)?
.query_interface::<IDXGISwapChain3>()?
};
        // Set up DirectComposition: present the (premultiplied-alpha) swap
        // chain through a composition visual attached to the window.
let dc_dev = dcomp_create_device::<IDCompositionDevice>(None)?;
let dc_target = dc_dev.create_target_for_hwnd(hwnd, true)?;
let dc_visual = dc_dev.create_visual()?;
dc_visual.set_content(&swap_chain)?;
dc_target.set_root(&dc_visual)?;
dc_dev.commit()?;
        // This sample does not support fullscreen transitions.
factory.make_window_association(hwnd, DXGI_MWA_NO_ALT_ENTER)?;
let mut frame_index = swap_chain.get_current_back_buffer_index();
// Create descriptor heaps.
// Describe and create a render target view (RTV) descriptor heap.
let rtv_heap = {
let desc = D3D12_DESCRIPTOR_HEAP_DESC {
NumDescriptors: FRAME_COUNT,
Type: D3D12_DESCRIPTOR_HEAP_TYPE_RTV,
Flags: D3D12_DESCRIPTOR_HEAP_FLAG_NONE,
NodeMask: 0,
};
device.create_descriptor_heap::<ID3D12DescriptorHeap>(&desc)?
};
// Describe and create a shader resource view (SRV) heap for the texture.
let srv_heap = {
let desc = D3D12_DESCRIPTOR_HEAP_DESC {
NumDescriptors: 1,
Type: D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
Flags: D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE,
NodeMask: 0,
};
device.create_descriptor_heap::<ID3D12DescriptorHeap>(&desc)?
};
let rtv_descriptor_size =
device.get_descriptor_handle_increment_size(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
        // Create the frame buffers (one render target view per frame).
let render_targets = {
let mut rtv_handle = rtv_heap.get_cpu_descriptor_handle_for_heap_start();
let mut targets: Vec<ComRc<ID3D12Resource>> = Vec::with_capacity(FRAME_COUNT as usize);
for n in 0..FRAME_COUNT {
let target = swap_chain.get_buffer::<ID3D12Resource>(n)?;
device.create_render_target_view(&target, None, rtv_handle);
rtv_handle.offset(1, rtv_descriptor_size);
targets.push(target);
}
targets
};
        // Create the command allocator.
let command_allocator = device.create_command_allocator(D3D12_COMMAND_LIST_TYPE_DIRECT)?;
//------------------------------------------------------------------
        // LoadAssets (D3D12 rendering initialization)
//------------------------------------------------------------------
// Create the root signature.
let root_signature = {
let ranges = {
let range = D3D12_DESCRIPTOR_RANGE::new(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
[range]
};
let root_parameters = {
let a =
D3D12_ROOT_PARAMETER::new_constants(1, 0, 0, D3D12_SHADER_VISIBILITY_VERTEX);
let b = D3D12_ROOT_PARAMETER::new_descriptor_table(
&ranges,
D3D12_SHADER_VISIBILITY_PIXEL,
);
[a, b]
};
let samplers = unsafe {
let mut sampler = mem::zeroed::<D3D12_STATIC_SAMPLER_DESC>();
sampler.Filter = D3D12_FILTER_MIN_MAG_MIP_POINT;
sampler.AddressU = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.AddressV = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.AddressW = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.MipLODBias = 0.0;
sampler.MaxAnisotropy = 0;
sampler.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
sampler.BorderColor = D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK;
sampler.MinLOD = 0.0;
sampler.MaxLOD = D3D12_FLOAT32_MAX;
sampler.ShaderRegister = 0;
sampler.RegisterSpace = 0;
sampler.ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL;
[sampler]
};
let desc = D3D12_ROOT_SIGNATURE_DESC::new(
&root_parameters,
&samplers,
D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT,
);
let (signature, _error) =
d3d12_serialize_root_signature(&desc, D3D_ROOT_SIGNATURE_VERSION_1)?;
device.create_root_signature::<ID3D12RootSignature>(
0,
signature.get_buffer_pointer(),
signature.get_buffer_size(),
)?
};
// Create the pipeline state, which includes compiling and loading shaders.
let pipeline_state = {
let flags: u32 = {
                #[cfg(debug_assertions)]
{
D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION
}
                #[cfg(not(debug_assertions))]
{
0
}
};
let file = "resources\\shaders.hlsl";
let (vertex_shader, _) =
d3d_compile_from_file(file, None, None, "VSMain", "vs_5_0", flags, 0)?;
let (pixel_shader, _) =
d3d_compile_from_file(file, None, None, "PSMain", "ps_5_0", flags, 0)?;
// Define the vertex input layout.
let input_element_descs = {
let a = D3D12_INPUT_ELEMENT_DESC::new(
*t::POSITION,
0,
DXGI_FORMAT_R32G32B32_FLOAT,
0,
0,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
let b = D3D12_INPUT_ELEMENT_DESC::new(
*t::TEXCOORD,
0,
DXGI_FORMAT_R32G32_FLOAT,
0,
12,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
[a, b]
};
let alpha_blend = {
let mut desc: D3D12_BLEND_DESC = unsafe { mem::zeroed() };
desc.AlphaToCoverageEnable = FALSE;
desc.IndependentBlendEnable = FALSE;
desc.RenderTarget[0] = D3D12_RENDER_TARGET_BLEND_DESC {
BlendEnable: TRUE,
LogicOpEnable: FALSE,
SrcBlend: D3D12_BLEND_ONE,
DestBlend: D3D12_BLEND_INV_SRC_ALPHA,
BlendOp: D3D12_BLEND_OP_ADD,
SrcBlendAlpha: D3D12_BLEND_ONE,
DestBlendAlpha: D3D12_BLEND_INV_SRC_ALPHA,
BlendOpAlpha: D3D12_BLEND_OP_ADD,
LogicOp: D3D12_LOGIC_OP_CLEAR,
RenderTargetWriteMask: D3D12_COLOR_WRITE_ENABLE_ALL as u8,
};
desc
};
// Describe and create the graphics pipeline state object (PSO).
let pso_desc = {
let mut desc: D3D12_GRAPHICS_PIPELINE_STATE_DESC = unsafe { mem::zeroed() };
desc.InputLayout = input_element_descs.layout();
desc.pRootSignature = to_mut_ptr(root_signature.as_ptr());
desc.VS = D3D12_SHADER_BYTECODE::new(&vertex_shader);
desc.PS = D3D12_SHADER_BYTECODE::new(&pixel_shader);
desc.RasterizerState = D3D12_RASTERIZER_DESC::default();
desc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
desc.BlendState = alpha_blend;
desc.DepthStencilState.DepthEnable = FALSE;
desc.DepthStencilState.StencilEnable = FALSE;
desc.SampleMask = UINT_MAX;
desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
desc.NumRenderTargets = 1;
desc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc
};
device.create_graphics_pipeline_state(&pso_desc)?
};
// Create the command list.
let command_list = device.create_command_list::<ID3D12GraphicsCommandList>(
0,
D3D12_COMMAND_LIST_TYPE_DIRECT,
&command_allocator,
&pipeline_state,
)?;
// Create the vertex buffer.
let (vertex_buffer, vertex_buffer_view) = {
// Define the geometry for a circle.
let items = (-1..CIRCLE_SEGMENTS)
.map(|i| match i {
-1 => {
let pos = [0_f32, 0_f32, 0_f32];
let uv = [0.5_f32, 0.5_f32];
Vertex::new(pos, uv)
}
_ => {
let theta = PI * 2.0_f32 * (i as f32) / (CIRCLE_SEGMENTS as f32);
let x = theta.sin();
let y = theta.cos();
let pos = [x, y * aspect_ratio, 0.0_f32];
let uv = [x * 0.5_f32 + 0.5_f32, y * 0.5_f32 + 0.5_f32];
Vertex::new(pos, uv)
}
})
.collect::<Vec<_>>();
println!("{:?}", items);
let size_of = mem::size_of::<Vertex>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the triangle data to the vertex buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the vertex buffer view.
let view = D3D12_VERTEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
StrideInBytes: size_of as u32,
};
(buffer, view)
};
// Create the index buffer
let (index_buffer, index_buffer_view) = {
// Define the geometry for a circle.
let items = (0..CIRCLE_SEGMENTS)
.map(|i| {
                    let a = 0 as UINT16;
                    let b = (1 + i) as UINT16;
                    // Wrap the last triangle back to the first perimeter vertex;
                    // otherwise the final index would be CIRCLE_SEGMENTS + 1,
                    // one past the end of the vertex buffer built above.
                    let c = (if 2 + i > CIRCLE_SEGMENTS { 1 } else { 2 + i }) as UINT16;
[a, b, c]
})
.flat_map(|a| ArrayIterator3::new(a))
.collect::<Vec<_>>();
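            // Worked example (hypothetical CIRCLE_SEGMENTS = 4): the fan above
            // yields [0,1,2, 0,2,3, 0,3,4, 0,4,1] -- vertex 0 is the centre and
            // each triangle pairs it with two consecutive perimeter vertices.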
let size_of = mem::size_of::<UINT16>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
| {
let rc = self.item[self.index];
self.index += 1;
Some(rc)
} | conditional_block |
|
model.rs | (window: &Window) -> Result<DxModel, HRESULT> {
// window params
let size = window.inner_size();
println!("inner_size={:?}", size);
let hwnd = window.raw_window_handle();
let aspect_ratio = (size.width as f32) / (size.height as f32);
let viewport = D3D12_VIEWPORT {
Width: size.width as _,
Height: size.height as _,
MaxDepth: 1.0_f32,
..unsafe { mem::zeroed() }
};
let scissor_rect = D3D12_RECT {
right: size.width as _,
bottom: size.height as _,
..unsafe { mem::zeroed() }
};
// Enable the D3D12 debug layer.
#[cfg(build = "debug")]
{
let debugController = d3d12_get_debug_interface::<ID3D12Debug>()?;
unsafe { debugController.EnableDebugLayer() }
}
let factory = create_dxgi_factory1::<IDXGIFactory4>()?;
        // Create the D3D12 device. If no hardware device can be obtained,
        // fall back to a WARP (software) device.
let device = factory.d3d12_create_best_device()?;
        // Create the command queue.
let command_queue = {
let desc = D3D12_COMMAND_QUEUE_DESC {
Flags: D3D12_COMMAND_QUEUE_FLAG_NONE,
Type: D3D12_COMMAND_LIST_TYPE_DIRECT,
NodeMask: 0,
Priority: 0,
};
device.create_command_queue::<ID3D12CommandQueue>(&desc)?
};
        // Create the swap chain.
let swap_chain = {
let desc = DXGI_SWAP_CHAIN_DESC1 {
BufferCount: FRAME_COUNT,
Width: size.width,
Height: size.height,
Format: DXGI_FORMAT_R8G8B8A8_UNORM,
BufferUsage: DXGI_USAGE_RENDER_TARGET_OUTPUT,
SwapEffect: DXGI_SWAP_EFFECT_FLIP_DISCARD,
SampleDesc: DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
AlphaMode: DXGI_ALPHA_MODE_PREMULTIPLIED,
Flags: 0,
Scaling: 0,
Stereo: 0,
};
factory
.create_swap_chain_for_composition(&command_queue, &desc)?
.query_interface::<IDXGISwapChain3>()?
};
        // Set up DirectComposition: present the (premultiplied-alpha) swap
        // chain through a composition visual attached to the window.
let dc_dev = dcomp_create_device::<IDCompositionDevice>(None)?;
let dc_target = dc_dev.create_target_for_hwnd(hwnd, true)?;
let dc_visual = dc_dev.create_visual()?;
dc_visual.set_content(&swap_chain)?;
dc_target.set_root(&dc_visual)?;
dc_dev.commit()?;
        // This sample does not support fullscreen transitions.
factory.make_window_association(hwnd, DXGI_MWA_NO_ALT_ENTER)?;
let mut frame_index = swap_chain.get_current_back_buffer_index();
// Create descriptor heaps.
// Describe and create a render target view (RTV) descriptor heap.
let rtv_heap = {
let desc = D3D12_DESCRIPTOR_HEAP_DESC {
NumDescriptors: FRAME_COUNT,
Type: D3D12_DESCRIPTOR_HEAP_TYPE_RTV,
Flags: D3D12_DESCRIPTOR_HEAP_FLAG_NONE,
NodeMask: 0,
};
device.create_descriptor_heap::<ID3D12DescriptorHeap>(&desc)?
};
// Describe and create a shader resource view (SRV) heap for the texture.
let srv_heap = {
let desc = D3D12_DESCRIPTOR_HEAP_DESC {
NumDescriptors: 1,
Type: D3D12_DESCRIPTOR_HEAP_TYPE_CBV_SRV_UAV,
Flags: D3D12_DESCRIPTOR_HEAP_FLAG_SHADER_VISIBLE,
NodeMask: 0,
};
device.create_descriptor_heap::<ID3D12DescriptorHeap>(&desc)?
};
let rtv_descriptor_size =
device.get_descriptor_handle_increment_size(D3D12_DESCRIPTOR_HEAP_TYPE_RTV);
        // Create the frame buffers (one render target view per frame).
let render_targets = {
let mut rtv_handle = rtv_heap.get_cpu_descriptor_handle_for_heap_start();
let mut targets: Vec<ComRc<ID3D12Resource>> = Vec::with_capacity(FRAME_COUNT as usize);
for n in 0..FRAME_COUNT {
let target = swap_chain.get_buffer::<ID3D12Resource>(n)?;
device.create_render_target_view(&target, None, rtv_handle);
rtv_handle.offset(1, rtv_descriptor_size);
targets.push(target);
}
targets
};
        // Create the command allocator.
let command_allocator = device.create_command_allocator(D3D12_COMMAND_LIST_TYPE_DIRECT)?;
//------------------------------------------------------------------
        // LoadAssets (D3D12 rendering initialization)
//------------------------------------------------------------------
// Create the root signature.
let root_signature = {
let ranges = {
let range = D3D12_DESCRIPTOR_RANGE::new(D3D12_DESCRIPTOR_RANGE_TYPE_SRV, 1, 0);
[range]
};
let root_parameters = {
let a =
D3D12_ROOT_PARAMETER::new_constants(1, 0, 0, D3D12_SHADER_VISIBILITY_VERTEX);
let b = D3D12_ROOT_PARAMETER::new_descriptor_table(
&ranges,
D3D12_SHADER_VISIBILITY_PIXEL,
);
[a, b]
};
let samplers = unsafe {
let mut sampler = mem::zeroed::<D3D12_STATIC_SAMPLER_DESC>();
sampler.Filter = D3D12_FILTER_MIN_MAG_MIP_POINT;
sampler.AddressU = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.AddressV = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.AddressW = D3D12_TEXTURE_ADDRESS_MODE_WRAP;
sampler.MipLODBias = 0.0;
sampler.MaxAnisotropy = 0;
sampler.ComparisonFunc = D3D12_COMPARISON_FUNC_NEVER;
sampler.BorderColor = D3D12_STATIC_BORDER_COLOR_TRANSPARENT_BLACK;
sampler.MinLOD = 0.0;
sampler.MaxLOD = D3D12_FLOAT32_MAX;
sampler.ShaderRegister = 0;
sampler.RegisterSpace = 0;
sampler.ShaderVisibility = D3D12_SHADER_VISIBILITY_PIXEL;
[sampler]
};
let desc = D3D12_ROOT_SIGNATURE_DESC::new(
&root_parameters,
&samplers,
D3D12_ROOT_SIGNATURE_FLAG_ALLOW_INPUT_ASSEMBLER_INPUT_LAYOUT,
);
let (signature, _error) =
d3d12_serialize_root_signature(&desc, D3D_ROOT_SIGNATURE_VERSION_1)?;
device.create_root_signature::<ID3D12RootSignature>(
0,
signature.get_buffer_pointer(),
signature.get_buffer_size(),
)?
};
// Create the pipeline state, which includes compiling and loading shaders.
let pipeline_state = {
let flags: u32 = {
                #[cfg(debug_assertions)]
{
D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION
}
                #[cfg(not(debug_assertions))]
{
0
}
};
let file = "resources\\shaders.hlsl";
let (vertex_shader, _) =
d3d_compile_from_file(file, None, None, "VSMain", "vs_5_0", flags, 0)?;
let (pixel_shader, _) =
d3d_compile_from_file(file, None, None, "PSMain", "ps_5_0", flags, 0)?;
// Define the vertex input layout.
let input_element_descs = {
let a = D3D12_INPUT_ELEMENT_DESC::new(
*t::POSITION,
0,
DXGI_FORMAT_R32G32B32_FLOAT,
0,
0,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
let b = D3D12_INPUT_ELEMENT_DESC::new(
*t::TEXCOORD,
0,
DXGI_FORMAT_R32G32_FLOAT,
0,
12,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
[a, b]
};
let alpha_blend = {
let mut desc: D3D12_BLEND_DESC = unsafe { mem::zeroed() };
desc.AlphaToCoverageEnable = FALSE;
desc.IndependentBlendEnable = FALSE;
desc.RenderTarget[0] = D3D12_RENDER_TARGET_BLEND_DESC {
BlendEnable: TRUE,
LogicOpEnable: FALSE,
SrcBlend: D3D12_BLEND_ONE,
DestBlend: D3D12_BLEND_INV_SRC_ALPHA,
BlendOp: D3D12_BLEND_OP_ADD,
SrcBlendAlpha: D3D12_BLEND_ONE,
DestBlendAlpha: D3D12_BLEND_INV_SRC_ALPHA,
BlendOpAlpha: D3D12_BLEND_OP_ADD,
LogicOp: D3D12_LOGIC_OP_CLEAR,
RenderTargetWriteMask: D3D12_COLOR_WRITE_ENABLE_ALL as u8,
};
desc
};
// Describe and create the graphics pipeline state object (PSO).
let pso_desc = {
let mut desc: D3D12_GRAPHICS_PIPELINE_STATE_DESC = unsafe { mem::zeroed() };
desc.InputLayout = input_element_descs.layout();
desc.pRootSignature = to_mut_ptr(root_signature.as_ptr());
desc.VS = D3D12_SHADER_BYTECODE::new(&vertex_shader);
desc.PS = D3D12_SHADER_BYTECODE::new(&pixel_shader);
desc.RasterizerState = D3D12_RASTERIZER_DESC::default();
desc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
desc.BlendState = alpha_blend;
desc.DepthStencilState.DepthEnable = FALSE;
desc.DepthStencilState.StencilEnable = FALSE;
desc.SampleMask = UINT_MAX;
desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
desc.NumRenderTargets = 1;
desc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc
};
device.create_graphics_pipeline_state(&pso_desc)?
};
// Create the command list.
let command_list = device.create_command_list::<ID3D12GraphicsCommandList>(
0,
D3D12_COMMAND_LIST_TYPE_DIRECT,
&command_allocator,
&pipeline_state,
)?;
// Create the vertex buffer.
let (vertex_buffer, vertex_buffer_view) = {
// Define the geometry for a circle.
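// The circle is built as a triangle fan: index -1 emits the center vertex,
// and each i in 0..CIRCLE_SEGMENTS emits a rim vertex at angle
// 2*PI*i/CIRCLE_SEGMENTS, remapping positions in [-1, 1] to UVs in [0, 1].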
let items = (-1..CIRCLE_SEGMENTS)
.map(|i| match i {
-1 => {
let pos = [0_f32, 0_f32, 0_f32];
let uv = [0.5_f32, 0.5_f32];
Vertex::new(pos, uv)
}
_ => {
let theta = PI * 2.0_f32 * (i as f32) / (CIRCLE_SEGMENTS as f32);
let x = theta.sin();
let y = theta.cos();
let pos = [x, y * aspect_ratio, 0.0_f32];
let uv = [x * 0.5_f32 + 0.5_f32, y * 0.5_f32 + 0.5_f32];
Vertex::new(pos, uv)
}
})
.collect::<Vec<_>>();
println!("{:?}", items);
let size_of = mem::size_of::<Vertex>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
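// A sketch of the recommended alternative, assuming the same wrapper API:
// create the buffer in a DEFAULT heap in the COPY_DEST state, stage the
// bytes through an UPLOAD heap, record a buffer-to-buffer copy on the
// command list, then barrier the buffer to a readable state before the
// GPU first uses it (as done for the texture below).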
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the triangle data to the vertex buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the vertex buffer view.
let view = D3D12_VERTEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
StrideInBytes: size_of as u32,
};
(buffer, view)
};
// Create the index buffer
let (index_buffer, index_buffer_view) = {
// Define the geometry for a circle.
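// Fan indices: segment i becomes the triangle [0, i + 1, i + 2], so every
// triangle shares the center vertex 0.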
let items = (0..CIRCLE_SEGMENTS)
.map(|i| {
let a = 0 as UINT16;
let b = (1 + i) as UINT16;
let c = (2 + i) as UINT16;
[a, b, c]
})
.flat_map(ArrayIterator3::new)
.collect::<Vec<_>>();
let size_of = mem::size_of::<UINT16>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the index data to the index buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the index buffer view.
let view = D3D12_INDEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
Format: DXGI_FORMAT_R16_UINT,
};
(buffer, view)
};
// Create the texture.
// Note: ComPtr's are CPU objects but this resource needs to stay in scope until
// the command list that references it has finished executing on the GPU.
// We will flush the GPU at the end of this method to ensure the resource is not
// prematurely destroyed.
// Move ownership of texture_upload_heap out to the function scope so that
// it is released only after the GPU has been flushed.
let (_texture_upload_heap, texture) = {
// Describe and create a Texture2D.
let texture_desc = D3D12_RESOURCE_DESC::new(
D3D12_RESOURCE_DIMENSION_TEXTURE2D,
0,
TEXTURE_WIDTH,
TEXTURE | new | identifier_name |
|
model.rs | file(file, None, None, "PSMain", "ps_5_0", flags, 0)?;
// Define the vertex input layout.
let input_element_descs = {
let a = D3D12_INPUT_ELEMENT_DESC::new(
*t::POSITION,
0,
DXGI_FORMAT_R32G32B32_FLOAT,
0,
0,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
let b = D3D12_INPUT_ELEMENT_DESC::new(
*t::TEXCOORD,
0,
DXGI_FORMAT_R32G32_FLOAT,
0,
12,
D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA,
0,
);
[a, b]
};
let alpha_blend = {
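// Premultiplied-alpha blending: out = src + dst * (1 - src.alpha), with
// the same equation applied to the alpha channel.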
let mut desc: D3D12_BLEND_DESC = unsafe { mem::zeroed() };
desc.AlphaToCoverageEnable = FALSE;
desc.IndependentBlendEnable = FALSE;
desc.RenderTarget[0] = D3D12_RENDER_TARGET_BLEND_DESC {
BlendEnable: TRUE,
LogicOpEnable: FALSE,
SrcBlend: D3D12_BLEND_ONE,
DestBlend: D3D12_BLEND_INV_SRC_ALPHA,
BlendOp: D3D12_BLEND_OP_ADD,
SrcBlendAlpha: D3D12_BLEND_ONE,
DestBlendAlpha: D3D12_BLEND_INV_SRC_ALPHA,
BlendOpAlpha: D3D12_BLEND_OP_ADD,
LogicOp: D3D12_LOGIC_OP_CLEAR,
RenderTargetWriteMask: D3D12_COLOR_WRITE_ENABLE_ALL as u8,
};
desc
};
// Describe and create the graphics pipeline state object (PSO).
let pso_desc = {
let mut desc: D3D12_GRAPHICS_PIPELINE_STATE_DESC = unsafe { mem::zeroed() };
desc.InputLayout = input_element_descs.layout();
desc.pRootSignature = to_mut_ptr(root_signature.as_ptr());
desc.VS = D3D12_SHADER_BYTECODE::new(&vertex_shader);
desc.PS = D3D12_SHADER_BYTECODE::new(&pixel_shader);
desc.RasterizerState = D3D12_RASTERIZER_DESC::default();
desc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
desc.BlendState = alpha_blend;
desc.DepthStencilState.DepthEnable = FALSE;
desc.DepthStencilState.StencilEnable = FALSE;
desc.SampleMask = UINT_MAX;
desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
desc.NumRenderTargets = 1;
desc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc
};
device.create_graphics_pipeline_state(&pso_desc)?
};
// Create the command list.
let command_list = device.create_command_list::<ID3D12GraphicsCommandList>(
0,
D3D12_COMMAND_LIST_TYPE_DIRECT,
&command_allocator,
&pipeline_state,
)?;
// Create the vertex buffer.
let (vertex_buffer, vertex_buffer_view) = {
// Define the geometry for a circle.
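// The circle is built as a triangle fan: index -1 emits the center vertex,
// and each i in 0..CIRCLE_SEGMENTS emits a rim vertex at angle
// 2*PI*i/CIRCLE_SEGMENTS, remapping positions in [-1, 1] to UVs in [0, 1].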
let items = (-1..CIRCLE_SEGMENTS)
.map(|i| match i {
-1 => {
let pos = [0_f32, 0_f32, 0_f32];
let uv = [0.5_f32, 0.5_f32];
Vertex::new(pos, uv)
}
_ => {
let theta = PI * 2.0_f32 * (i as f32) / (CIRCLE_SEGMENTS as f32);
let x = theta.sin();
let y = theta.cos();
let pos = [x, y * aspect_ratio, 0.0_f32];
let uv = [x * 0.5_f32 + 0.5_f32, y * 0.5_f32 + 0.5_f32];
Vertex::new(pos, uv)
}
})
.collect::<Vec<_>>();
println!("{:?}", items);
let size_of = mem::size_of::<Vertex>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
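// A sketch of the recommended alternative, assuming the same wrapper API:
// create the buffer in a DEFAULT heap in the COPY_DEST state, stage the
// bytes through an UPLOAD heap, record a buffer-to-buffer copy on the
// command list, then barrier the buffer to a readable state before the
// GPU first uses it (as done for the texture below).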
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the triangle data to the vertex buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the vertex buffer view.
let view = D3D12_VERTEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
StrideInBytes: size_of as u32,
};
(buffer, view)
};
// Create the index buffer
let (index_buffer, index_buffer_view) = {
// Define the geometry for a circle.
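// Fan indices: segment i becomes the triangle [0, i + 1, i + 2], so every
// triangle shares the center vertex 0.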
let items = (0..CIRCLE_SEGMENTS)
.map(|i| {
let a = 0 as UINT16;
let b = (1 + i) as UINT16;
let c = (2 + i) as UINT16;
[a, b, c]
})
.flat_map(ArrayIterator3::new)
.collect::<Vec<_>>();
let size_of = mem::size_of::<UINT16>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the index data to the index buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the index buffer view.
let view = D3D12_INDEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
Format: DXGI_FORMAT_R16_UINT,
};
(buffer, view)
};
// Create the texture.
// Note: ComPtr's are CPU objects but this resource needs to stay in scope until
// the command list that references it has finished executing on the GPU.
// We will flush the GPU at the end of this method to ensure the resource is not
// prematurely destroyed.
// Move ownership of texture_upload_heap out to the function scope so that
// it is released only after the GPU has been flushed.
let (_texture_upload_heap, texture) = {
// Describe and create a Texture2D.
let texture_desc = D3D12_RESOURCE_DESC::new(
D3D12_RESOURCE_DIMENSION_TEXTURE2D,
0,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
1,
1,
DXGI_FORMAT_R8G8B8A8_UNORM,
1,
0,
D3D12_TEXTURE_LAYOUT_UNKNOWN,
D3D12_RESOURCE_FLAG_NONE,
);
let texture = {
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_DEFAULT);
device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&texture_desc,
D3D12_RESOURCE_STATE_COPY_DEST,
None,
)?
};
let upload_buffer_size = texture.get_required_intermediate_size(0, 1)?;
// Create the GPU upload buffer.
let texture_upload_heap = {
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(upload_buffer_size);
device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?
};
// Copy data to the intermediate upload heap and then schedule a copy
// from the upload heap to the Texture2D.
let texture_bytes = generate_texture_data();
let texture_data = {
let ptr = texture_bytes.as_ptr();
let row_pitch = ((TEXTURE_WIDTH as usize) * mem::size_of::<u32>()) as isize;
let slice_pitch = row_pitch * (TEXTURE_HEIGHT as isize);
[D3D12_SUBRESOURCE_DATA {
pData: ptr as _,
RowPitch: row_pitch,
SlicePitch: slice_pitch,
}]
};
let _ = command_list.update_subresources_as_heap(
&texture,
&texture_upload_heap,
0,
&texture_data,
)?;
{
let barrier = D3D12_RESOURCE_BARRIER::transition(
&texture,
D3D12_RESOURCE_STATE_COPY_DEST,
D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
);
command_list.resource_barrier(1, &barrier);
}
// Describe and create a SRV for the texture.
{
let desc = unsafe {
let mut desc = mem::zeroed::<D3D12_SHADER_RESOURCE_VIEW_DESC>();
desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
desc.Format = texture_desc.Format;
desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
{
let mut t = desc.u.Texture2D_mut();
t.MipLevels = 1;
}
desc
};
device.create_shader_resource_view(
&texture,
&desc,
srv_heap.get_cpu_descriptor_handle_for_heap_start(),
);
}
(texture_upload_heap, texture)
};
// Close the command list and execute it to begin the initial GPU setup.
{
command_list.close()?;
let a: &ID3D12GraphicsCommandList = &command_list;
command_queue.execute_command_lists(&[a]);
}
// Create synchronization objects and wait until assets have been uploaded to the GPU.
let (fence, fence_value, fence_event) = {
let fence = device.create_fence::<ID3D12Fence>(0, D3D12_FENCE_FLAG_NONE)?;
let mut fence_value = 1_u64;
// Create an event handle to use for frame synchronization.
let fence_event = create_event(None, false, false, None)?;
// Wait for the command list to execute; we are reusing the same command
// list in our main loop but for now, we just want to wait for setup to
// complete before continuing.
wait_for_previous_frame(
&swap_chain,
&command_queue,
&fence,
fence_event,
&mut fence_value,
&mut frame_index,
)?;
(fence, fence_value, fence_event)
};
//------------------------------------------------------------------
// result
//------------------------------------------------------------------
Ok(DxModel {
aspect_ratio: aspect_ratio,
device: device,
command_queue: command_queue,
swap_chain: swap_chain,
dc_dev: dc_dev,
dc_target: dc_target,
dc_visual: dc_visual,
frame_index: frame_index,
rtv_heap: rtv_heap,
srv_heap: srv_heap,
rtv_descriptor_size: rtv_descriptor_size,
render_targets: render_targets,
command_allocator: command_allocator,
root_signature: root_signature,
pipeline_state: pipeline_state,
command_list: command_list,
vertex_buffer: vertex_buffer,
vertex_buffer_view: vertex_buffer_view,
index_buffer: index_buffer,
index_buffer_view: index_buffer_view,
texture: texture,
fence: fence,
fence_value: fence_value,
fence_event: fence_event,
rotation_radians: 0_f32,
viewport: viewport,
scissor_rect: scissor_rect,
})
}
pub fn render(&mut self) -> Result<(), HRESULT> {
{
self.populate_command_list()?;
}
{
let command_queue = &self.command_queue;
let command_list = &self.command_list;
let swap_chain = &self.swap_chain;
command_queue.execute_command_lists(&[command_list]);
swap_chain.present(1, 0)?;
}
{
self.wait_for_previous_frame()?;
}
Ok(())
}
/// Builds the command list that renders a single frame.
fn populate_command_list(&mut self) -> Result<(), HRESULT> {
let command_allocator = self.command_allocator.as_ref();
let command_list = self.command_list.as_ref();
let pipeline_state = self.pipeline_state.as_ref();
let root_signature = self.root_signature.as_ref();
let srv_heap = self.srv_heap.as_ref();
let rtv_heap = self.rtv | _heap.as_ref();
let rtv_descriptor_size = self.rtv_descriptor_size;
let viewport = &self.viewport;
let scissor_rect = &self.scissor_rect;
let render_targets = self.render_targets.as_slice();
let frame_index = self.frame_index as usize;
let vertex_buffer_view = &self.vertex_buffer_view;
let index_buffer_view = &self.index_buffer_view;
// Command list allocators can only be reset when the associated
// command lists have finished execution on the GPU; apps should use
// fences to determine GPU execution progress.
command_allocator.reset()?;
// However, when ExecuteCommandList() is called on a particular command
// list, that command list can then be reset at any time and must be before
// re-recording.
command_list.reset(command_allocator, pipeline_state)?;
// Set necessary state. | identifier_body |
|
model.rs | desc.RasterizerState = D3D12_RASTERIZER_DESC::default();
desc.RasterizerState.CullMode = D3D12_CULL_MODE_NONE;
desc.BlendState = alpha_blend;
desc.DepthStencilState.DepthEnable = FALSE;
desc.DepthStencilState.StencilEnable = FALSE;
desc.SampleMask = UINT_MAX;
desc.PrimitiveTopologyType = D3D12_PRIMITIVE_TOPOLOGY_TYPE_TRIANGLE;
desc.NumRenderTargets = 1;
desc.RTVFormats[0] = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc
};
device.create_graphics_pipeline_state(&pso_desc)?
};
// Create the command list.
let command_list = device.create_command_list::<ID3D12GraphicsCommandList>(
0,
D3D12_COMMAND_LIST_TYPE_DIRECT,
&command_allocator,
&pipeline_state,
)?;
// Create the vertex buffer.
let (vertex_buffer, vertex_buffer_view) = {
// Define the geometry for a circle.
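// The circle is built as a triangle fan: index -1 emits the center vertex,
// and each i in 0..CIRCLE_SEGMENTS emits a rim vertex at angle
// 2*PI*i/CIRCLE_SEGMENTS, remapping positions in [-1, 1] to UVs in [0, 1].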
let items = (-1..CIRCLE_SEGMENTS)
.map(|i| match i {
-1 => {
let pos = [0_f32, 0_f32, 0_f32];
let uv = [0.5_f32, 0.5_f32];
Vertex::new(pos, uv)
}
_ => {
let theta = PI * 2.0_f32 * (i as f32) / (CIRCLE_SEGMENTS as f32);
let x = theta.sin();
let y = theta.cos();
let pos = [x, y * aspect_ratio, 0.0_f32];
let uv = [x * 0.5_f32 + 0.5_f32, y * 0.5_f32 + 0.5_f32];
Vertex::new(pos, uv)
}
})
.collect::<Vec<_>>();
println!("{:?}", items);
let size_of = mem::size_of::<Vertex>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
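// A sketch of the recommended alternative, assuming the same wrapper API:
// create the buffer in a DEFAULT heap in the COPY_DEST state, stage the
// bytes through an UPLOAD heap, record a buffer-to-buffer copy on the
// command list, then barrier the buffer to a readable state before the
// GPU first uses it (as done for the texture below).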
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the triangle data to the vertex buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the vertex buffer view.
let view = D3D12_VERTEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
StrideInBytes: size_of as u32,
};
(buffer, view)
};
// Create the index buffer
let (index_buffer, index_buffer_view) = {
// Define the geometry for a circle.
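// Fan indices: segment i becomes the triangle [0, i + 1, i + 2], so every
// triangle shares the center vertex 0.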
let items = (0..CIRCLE_SEGMENTS)
.map(|i| {
let a = 0 as UINT16;
let b = (1 + i) as UINT16;
let c = (2 + i) as UINT16;
[a, b, c]
})
.flat_map(ArrayIterator3::new)
.collect::<Vec<_>>();
let size_of = mem::size_of::<UINT16>();
let size = size_of * items.len();
let p = items.as_ptr();
// Note: using upload heaps to transfer static data like vert buffers is not
// recommended. Every time the GPU needs it, the upload heap will be marshalled
// over. Please read up on Default Heap usage. An upload heap is used here for
// code simplicity and because there are very few verts to actually transfer.
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(size as u64);
let buffer = device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?;
// Copy the index data to the index buffer.
let read_range = D3D12_RANGE::new(0, 0); // We do not intend to read from this resource on the CPU.
buffer.map(0, Some(&read_range))?.memcpy(p, size);
// Initialize the index buffer view.
let view = D3D12_INDEX_BUFFER_VIEW {
BufferLocation: buffer.get_gpu_virtual_address(),
SizeInBytes: size as u32,
Format: DXGI_FORMAT_R16_UINT,
};
(buffer, view)
};
// Create the texture.
// Note: ComPtr's are CPU objects but this resource needs to stay in scope until
// the command list that references it has finished executing on the GPU.
// We will flush the GPU at the end of this method to ensure the resource is not
// prematurely destroyed.
// Move ownership of texture_upload_heap out to the function scope so that
// it is released only after the GPU has been flushed.
let (_texture_upload_heap, texture) = {
// Describe and create a Texture2D.
let texture_desc = D3D12_RESOURCE_DESC::new(
D3D12_RESOURCE_DIMENSION_TEXTURE2D,
0,
TEXTURE_WIDTH,
TEXTURE_HEIGHT,
1,
1,
DXGI_FORMAT_R8G8B8A8_UNORM,
1,
0,
D3D12_TEXTURE_LAYOUT_UNKNOWN,
D3D12_RESOURCE_FLAG_NONE,
);
let texture = {
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_DEFAULT);
device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&texture_desc,
D3D12_RESOURCE_STATE_COPY_DEST,
None,
)?
};
let upload_buffer_size = texture.get_required_intermediate_size(0, 1)?;
// Create the GPU upload buffer.
let texture_upload_heap = {
let properties = D3D12_HEAP_PROPERTIES::new(D3D12_HEAP_TYPE_UPLOAD);
let desc = D3D12_RESOURCE_DESC::buffer(upload_buffer_size);
device.create_committed_resource::<ID3D12Resource>(
&properties,
D3D12_HEAP_FLAG_NONE,
&desc,
D3D12_RESOURCE_STATE_GENERIC_READ,
None,
)?
};
// Copy data to the intermediate upload heap and then schedule a copy
// from the upload heap to the Texture2D.
let texture_bytes = generate_texture_data();
let texture_data = {
let ptr = texture_bytes.as_ptr();
let row_pitch = ((TEXTURE_WIDTH as usize) * mem::size_of::<u32>()) as isize;
let slice_pitch = row_pitch * (TEXTURE_HEIGHT as isize);
[D3D12_SUBRESOURCE_DATA {
pData: ptr as _,
RowPitch: row_pitch,
SlicePitch: slice_pitch,
}]
};
let _ = command_list.update_subresources_as_heap(
&texture,
&texture_upload_heap,
0,
&texture_data,
)?;
{
let barrier = D3D12_RESOURCE_BARRIER::transition(
&texture,
D3D12_RESOURCE_STATE_COPY_DEST,
D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
);
command_list.resource_barrier(1, &barrier);
}
// Describe and create a SRV for the texture.
{
let desc = unsafe {
let mut desc = mem::zeroed::<D3D12_SHADER_RESOURCE_VIEW_DESC>();
desc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
desc.Format = texture_desc.Format;
desc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
{
let mut t = desc.u.Texture2D_mut();
t.MipLevels = 1;
}
desc
};
device.create_shader_resource_view(
&texture,
&desc,
srv_heap.get_cpu_descriptor_handle_for_heap_start(),
);
}
(texture_upload_heap, texture)
};
// Close the command list and execute it to begin the initial GPU setup.
{
command_list.close()?;
let a: &ID3D12GraphicsCommandList = &command_list;
command_queue.execute_command_lists(&[a]);
}
// Create synchronization objects and wait until assets have been uploaded to the GPU.
let (fence, fence_value, fence_event) = {
let fence = device.create_fence::<ID3D12Fence>(0, D3D12_FENCE_FLAG_NONE)?;
let mut fence_value = 1_u64;
// Create an event handle to use for frame synchronization.
let fence_event = create_event(None, false, false, None)?;
// Wait for the command list to execute; we are reusing the same command
// list in our main loop but for now, we just want to wait for setup to
// complete before continuing.
wait_for_previous_frame(
&swap_chain,
&command_queue,
&fence,
fence_event,
&mut fence_value,
&mut frame_index,
)?;
(fence, fence_value, fence_event)
};
//------------------------------------------------------------------
// result
//------------------------------------------------------------------
Ok(DxModel {
aspect_ratio: aspect_ratio,
device: device,
command_queue: command_queue,
swap_chain: swap_chain,
dc_dev: dc_dev,
dc_target: dc_target,
dc_visual: dc_visual,
frame_index: frame_index,
rtv_heap: rtv_heap,
srv_heap: srv_heap,
rtv_descriptor_size: rtv_descriptor_size,
render_targets: render_targets,
command_allocator: command_allocator,
root_signature: root_signature,
pipeline_state: pipeline_state,
command_list: command_list,
vertex_buffer: vertex_buffer,
vertex_buffer_view: vertex_buffer_view,
index_buffer: index_buffer,
index_buffer_view: index_buffer_view,
texture: texture,
fence: fence,
fence_value: fence_value,
fence_event: fence_event,
rotation_radians: 0_f32,
viewport: viewport,
scissor_rect: scissor_rect,
})
}
pub fn render(&mut self) -> Result<(), HRESULT> {
{
self.populate_command_list()?;
}
{
let command_queue = &self.command_queue;
let command_list = &self.command_list;
let swap_chain = &self.swap_chain;
command_queue.execute_command_lists(&[command_list]);
swap_chain.present(1, 0)?;
}
{
self.wait_for_previous_frame()?;
}
Ok(())
}
/// Builds the command list that renders a single frame.
fn populate_command_list(&mut self) -> Result<(), HRESULT> {
let command_allocator = self.command_allocator.as_ref();
let command_list = self.command_list.as_ref();
let pipeline_state = self.pipeline_state.as_ref();
let root_signature = self.root_signature.as_ref();
let srv_heap = self.srv_heap.as_ref();
let rtv_heap = self.rtv_heap.as_ref();
let rtv_descriptor_size = self.rtv_descriptor_size;
let viewport = &self.viewport;
let scissor_rect = &self.scissor_rect;
let render_targets = self.render_targets.as_slice();
let frame_index = self.frame_index as usize;
let vertex_buffer_view = &self.vertex_buffer_view;
let index_buffer_view = &self.index_buffer_view;
// Command list allocators can only be reset when the associated
// command lists have finished execution on the GPU; apps should use
// fences to determine GPU execution progress.
command_allocator.reset()?;
// However, when ExecuteCommandList() is called on a particular command
// list, that command list can then be reset at any time and must be before
// re-recording.
command_list.reset(command_allocator, pipeline_state)?;
// Set necessary state.
command_list.set_graphics_root_signature(root_signature);
let pp_heaps = [srv_heap];
command_list.set_descriptor_heaps(&pp_heaps);
self.rotation_radians += 0.02_f32;
let rotation_radians = self.rotation_radians;
command_list.set_graphics_root_f32_constant(0, rotation_radians, 0);
command_list.set_graphics_root_descriptor_table(
1,
srv_heap.get_gpu_descriptor_handle_for_heap_start(),
);
let viewports = [*viewport];
command_list.rs_set_viewports(&viewports);
let scissor_rects = [*scissor_rect];
command_list.rs_set_scissor_rects(&scissor_rects);
// Indicate that the back buffer will be used as a render target.
{
let barrier = D3D12_RESOURCE_BARRIER::transition(
&render_targets[frame_index],
D3D12_RESOURCE_STATE_PRESENT,
D3D12_RESOURCE_STATE_RENDER_TARGET,
);
command_list.resource_barrier(1, &barrier);
}
let mut rtv_handle = rtv_heap.get_cpu_descriptor_handle_for_heap_start();
rtv_handle.offset(frame_index as _, rtv_descriptor_size);
let rtv_handles = [rtv_handle];
command_list.om_set_render_targets(&rtv_handles, false, None);
// Record commands.
let clear_color = [0_f32; 4];
let no_rects = [];
command_list.clear_render_target_view(rtv_handle, &clear_color, &no_rects);
command_list.ia_set_primitive_topology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
let vertex_buffer_views = [vertex_buffer_view.clone()];
command_list.ia_set_vertex_buffers(0, &vertex_buffer_views);
command_list.ia_set_index_buffer(index_buffer_view);
command_list.draw_indexed_instanced((CIRCLE_SEGMENTS * 3) as _, 1, 0, 0, 0);
// Indicate that the back buffer will now be used to present.
{
let barrier = D3D12_RESOURCE_BARRIER::transition(
&render_targets[frame_index],
D3D12_RESOURCE_STATE_RENDER_TARGET,
D3D12_RESOURCE_STATE_PRESENT,
);
command_list.resource_barrier(1, &barrier);
}
command_list.close()?;
Ok(())
}
fn wait_for_previous_frame(&mut self) -> Result<(), HRESULT> {
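// Take local copies of the fence bookkeeping, hand them to the shared
// helper, then write the updated values back.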
let mut fence_value = self.fence_value;
let mut frame_index = self.frame_index;
wait_for_previous_frame(
&self.swap_chain,
&self.command_queue,
&self.fence,
self.fence_event,
&mut fence_value,
&mut frame_index,
)?;
self.fence_value = fence_value;
self.frame_index = frame_index; | Ok(())
}
}
// WAITING FOR THE FRAME TO COMPLETE BEFORE CONTINUING IS NOT BEST PRACTICE. | random_line_split |
|
lib.rs | //! Configures and runs the outbound proxy.
//!
//! The outbound proxy is responsible for routing traffic from the local
//! application to external network endpoints.
#![deny(warnings, rust_2018_idioms)]
use linkerd2_app_core::{
classify,
config::Config,
dns, drain,
dst::DstAddr,
errors, http_request_authority_addr, http_request_host_addr,
http_request_l5d_override_dst_addr, http_request_orig_dst_addr, identity,
proxy::{
self,
core::resolve::Resolve,
discover,
http::{
balance, canonicalize, client, fallback, header_from_target, insert,
metrics as http_metrics, normalize_uri, profiles, retry, router, settings,
strip_header,
},
Server,
},
reconnect, serve,
spans::SpanConverter,
svc, trace, trace_context,
transport::{self, connect, tls, OrigDstAddr},
Addr, Conditional, DispatchDeadline, CANONICAL_DST_HEADER, DST_OVERRIDE_HEADER, L5D_CLIENT_ID,
L5D_REMOTE_IP, L5D_REQUIRE_ID, L5D_SERVER_ID,
};
use opencensus_proto::trace::v1 as oc;
use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::mpsc;
use tower_grpc::{self as grpc, generic::client::GrpcService};
use tracing::{debug, info_span};
#[allow(dead_code)] // TODO #2597
mod add_remote_ip_on_rsp;
#[allow(dead_code)] // TODO #2597
mod add_server_id_on_rsp;
mod endpoint;
mod orig_proto_upgrade;
mod require_identity_on_endpoint;
mod resolve;
pub use self::endpoint::Endpoint;
pub use self::resolve::resolve;
const EWMA_DEFAULT_RTT: Duration = Duration::from_millis(30);
const EWMA_DECAY: Duration = Duration::from_secs(10);
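// Parameters for the peak-EWMA load balancer below: each endpoint's cost is
// an exponentially weighted moving average of its observed latency, seeded
// with EWMA_DEFAULT_RTT and decaying over EWMA_DECAY.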
pub fn | <A, R, P>(
config: &Config,
local_identity: tls::Conditional<identity::Local>,
listen: transport::Listen<A>,
resolve: R,
dns_resolver: dns::Resolver,
profiles_client: linkerd2_app_core::profiles::Client<P>,
tap_layer: linkerd2_app_core::tap::Layer,
handle_time: http_metrics::handle_time::Scope,
endpoint_http_metrics: linkerd2_app_core::HttpEndpointMetricsRegistry,
route_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
retry_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
transport_metrics: linkerd2_app_core::transport::MetricsRegistry,
span_sink: Option<mpsc::Sender<oc::Span>>,
drain: drain::Watch,
) where
A: OrigDstAddr + Send + 'static,
R: Resolve<DstAddr, Endpoint = Endpoint> + Clone + Send + Sync + 'static,
R::Future: Send,
R::Resolution: Send,
P: GrpcService<grpc::BoxBody> + Clone + Send + Sync + 'static,
P::ResponseBody: Send,
<P::ResponseBody as grpc::Body>::Data: Send,
P::Future: Send,
{
let capacity = config.outbound_router_capacity;
let max_idle_age = config.outbound_router_max_idle_age;
let max_in_flight = config.outbound_max_requests_in_flight;
let canonicalize_timeout = config.dns_canonicalize_timeout;
let dispatch_timeout = config.outbound_dispatch_timeout;
let mut trace_labels = HashMap::new();
trace_labels.insert("direction".to_string(), "outbound".to_string());
// Establishes connections to remote peers (for both TCP
// forwarding and HTTP proxying).
let connect = svc::stack(connect::svc(config.outbound_connect_keepalive))
.push(tls::client::layer(local_identity))
.push_timeout(config.outbound_connect_timeout)
.push(transport_metrics.layer_connect(TransportLabels));
let trace_context_layer = trace_context::layer(
span_sink
.clone()
.map(|span_sink| SpanConverter::client(span_sink, trace_labels.clone())),
);
// Instantiates an HTTP client for a `client::Config`
let client_stack = connect
.clone()
.push(client::layer(config.h2_settings))
.push(reconnect::layer({
let backoff = config.outbound_connect_backoff.clone();
move |_| Ok(backoff.stream())
}))
.push(trace_context_layer)
.push(normalize_uri::layer());
// A per-`outbound::Endpoint` stack that:
//
// 1. Records http metrics with per-endpoint labels.
// 2. Instruments `tap` inspection.
// 3. Changes request/response versions when the endpoint
// supports protocol upgrade (and the request may be upgraded).
// 4. Appends `l5d-server-id` to responses coming back iff meshed
// TLS was used on the connection.
// 5. Routes requests to the correct client (based on the
// request version and headers).
// 6. Strips any `l5d-server-id` that may have been received from
// the server, before we apply our own.
let endpoint_stack = client_stack
.serves::<Endpoint>()
.push(strip_header::response::layer(L5D_REMOTE_IP))
.push(strip_header::response::layer(L5D_SERVER_ID))
.push(strip_header::request::layer(L5D_REQUIRE_ID))
// disabled due to information leakage
//.push(add_remote_ip_on_rsp::layer())
//.push(add_server_id_on_rsp::layer())
.push(orig_proto_upgrade::layer())
.push(tap_layer.clone())
.push(http_metrics::layer::<_, classify::Response>(
endpoint_http_metrics,
))
.push(require_identity_on_endpoint::layer())
.push(trace::layer(|endpoint: &Endpoint| {
info_span!("endpoint",?endpoint)
}));
// A per-`dst::Route` layer that uses profile data to configure
// a per-route layer.
//
// 1. The `classify` module installs a `classify::Response`
// extension into each request so that all lower metrics
// implementations can use the route-specific configuration.
// 2. A timeout is optionally enabled if the target `dst::Route`
// specifies a timeout. This goes before `retry` to cap
// retries.
// 3. Retries are optionally enabled depending on if the route
// is retryable.
let dst_route_layer = svc::layers()
.push(insert::target::layer())
.push(http_metrics::layer::<_, classify::Response>(
retry_http_metrics.clone(),
))
.push(retry::layer(retry_http_metrics.clone()))
.push(proxy::http::timeout::layer())
.push(http_metrics::layer::<_, classify::Response>(
route_http_metrics,
))
.push(classify::layer())
.push_buffer_pending(max_in_flight, DispatchDeadline::extract);
// Routes requests to their original destination endpoints. Used as
// a fallback when service discovery has no endpoints for a destination.
//
// If the `l5d-require-id` header is present, then that identity is
// used as the server name when connecting to the endpoint.
let orig_dst_router_layer = svc::layers()
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
Endpoint::from_request,
));
// Resolves the target via the control plane and balances requests
// over all endpoints returned from the destination service.
const DISCOVER_UPDATE_BUFFER_CAPACITY: usize = 2;
let balancer_layer = svc::layers()
.push_spawn_ready()
.push(discover::Layer::new(
DISCOVER_UPDATE_BUFFER_CAPACITY,
resolve,
))
.push(balance::layer(EWMA_DEFAULT_RTT, EWMA_DECAY));
// If the balancer fails to be created, i.e., because it is unresolvable,
// fall back to using a router that dispatches request to the
// application-selected original destination.
let distributor = endpoint_stack
.push(fallback::layer(balancer_layer, orig_dst_router_layer))
.serves::<DstAddr>()
.push(trace::layer(
|dst: &DstAddr| info_span!("concrete", dst.concrete = %dst.dst_concrete()),
));
// A per-`DstAddr` stack that does the following:
//
// 1. Adds the `CANONICAL_DST_HEADER` from the `DstAddr`.
// 2. Determines the profile of the destination and applies
// per-route policy.
// 3. Creates a load balancer, configured by resolving the
// `DstAddr` with a resolver.
let dst_stack = distributor
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(profiles::router::layer(profiles_client, dst_route_layer))
.push(header_from_target::layer(CANONICAL_DST_HEADER));
// Routes request using the `DstAddr` extension.
//
// This is shared across addr-stacks so that multiple addrs that
// canonicalize to the same DstAddr use the same dst-stack service.
let dst_router = dst_stack
.push(trace::layer(
|dst: &DstAddr| info_span!("logical", dst.logical = %dst.dst_logical()),
))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
req.extensions()
.get::<Addr>()
.cloned()
.map(|addr| DstAddr::outbound(addr, settings::Settings::from_request(req)))
},
))
.into_inner()
.make();
// Canonicalizes the request-specified `Addr` via DNS, and
// annotates each request with a refined `Addr` so that it may be
// routed by the dst_router.
let addr_stack = svc::stack(svc::Shared::new(dst_router))
.push(canonicalize::layer(dns_resolver, canonicalize_timeout));
// Routes requests to an `Addr`:
//
// 1. If the request had an `l5d-override-dst` header, this value
// is used.
//
// 2. If the request is HTTP/2 and has an :authority, this value
// is used.
//
// 3. If the request is absolute-form HTTP/1, the URI's
// authority is used.
//
// 4. If the request has an HTTP/1 Host header, it is used.
//
// 5. Finally, if the Source had an SO_ORIGINAL_DST, this TCP
// address is used.
let addr_router = addr_stack
.push(strip_header::request::layer(L5D_CLIENT_ID))
.push(strip_header::request::layer(DST_OVERRIDE_HEADER))
.push(insert::target::layer())
.push(trace::layer(|addr: &Addr| info_span!("addr", %addr)))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
http_request_l5d_override_dst_addr(req)
.map(|override_addr| {
debug!("using dst-override");
override_addr
})
.or_else(|_| http_request_authority_addr(req))
.or_else(|_| http_request_host_addr(req))
.or_else(|_| http_request_orig_dst_addr(req))
.ok()
},
))
.into_inner()
.make();
// Share a single semaphore across all requests to signal when
// the proxy is overloaded.
let admission_control = svc::stack(addr_router)
.push_concurrency_limit(max_in_flight)
.push_load_shed();
let trace_context_layer = trace_context::layer(
span_sink.map(|span_sink| SpanConverter::server(span_sink, trace_labels)),
);
// Instantiates an HTTP service for each `Source` using the
// shared `addr_router`. The `Source` is stored in the request's
// extensions so that it can be used by the `addr_router`.
let server_stack = svc::stack(svc::Shared::new(admission_control))
.push(insert::layer(move || {
DispatchDeadline::after(dispatch_timeout)
}))
.push(insert::target::layer())
.push(errors::layer())
.push(trace::layer(
|src: &tls::accept::Meta| info_span!("source", target.addr = %src.addrs.target_addr()),
))
.push(trace_context_layer)
.push(handle_time.layer());
let skip_ports = std::sync::Arc::new(config.outbound_ports_disable_protocol_detection.clone());
let proxy = Server::new(
TransportLabels,
transport_metrics,
svc::stack(connect)
.push(svc::map_target::layer(Endpoint::from))
.into_inner(),
server_stack,
config.h2_settings,
drain.clone(),
skip_ports.clone(),
);
let no_tls: tls::Conditional<identity::Local> =
Conditional::None(tls::ReasonForNoPeerName::Loopback.into());
let accept = tls::AcceptTls::new(no_tls, proxy).with_skip_ports(skip_ports);
serve::spawn(listen, accept, drain);
}
#[derive(Copy, Clone, Debug)]
struct TransportLabels;
impl transport::metrics::TransportLabels<Endpoint> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, endpoint: &Endpoint) -> Self::Labels {
transport::labels::Key::connect("outbound", endpoint.identity.as_ref())
}
}
impl transport::metrics::TransportLabels<proxy::server::Protocol> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, proto: &proxy::server::Protocol) -> Self::Labels {
transport::labels::Key::accept("outbound", proto.tls.peer_identity.as_ref())
}
}
| spawn | identifier_name |
lib.rs | //! Configures and runs the outbound proxy.
//!
//! The outbound proxy is responsible for routing traffic from the local
//! application to external network endpoints.
#![deny(warnings, rust_2018_idioms)]
use linkerd2_app_core::{
classify,
config::Config,
dns, drain,
dst::DstAddr,
errors, http_request_authority_addr, http_request_host_addr,
http_request_l5d_override_dst_addr, http_request_orig_dst_addr, identity,
proxy::{
self,
core::resolve::Resolve,
discover,
http::{
balance, canonicalize, client, fallback, header_from_target, insert,
metrics as http_metrics, normalize_uri, profiles, retry, router, settings,
strip_header,
},
Server,
},
reconnect, serve,
spans::SpanConverter,
svc, trace, trace_context,
transport::{self, connect, tls, OrigDstAddr},
Addr, Conditional, DispatchDeadline, CANONICAL_DST_HEADER, DST_OVERRIDE_HEADER, L5D_CLIENT_ID,
L5D_REMOTE_IP, L5D_REQUIRE_ID, L5D_SERVER_ID,
};
use opencensus_proto::trace::v1 as oc;
use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::mpsc;
use tower_grpc::{self as grpc, generic::client::GrpcService};
use tracing::{debug, info_span};
#[allow(dead_code)] // TODO #2597
mod add_remote_ip_on_rsp;
#[allow(dead_code)] // TODO #2597
mod add_server_id_on_rsp;
mod endpoint;
mod orig_proto_upgrade;
mod require_identity_on_endpoint;
mod resolve;
pub use self::endpoint::Endpoint;
pub use self::resolve::resolve;
const EWMA_DEFAULT_RTT: Duration = Duration::from_millis(30);
const EWMA_DECAY: Duration = Duration::from_secs(10);
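// Parameters for the peak-EWMA load balancer below: each endpoint's cost is
// an exponentially weighted moving average of its observed latency, seeded
// with EWMA_DEFAULT_RTT and decaying over EWMA_DECAY.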
pub fn spawn<A, R, P>(
config: &Config,
local_identity: tls::Conditional<identity::Local>,
listen: transport::Listen<A>,
resolve: R,
dns_resolver: dns::Resolver,
profiles_client: linkerd2_app_core::profiles::Client<P>,
tap_layer: linkerd2_app_core::tap::Layer,
handle_time: http_metrics::handle_time::Scope,
endpoint_http_metrics: linkerd2_app_core::HttpEndpointMetricsRegistry,
route_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
retry_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
transport_metrics: linkerd2_app_core::transport::MetricsRegistry,
span_sink: Option<mpsc::Sender<oc::Span>>,
drain: drain::Watch,
) where
A: OrigDstAddr + Send + 'static,
R: Resolve<DstAddr, Endpoint = Endpoint> + Clone + Send + Sync + 'static,
R::Future: Send,
R::Resolution: Send,
P: GrpcService<grpc::BoxBody> + Clone + Send + Sync + 'static,
P::ResponseBody: Send,
<P::ResponseBody as grpc::Body>::Data: Send,
P::Future: Send,
{
let capacity = config.outbound_router_capacity;
let max_idle_age = config.outbound_router_max_idle_age;
let max_in_flight = config.outbound_max_requests_in_flight;
let canonicalize_timeout = config.dns_canonicalize_timeout;
let dispatch_timeout = config.outbound_dispatch_timeout;
let mut trace_labels = HashMap::new();
trace_labels.insert("direction".to_string(), "outbound".to_string());
// Establishes connections to remote peers (for both TCP
// forwarding and HTTP proxying).
let connect = svc::stack(connect::svc(config.outbound_connect_keepalive))
.push(tls::client::layer(local_identity))
.push_timeout(config.outbound_connect_timeout)
.push(transport_metrics.layer_connect(TransportLabels));
let trace_context_layer = trace_context::layer(
span_sink
.clone()
.map(|span_sink| SpanConverter::client(span_sink, trace_labels.clone())),
);
// Instantiates an HTTP client for a `client::Config`
let client_stack = connect
.clone()
.push(client::layer(config.h2_settings))
.push(reconnect::layer({
let backoff = config.outbound_connect_backoff.clone();
move |_| Ok(backoff.stream())
}))
.push(trace_context_layer)
.push(normalize_uri::layer());
// A per-`outbound::Endpoint` stack that:
//
// 1. Records http metrics with per-endpoint labels.
// 2. Instruments `tap` inspection.
// 3. Changes request/response versions when the endpoint
// supports protocol upgrade (and the request may be upgraded).
// 4. Appends `l5d-server-id` to responses coming back iff meshed
// TLS was used on the connection.
// 5. Routes requests to the correct client (based on the
// request version and headers).
// 6. Strips any `l5d-server-id` that may have been received from
// the server, before we apply our own.
let endpoint_stack = client_stack
.serves::<Endpoint>()
.push(strip_header::response::layer(L5D_REMOTE_IP))
.push(strip_header::response::layer(L5D_SERVER_ID))
.push(strip_header::request::layer(L5D_REQUIRE_ID))
// disabled due to information leakage
//.push(add_remote_ip_on_rsp::layer())
//.push(add_server_id_on_rsp::layer())
.push(orig_proto_upgrade::layer())
.push(tap_layer.clone())
.push(http_metrics::layer::<_, classify::Response>(
endpoint_http_metrics,
))
.push(require_identity_on_endpoint::layer())
.push(trace::layer(|endpoint: &Endpoint| {
info_span!("endpoint",?endpoint)
}));
// A per-`dst::Route` layer that uses profile data to configure
// a per-route layer.
//
// 1. The `classify` module installs a `classify::Response`
// extension into each request so that all lower metrics
// implementations can use the route-specific configuration.
// 2. A timeout is optionally enabled if the target `dst::Route`
// specifies a timeout. This goes before `retry` to cap
// retries.
// 3. Retries are optionally enabled depending on if the route
// is retryable.
let dst_route_layer = svc::layers()
.push(insert::target::layer())
.push(http_metrics::layer::<_, classify::Response>(
retry_http_metrics.clone(),
))
.push(retry::layer(retry_http_metrics.clone()))
.push(proxy::http::timeout::layer())
.push(http_metrics::layer::<_, classify::Response>(
route_http_metrics,
))
.push(classify::layer())
.push_buffer_pending(max_in_flight, DispatchDeadline::extract);
// Routes requests to their original destination endpoints. Used as
// a fallback when service discovery has no endpoints for a destination.
//
// If the `l5d-require-id` header is present, then that identity is
// used as the server name when connecting to the endpoint.
let orig_dst_router_layer = svc::layers()
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
Endpoint::from_request,
));
// Resolves the target via the control plane and balances requests
// over all endpoints returned from the destination service.
const DISCOVER_UPDATE_BUFFER_CAPACITY: usize = 2;
let balancer_layer = svc::layers()
.push_spawn_ready()
.push(discover::Layer::new(
DISCOVER_UPDATE_BUFFER_CAPACITY,
resolve,
))
.push(balance::layer(EWMA_DEFAULT_RTT, EWMA_DECAY));
// If the balancer fails to be created, i.e., because it is unresolvable,
// fall back to using a router that dispatches request to the
// application-selected original destination.
let distributor = endpoint_stack
.push(fallback::layer(balancer_layer, orig_dst_router_layer))
.serves::<DstAddr>()
.push(trace::layer(
|dst: &DstAddr| info_span!("concrete", dst.concrete = %dst.dst_concrete()),
));
// A per-`DstAddr` stack that does the following:
//
// 1. Adds the `CANONICAL_DST_HEADER` from the `DstAddr`.
// 2. Determines the profile of the destination and applies
// per-route policy.
// 3. Creates a load balancer, configured by resolving the
// `DstAddr` with a resolver.
let dst_stack = distributor
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(profiles::router::layer(profiles_client, dst_route_layer))
.push(header_from_target::layer(CANONICAL_DST_HEADER));
// Routes request using the `DstAddr` extension.
//
// This is shared across addr-stacks so that multiple addrs that
// canonicalize to the same DstAddr use the same dst-stack service.
let dst_router = dst_stack
.push(trace::layer(
|dst: &DstAddr| info_span!("logical", dst.logical = %dst.dst_logical()),
))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
req.extensions()
.get::<Addr>()
.cloned()
.map(|addr| DstAddr::outbound(addr, settings::Settings::from_request(req)))
},
))
.into_inner()
.make();
// Canonicalizes the request-specified `Addr` via DNS, and
// annotates each request with a refined `Addr` so that it may be
// routed by the dst_router.
let addr_stack = svc::stack(svc::Shared::new(dst_router))
.push(canonicalize::layer(dns_resolver, canonicalize_timeout));
// Routes requests to an `Addr`:
//
// 1. If the request had an `l5d-override-dst` header, this value
// is used.
//
// 2. If the request is HTTP/2 and has an :authority, this value
// is used.
//
// 3. If the request is absolute-form HTTP/1, the URI's
// authority is used.
//
// 4. If the request has an HTTP/1 Host header, it is used.
//
// 5. Finally, if the Source had an SO_ORIGINAL_DST, this TCP
// address is used.
let addr_router = addr_stack
.push(strip_header::request::layer(L5D_CLIENT_ID))
.push(strip_header::request::layer(DST_OVERRIDE_HEADER))
.push(insert::target::layer())
.push(trace::layer(|addr: &Addr| info_span!("addr", %addr)))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
http_request_l5d_override_dst_addr(req)
.map(|override_addr| {
debug!("using dst-override");
override_addr
})
.or_else(|_| http_request_authority_addr(req))
.or_else(|_| http_request_host_addr(req))
.or_else(|_| http_request_orig_dst_addr(req))
.ok()
},
))
.into_inner()
.make();
// Share a single semaphore across all requests to signal when
// the proxy is overloaded.
let admission_control = svc::stack(addr_router)
.push_concurrency_limit(max_in_flight)
.push_load_shed();
let trace_context_layer = trace_context::layer(
span_sink.map(|span_sink| SpanConverter::server(span_sink, trace_labels)),
);
// Instantiates an HTTP service for each `Source` using the
// shared `addr_router`. The `Source` is stored in the request's
// extensions so that it can be used by the `addr_router`.
let server_stack = svc::stack(svc::Shared::new(admission_control))
.push(insert::layer(move || {
DispatchDeadline::after(dispatch_timeout)
}))
.push(insert::target::layer())
.push(errors::layer())
.push(trace::layer(
|src: &tls::accept::Meta| info_span!("source", target.addr = %src.addrs.target_addr()),
))
.push(trace_context_layer)
.push(handle_time.layer());
let skip_ports = std::sync::Arc::new(config.outbound_ports_disable_protocol_detection.clone());
let proxy = Server::new(
TransportLabels,
transport_metrics,
svc::stack(connect)
.push(svc::map_target::layer(Endpoint::from))
.into_inner(),
server_stack,
config.h2_settings,
drain.clone(),
skip_ports.clone(),
);
let no_tls: tls::Conditional<identity::Local> =
Conditional::None(tls::ReasonForNoPeerName::Loopback.into());
let accept = tls::AcceptTls::new(no_tls, proxy).with_skip_ports(skip_ports);
serve::spawn(listen, accept, drain);
}
#[derive(Copy, Clone, Debug)]
struct TransportLabels;
impl transport::metrics::TransportLabels<Endpoint> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, endpoint: &Endpoint) -> Self::Labels |
}
impl transport::metrics::TransportLabels<proxy::server::Protocol> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, proto: &proxy::server::Protocol) -> Self::Labels {
transport::labels::Key::accept("outbound", proto.tls.peer_identity.as_ref())
}
}
| {
transport::labels::Key::connect("outbound", endpoint.identity.as_ref())
} | identifier_body |
lib.rs | //! Configures and runs the outbound proxy.
//!
//! The outbound proxy is responsible for routing traffic from the local
//! application to external network endpoints.
#![deny(warnings, rust_2018_idioms)]
use linkerd2_app_core::{
classify,
config::Config,
dns, drain,
dst::DstAddr,
errors, http_request_authority_addr, http_request_host_addr,
http_request_l5d_override_dst_addr, http_request_orig_dst_addr, identity,
proxy::{
self,
core::resolve::Resolve,
discover,
http::{
balance, canonicalize, client, fallback, header_from_target, insert,
metrics as http_metrics, normalize_uri, profiles, retry, router, settings,
strip_header,
},
Server,
},
reconnect, serve,
spans::SpanConverter,
svc, trace, trace_context,
transport::{self, connect, tls, OrigDstAddr},
Addr, Conditional, DispatchDeadline, CANONICAL_DST_HEADER, DST_OVERRIDE_HEADER, L5D_CLIENT_ID,
L5D_REMOTE_IP, L5D_REQUIRE_ID, L5D_SERVER_ID,
};
use opencensus_proto::trace::v1 as oc;
use std::collections::HashMap;
use std::time::Duration;
use tokio::sync::mpsc;
use tower_grpc::{self as grpc, generic::client::GrpcService};
use tracing::{debug, info_span};
#[allow(dead_code)] // TODO #2597
mod add_remote_ip_on_rsp;
#[allow(dead_code)] // TODO #2597
mod add_server_id_on_rsp;
mod endpoint;
mod orig_proto_upgrade;
mod require_identity_on_endpoint;
mod resolve;
pub use self::endpoint::Endpoint;
pub use self::resolve::resolve;
const EWMA_DEFAULT_RTT: Duration = Duration::from_millis(30);
const EWMA_DECAY: Duration = Duration::from_secs(10);
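// Parameters for the peak-EWMA load balancer below: each endpoint's cost is
// an exponentially weighted moving average of its observed latency, seeded
// with EWMA_DEFAULT_RTT and decaying over EWMA_DECAY.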
pub fn spawn<A, R, P>(
config: &Config,
local_identity: tls::Conditional<identity::Local>,
listen: transport::Listen<A>,
resolve: R,
dns_resolver: dns::Resolver,
profiles_client: linkerd2_app_core::profiles::Client<P>,
tap_layer: linkerd2_app_core::tap::Layer,
handle_time: http_metrics::handle_time::Scope,
endpoint_http_metrics: linkerd2_app_core::HttpEndpointMetricsRegistry,
route_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
retry_http_metrics: linkerd2_app_core::HttpRouteMetricsRegistry,
transport_metrics: linkerd2_app_core::transport::MetricsRegistry,
span_sink: Option<mpsc::Sender<oc::Span>>,
drain: drain::Watch,
) where
A: OrigDstAddr + Send + 'static,
R: Resolve<DstAddr, Endpoint = Endpoint> + Clone + Send + Sync + 'static,
R::Future: Send,
R::Resolution: Send,
P: GrpcService<grpc::BoxBody> + Clone + Send + Sync + 'static,
P::ResponseBody: Send,
<P::ResponseBody as grpc::Body>::Data: Send,
P::Future: Send,
{
let capacity = config.outbound_router_capacity;
let max_idle_age = config.outbound_router_max_idle_age;
let max_in_flight = config.outbound_max_requests_in_flight;
let canonicalize_timeout = config.dns_canonicalize_timeout;
let dispatch_timeout = config.outbound_dispatch_timeout;
let mut trace_labels = HashMap::new();
trace_labels.insert("direction".to_string(), "outbound".to_string());
// Establishes connections to remote peers (for both TCP
// forwarding and HTTP proxying).
let connect = svc::stack(connect::svc(config.outbound_connect_keepalive))
.push(tls::client::layer(local_identity))
.push_timeout(config.outbound_connect_timeout)
.push(transport_metrics.layer_connect(TransportLabels));
let trace_context_layer = trace_context::layer(
span_sink
.clone()
.map(|span_sink| SpanConverter::client(span_sink, trace_labels.clone())),
);
// Instantiates an HTTP client for a `client::Config`
let client_stack = connect
.clone()
.push(client::layer(config.h2_settings))
.push(reconnect::layer({
let backoff = config.outbound_connect_backoff.clone();
move |_| Ok(backoff.stream())
}))
.push(trace_context_layer)
.push(normalize_uri::layer());
// A per-`outbound::Endpoint` stack that:
//
// 1. Records http metrics with per-endpoint labels.
// 2. Instruments `tap` inspection.
// 3. Changes request/response versions when the endpoint
// supports protocol upgrade (and the request may be upgraded).
// 4. Appends `l5d-server-id` to responses coming back iff meshed
// TLS was used on the connection.
// 5. Routes requests to the correct client (based on the
// request version and headers).
// 6. Strips any `l5d-server-id` that may have been received from
// the server, before we apply our own.
let endpoint_stack = client_stack
.serves::<Endpoint>()
.push(strip_header::response::layer(L5D_REMOTE_IP))
.push(strip_header::response::layer(L5D_SERVER_ID))
.push(strip_header::request::layer(L5D_REQUIRE_ID))
// disabled due to information leakage
//.push(add_remote_ip_on_rsp::layer())
//.push(add_server_id_on_rsp::layer())
.push(orig_proto_upgrade::layer())
.push(tap_layer.clone())
.push(http_metrics::layer::<_, classify::Response>(
endpoint_http_metrics,
))
.push(require_identity_on_endpoint::layer())
.push(trace::layer(|endpoint: &Endpoint| {
info_span!("endpoint",?endpoint)
}));
// A per-`dst::Route` layer that uses profile data to configure
// a per-route layer.
//
// 1. The `classify` module installs a `classify::Response`
// extension into each request so that all lower metrics
// implementations can use the route-specific configuration.
// 2. A timeout is optionally enabled if the target `dst::Route`
// specifies a timeout. This goes before `retry` to cap
// retries.
// 3. Retries are optionally enabled depending on if the route
// is retryable.
let dst_route_layer = svc::layers()
.push(insert::target::layer())
.push(http_metrics::layer::<_, classify::Response>(
retry_http_metrics.clone(),
))
.push(retry::layer(retry_http_metrics.clone()))
.push(proxy::http::timeout::layer())
.push(http_metrics::layer::<_, classify::Response>(
route_http_metrics,
))
.push(classify::layer())
.push_buffer_pending(max_in_flight, DispatchDeadline::extract);
// Routes requests to their original destination endpoints. Used as
// a fallback when service discovery has no endpoints for a destination.
//
// If the `l5d-require-id` header is present, then that identity is
// used as the server name when connecting to the endpoint.
let orig_dst_router_layer = svc::layers()
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
Endpoint::from_request,
));
// Resolves the target via the control plane and balances requests
// over all endpoints returned from the destination service.
const DISCOVER_UPDATE_BUFFER_CAPACITY: usize = 2;
let balancer_layer = svc::layers()
.push_spawn_ready()
.push(discover::Layer::new(
DISCOVER_UPDATE_BUFFER_CAPACITY,
resolve,
))
.push(balance::layer(EWMA_DEFAULT_RTT, EWMA_DECAY));
// If the balancer fails to be created, i.e., because it is unresolvable,
// fall back to using a router that dispatches request to the
// application-selected original destination.
let distributor = endpoint_stack
.push(fallback::layer(balancer_layer, orig_dst_router_layer))
.serves::<DstAddr>()
.push(trace::layer(
|dst: &DstAddr| info_span!("concrete", dst.concrete = %dst.dst_concrete()),
));
// A per-`DstAddr` stack that does the following:
//
// 1. Adds the `CANONICAL_DST_HEADER` from the `DstAddr`.
// 2. Determines the profile of the destination and applies
// per-route policy.
// 3. Creates a load balancer, configured by resolving the
// `DstAddr` with a resolver.
let dst_stack = distributor
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(profiles::router::layer(profiles_client, dst_route_layer))
.push(header_from_target::layer(CANONICAL_DST_HEADER));
// Routes requests using the `DstAddr` extension.
//
// This is shared across addr-stacks so that multiple addrs that
// canonicalize to the same DstAddr use the same dst-stack service.
let dst_router = dst_stack
.push(trace::layer(
|dst: &DstAddr| info_span!("logical", dst.logical = %dst.dst_logical()),
))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
req.extensions()
.get::<Addr>()
.cloned()
.map(|addr| DstAddr::outbound(addr, settings::Settings::from_request(req)))
},
))
.into_inner()
.make();
// Canonicalizes the request-specified `Addr` via DNS, and
// annotates each request with a refined `Addr` so that it may be
// routed by the dst_router.
let addr_stack = svc::stack(svc::Shared::new(dst_router))
.push(canonicalize::layer(dns_resolver, canonicalize_timeout));
// Routes requests to an `Addr`:
//
// 1. If the request had an `l5d-override-dst` header, this value
// is used.
//
// 2. If the request is HTTP/2 and has an :authority, this value
// is used.
//
// 3. If the request is absolute-form HTTP/1, the URI's
// authority is used.
//
// 4. If the request has an HTTP/1 Host header, it is used.
//
// 5. Finally, if the Source had an SO_ORIGINAL_DST, this TCP
// address is used.
let addr_router = addr_stack
.push(strip_header::request::layer(L5D_CLIENT_ID))
.push(strip_header::request::layer(DST_OVERRIDE_HEADER))
.push(insert::target::layer())
.push(trace::layer(|addr: &Addr| info_span!("addr", %addr)))
.push_buffer_pending(max_in_flight, DispatchDeadline::extract)
.push(router::layer(
router::Config::new(capacity, max_idle_age),
|req: &http::Request<_>| {
http_request_l5d_override_dst_addr(req)
.map(|override_addr| {
debug!("using dst-override");
override_addr
})
.or_else(|_| http_request_authority_addr(req))
.or_else(|_| http_request_host_addr(req))
.or_else(|_| http_request_orig_dst_addr(req))
.ok()
},
))
.into_inner()
.make();
// Share a single semaphore across all requests to signal when
// the proxy is overloaded.
let admission_control = svc::stack(addr_router)
.push_concurrency_limit(max_in_flight)
.push_load_shed();
let trace_context_layer = trace_context::layer(
span_sink.map(|span_sink| SpanConverter::server(span_sink, trace_labels)),
);
// Instantiates an HTTP service for each `Source` using the
// shared `addr_router`. The `Source` is stored in the request's
// extensions so that it can be used by the `addr_router`.
let server_stack = svc::stack(svc::Shared::new(admission_control))
.push(insert::layer(move || {
DispatchDeadline::after(dispatch_timeout)
}))
.push(insert::target::layer())
.push(errors::layer())
.push(trace::layer(
|src: &tls::accept::Meta| info_span!("source", target.addr = %src.addrs.target_addr()),
))
.push(trace_context_layer)
.push(handle_time.layer());
let skip_ports = std::sync::Arc::new(config.outbound_ports_disable_protocol_detection.clone());
let proxy = Server::new(
TransportLabels,
transport_metrics,
svc::stack(connect)
.push(svc::map_target::layer(Endpoint::from))
.into_inner(),
server_stack,
config.h2_settings,
drain.clone(),
skip_ports.clone(),
);
let no_tls: tls::Conditional<identity::Local> =
Conditional::None(tls::ReasonForNoPeerName::Loopback.into());
let accept = tls::AcceptTls::new(no_tls, proxy).with_skip_ports(skip_ports);
serve::spawn(listen, accept, drain);
}
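// End-to-end, an accepted outbound request flows through the stacks above as:
// server_stack (admission control) -> addr_router (DNS-canonicalized `Addr`)
// -> dst_router (logical `DstAddr`, per-route profile policy) -> balancer,
// with the orig-dst router as fallback -> endpoint_stack -> client.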
#[derive(Copy, Clone, Debug)]
struct TransportLabels;
impl transport::metrics::TransportLabels<Endpoint> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, endpoint: &Endpoint) -> Self::Labels {
transport::labels::Key::connect("outbound", endpoint.identity.as_ref())
}
}
impl transport::metrics::TransportLabels<proxy::server::Protocol> for TransportLabels {
type Labels = transport::labels::Key;
fn transport_labels(&self, proto: &proxy::server::Protocol) -> Self::Labels {
transport::labels::Key::accept("outbound", proto.tls.peer_identity.as_ref())
}
}
// lib.rs
use reqwest::Client;
use serde::{Deserialize, Serialize};
pub use url::Url;
/// Data types to interact with the folder API. Check [FolderApi](folder::FolderApi) for the
/// available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Folder-Api)
pub mod folder;
/// Data types and builders to interact with the passwords API. Check
/// [PasswordApi](password::PasswordApi) for the available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Password-Api)
pub mod password;
/// Actions available for the service API. Check [ServiceApi](service::ServiceApi) for more
/// information. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Service-Api)
pub mod service;
/// Data types, helpers and builders to interact with the settings API. Check
/// [SettingsApi](settings::SettingsApi) for the available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Settings-Api)
pub mod settings;
/// Data types, helpers and builders to interact with the share API. Check
/// [ShareApi](share::ShareApi) for the available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Share-Api) for more
/// information.
pub mod share;
/// Data types, helpers and builders to interact with the tag API. Check
/// [TagApi](tag::TagApi) for the available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Tag-Api)
pub mod tag;
/// Data types and helpers to access the Token API. Check [TokenApi](token::TokenApi) for the
/// available actions. You can also check the [HTTP
/// API](https://git.mdns.eu/nextcloud/passwords/wikis/Developers/Api/Token-Api)
pub mod token;
// TODO: sort the session required methods from the non-session required
mod utils;
pub use utils::{QueryKind, SearchQuery};
mod private {
pub trait Sealed {}
impl Sealed for super::settings::UserSettings {}
impl Sealed for super::settings::ServerSettings {}
impl Sealed for super::settings::ClientSettings {}
}
#[derive(Debug)]
pub struct Color {
pub red: u8,
pub green: u8,
pub blue: u8,
}
impl std::fmt::Display for Color {
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "#{:02x}{:02x}{:02x}", self.red, self.green, self.blue)
}
}
impl Serialize for Color {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(&format!(
"#{:02x}{:02x}{:02x}",
self.red, self.green, self.blue
))
}
}
impl<'de> Deserialize<'de> for Color {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct StrVisitor;
impl<'de> serde::de::Visitor<'de> for StrVisitor {
type Value = Color;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "an hex color of the form #abcdef as a string")
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if value.len() == 7 {
if !value.starts_with('#') {
Err(E::custom("expected the color to start with `#`"))
} else {
let mut result = [0u8; 3];
hex::decode_to_slice(value.trim_start_matches('#'), &mut result).map_err(
|e| E::custom(format!("Could not parse hex string: {:?}", e)),
)?;
Ok(Color {
red: result[0],
green: result[1],
blue: result[2],
})
}
} else {
Err(E::custom(format!(
"Expected a string of length 7, got length: {}",
value.len()
)))
}
}
}
deserializer.deserialize_str(StrVisitor)
}
}
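// A minimal serde round-trip sketch for `Color` (assumes `serde_json` is
// available, which the `Error::Serde` variant below already implies):
#[cfg(test)]
mod color_tests {
    use super::Color;

    #[test]
    fn hex_round_trip() {
        // Deserialize a quoted hex string, then serialize it back out.
        let c: Color = serde_json::from_str("\"#0a1b2c\"").expect("valid hex color");
        assert_eq!((c.red, c.green, c.blue), (0x0a, 0x1b, 0x2c));
        assert_eq!(serde_json::to_string(&c).unwrap(), "\"#0a1b2c\"");
    }
}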
/// Errors
#[derive(thiserror::Error, Debug)]
pub enum Error {
#[error("error in communicating with the API")]
ApiError(#[from] reqwest::Error),
#[error("could not connect to the passwords API")]
ConnectionFailed,
#[error("could not cleanly disconnect from the passwords API")]
DisconnectionFailed,
#[error("last shutdown time is in the future")]
TimeError(#[from] std::time::SystemTimeError),
#[error("setting was not valid in this context")]
InvalidSetting,
#[error("serde error")]
Serde(#[from] serde_json::Error),
#[error("endpoint error: {}",.0.message)]
EndpointError(EndpointError),
#[error("error in the login flow: request returned {0}")]
LoginFlowError(u16),
}
#[derive(Serialize, Deserialize, Debug)]
pub struct EndpointError {
status: String,
id: u64,
message: String,
}
#[derive(Serialize, Deserialize)]
#[serde(untagged)]
pub enum EndpointResponse<T> {
Error(EndpointError),
Success(T),
}
/// Represents how to first connect to a Nextcloud instance.
/// The best way to obtain one is using [Login flow
/// v2](https://docs.nextcloud.com/server/19/developer_manual/client_apis/LoginFlow/index.html#login-flow-v2).
/// You can use [register_login_flow_2](LoginDetails::register_login_flow_2) to perform this authentication.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LoginDetails {
pub server: Url,
#[serde(rename = "loginName")]
pub login_name: String,
#[serde(rename = "appPassword")]
pub app_password: String,
}
impl LoginDetails {
/// Log in to the server with login flow v2. The `auth_callback` is given the URL where the
/// user will grant the permissions; it must not block waiting for the end of the login flow,
/// or the authentication will never finish.
pub async fn register_login_flow_2(
server: Url,
mut auth_callback: impl FnMut(Url),
) -> Result<Self, Error> {
#[derive(Deserialize)]
struct Poll {
token: String,
endpoint: Url,
}
#[derive(Deserialize)]
struct PollRequest {
poll: Poll,
login: Url,
}
#[derive(Serialize)]
struct Token {
token: String,
}
let client = reqwest::Client::new();
let resp = client
.post(&format!("{}index.php/login/v2", server))
.send()
.await?;
if !resp.status().is_success() {
return Err(Error::LoginFlowError(resp.status().as_u16()));
}
let resp: PollRequest = resp.json().await?;
log::debug!("Got poll request for login_flow_v2");
auth_callback(resp.login);
let token = Token {
token: resp.poll.token,
};
let details: LoginDetails = loop {
let poll = client
.post(resp.poll.endpoint.as_str())
.form(&token)
.send()
.await?;
log::debug!("Polled endpoint");
match poll.status().as_u16() {
404 => {
log::debug!("Not ready, need to retry");
tokio::time::delay_for(std::time::Duration::from_millis(100)).await
}
200 => break poll.json().await?,
code => return Err(Error::LoginFlowError(code)),
}
};
Ok(details)
}
}
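// A minimal usage sketch of the login flow (the server URL is a placeholder;
// assumes a tokio runtime and a user completing the grant in a browser):
#[allow(dead_code)]
async fn login_example() -> Result<LoginDetails, Error> {
    let server = Url::parse("https://cloud.example.com/").expect("valid URL");
    LoginDetails::register_login_flow_2(server, |grant_url| {
        // Show this URL to the user without blocking the poll loop.
        println!("Please authorize this client at: {}", grant_url);
    })
    .await
}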
/// The state needed to re-connect to a nextcloud instance
#[derive(Serialize, Deserialize, Clone)]
pub struct ResumeState {
server_url: Url,
password_url: String,
keepalive: u64,
session_id: String,
shutdown_time: std::time::SystemTime,
login: String,
password: String,
}
/// The main entrypoint to the nextcloud API
pub struct AuthenticatedApi {
server_url: Url,
client: Client,
passwords_url: String,
session_id: String,
keepalive: u64,
login: String,
password: String,
}
impl AuthenticatedApi {
/// Return the URL of the nextcloud instance
pub fn server(&self) -> &Url {
&self.server_url
}
async fn reqwest<D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
method: reqwest::Method,
data: D,
) -> Result<reqwest::Response, reqwest::Error> {
self.client
.request(
method,
&format!("{}/{}", self.passwords_url, endpoint.as_ref()),
)
.json(&data)
.header("X-API-SESSION", &self.session_id)
.basic_auth(&self.login, Some(&self.password))
.send()
.await
}
pub(crate) async fn bytes_request<D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
method: reqwest::Method,
data: D,
) -> Result<bytes::Bytes, Error> {
let r = self.reqwest(endpoint, method, data).await?;
r.bytes().await.map_err(Into::into)
}
async fn passwords_request<R: serde::de::DeserializeOwned, D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
method: reqwest::Method,
data: D,
) -> Result<R, Error> {
let r = self.reqwest(endpoint, method, data).await?;
let text = r.text().await?;
let resp = serde_json::from_str(&text).map_err(|e| {
log::warn!("Response could not be read: {}", text);
e
})?;
match resp {
EndpointResponse::Success(r) => Ok(r),
EndpointResponse::Error(e) => Err(Error::EndpointError(e)),
}
}
pub(crate) async fn passwords_get<R: serde::de::DeserializeOwned, D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
data: D,
) -> Result<R, Error> {
self.passwords_request(endpoint, reqwest::Method::GET, data)
.await
}
pub(crate) async fn passwords_post<R: serde::de::DeserializeOwned, D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
data: D,
) -> Result<R, Error> {
self.passwords_request(endpoint, reqwest::Method::POST, data)
.await
}
pub(crate) async fn passwords_delete<R: serde::de::DeserializeOwned, D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
data: D,
) -> Result<R, Error> {
self.passwords_request(endpoint, reqwest::Method::DELETE, data)
.await
}
pub(crate) async fn passwords_patch<R: serde::de::DeserializeOwned, D: serde::Serialize>(
&self,
endpoint: impl AsRef<str>,
data: D,
) -> Result<R, Error> {
self.passwords_request(endpoint, reqwest::Method::PATCH, data)
.await
}
/// Access the Password API
#[inline]
pub fn password(&self) -> password::PasswordApi<'_> {
password::PasswordApi { api: self }
}
/// Access the Settings API
#[inline]
pub fn settings(&self) -> settings::SettingsApi<'_> {
settings::SettingsApi { api: self }
}
/// Access the Folder API
#[inline]
pub fn folder(&self) -> folder::FolderApi<'_> {
folder::FolderApi { api: self }
}
/// Access the Share API
#[inline]
pub fn share(&self) -> share::ShareApi<'_> {
share::ShareApi { api: self }
}
#[inline]
pub fn service(&self) -> service::ServiceApi<'_> {
service::ServiceApi { api: self }
}
/// Resume a connection to the API using the state. Also gives the session ID
pub async fn resume_session(resume_state: ResumeState) -> Result<(Self, String), Error> {
if resume_state.shutdown_time.elapsed()?.as_secs() > resume_state.keepalive {
log::debug!("Session was too old, creating new session");
AuthenticatedApi::new_session(LoginDetails {
server: resume_state.server_url,
login_name: resume_state.login,
app_password: resume_state.password,
})
.await
} else {
log::debug!("Calling keepalive");
#[derive(Deserialize)]
struct Keepalive {
success: bool,
}
let client = Client::new();
let api = AuthenticatedApi {
server_url: resume_state.server_url,
client,
passwords_url: resume_state.password_url,
session_id: resume_state.session_id,
keepalive: resume_state.keepalive,
login: resume_state.login,
password: resume_state.password,
};
let s: Keepalive = api.passwords_get("1.0/session/keepalive", ()).await?;
assert!(s.success);
let session_id = api.session_id.clone();
Ok((api, session_id))
}
}
/// Create a new session to the API, returns the session ID
pub async fn new_session(login_details: LoginDetails) -> Result<(Self, String), Error> {
#[derive(Serialize, Deserialize, Debug)]
struct OpenSession {
success: bool,
keys: Vec<String>,
}
let client = Client::new();
let passwords_url = format!("{}index.php/apps/passwords/api/", login_details.server);
let session_request = client
.request(
reqwest::Method::POST,
&format!("{}/1.0/session/open", passwords_url),
)
.basic_auth(&login_details.login_name, Some(&login_details.app_password))
.send()
.await?;
let session_id: String = session_request
.headers()
.get("X-API-SESSION")
.expect("no api session header")
.to_str()
.expect("api session is not ascii")
.into();
let session: OpenSession = session_request.json().await?;
if !session.success {
return Err(Error::ConnectionFailed);
}
let mut api = AuthenticatedApi {
server_url: login_details.server,
passwords_url,
client,
login: login_details.login_name,
password: login_details.app_password,
session_id: session_id.clone(),
keepalive: 0,
};
api.keepalive = api.settings().get().session_lifetime().await?;
log::debug!("Session keepalive is: {}", api.keepalive);
Ok((api, session_id))
}
/// Disconnect from the session
pub async fn disconnect(self) -> Result<(), Error> {
#[derive(Deserialize)]
struct CloseSession {
success: bool,
}
let s: CloseSession = self.passwords_get("1.0/session/close", ()).await?;
if !s.success {
Err(Error::DisconnectionFailed)
} else {
Ok(())
}
}
/// Get the state to be able to resume this session
pub fn get_state(&self) -> ResumeState {
ResumeState {
server_url: self.server_url.clone(),
password_url: self.passwords_url.clone(),
keepalive: self.keepalive,
session_id: self.session_id.clone(),
login: self.login.clone(),
password: self.password.clone(),
shutdown_time: std::time::SystemTime::now(),
}
}
}
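// Sketch of persisting and resuming a session across restarts (where the
// serialized state is stored is up to the caller; note it contains the app
// password, so treat it as a secret):
#[allow(dead_code)]
async fn resume_example(details: LoginDetails) -> Result<(), Error> {
    let (api, _session_id) = AuthenticatedApi::new_session(details).await?;
    let saved = serde_json::to_string(&api.get_state())?;
    // ... later, e.g. after a restart:
    let restored: ResumeState = serde_json::from_str(&saved)?;
    let (api, _session_id) = AuthenticatedApi::resume_session(restored).await?;
    api.disconnect().await
}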
#[cfg(test)]
mod tests {}
// main.rs
use shared::{
grid::{self as sg, Grid, GridTile, Coordinate},
input::read_stdin_lines
};
use lazy_static::*;
use regex::Regex;
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd)]
enum Tile {
Clay, Sand, Spring, Water, WaterAtRest
}
struct Map {
data: Vec<Vec<Tile>>,
xstart: usize
}
impl Map {
fn new(mut data: Vec<Vec<Tile>>, xstart: usize) -> Self {
data[0][500 - xstart + 2] = Tile::Spring;
Map {
data,
xstart
}
}
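// Pours water straight down from (y, x), marking traversed sand as flowing
// water; returns the tile above the first obstruction, or (len, x) if the
// stream falls off the bottom of the scan.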
fn flow_down(&mut self, y: usize, x: usize) -> (usize, usize) {
for yf in y+1.. self.data.len() {
if self.data[yf][x] == Tile::Sand {
self.data[yf][x] = Tile::Water;
} else if self.data[yf][x] != Tile::Water {
return (yf - 1, x);
}
}
(self.data.len(), x)
}
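// Scans row `y` left and right of `x`, stopping at a clay wall or where the
// floor below turns back to sand; the caller re-checks the tile below each
// returned bound to tell a real enclosure from a drop-off.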
fn check_enclosed(&self, y: usize, x: usize) -> (Option<usize>, Option<usize>) {
// Check left
let (mut enclosed_left, mut enclosed_right) = (None, None);
for xf in (0..x).rev() {
if self.data[y][xf] == Tile::Clay
|| self.data[y + 1][xf] == Tile::Sand
{
enclosed_left = Some(xf + 1);
break;
}
}
for xf in x..self.data[y].len() {
if self.data[y][xf] == Tile::Clay
|| self.data[y + 1][xf] == Tile::Sand
{
enclosed_right = Some(xf);
break;
}
}
(enclosed_left, enclosed_right)
}
fn update(&mut self) -> bool {
// Find next flowable water tile
let mut last = None;
/* Lots of trial and error'd code below, lots of redundancies probably */
'outer: for y in (0..self.data.len() - 1).rev() {
for x in 0..self.data[y].len() {
match self.data[y][x] {
Tile::Spring | Tile::Water => {
let below = self.data[y + 1][x];
// Do not try to flow water that's already flowing
if x > 0 && x < self.data[y].len() - 1
&& self.data[y][x - 1] == Tile::Water
&& self.data[y][x + 1] == Tile::Water {
continue;
}
// Do not try to flow flowing water that's stopped at a left wall
if x > 0 && self.data[y][x - 1] == Tile::Clay
&& self.data[y][x + 1] == Tile::Water {
continue;
}
// Do not try to flow flowing water that's stopped at a right wall
if x < self.data[y].len() - 1 && self.data[y][x + 1] == Tile::Clay
&& x > 0 && self.data[y][x - 1] == Tile::Water {
continue;
}
// Try to flow water that is either hanging over sand or over resting water
if below == Tile::Sand || below == Tile::WaterAtRest {
last = Some(self.flow_down(y, x));
if last.unwrap().0 < self.data.len() {
break 'outer;
}
}
}
_ => ()
}
}
}
// No flowable water found, we are done here
if let None = last {
return false;
}
let last = last.unwrap();
if last.0 == self.data.len() {
return false; // Dead flow
}
let (mut ec_left, mut ec_right) = self.check_enclosed(last.0, last.1);
// Normalize enclosed spaces and resolve drops in the flow_right and flow_left helpers
if let Some(l) = ec_left {
if self.data[last.0 + 1][l - 1] == Tile::Sand {
ec_left = None;
}
}
if let Some(r) = ec_right {
if self.data[last.0 + 1][r] == Tile::Sand {
ec_right = None;
}
}
// Keep flowing right until stopped or dropping
let flow_right = |data: &mut Vec<Vec<Tile>>, tile| {
for x in last.1.. {
data[last.0][x] = tile;
if data[last.0 + 1][x] == Tile::Sand || data[last.0][x + 1] == Tile::Water
|| x == data[last.0].len() - 1 || data[last.0][x + 1] == Tile::Clay {
break;
}
}
};
// Keep flowing left until stopped or dropping
let flow_left = |data: &mut Vec<Vec<Tile>>, tile| {
for x in (0..last.1 + 1).rev() {
data[last.0][x] = tile;
if data[last.0 + 1][x] == Tile::Sand
|| x == 0 || data[last.0][x - 1] == Tile::Clay || data[last.0][x - 1] == Tile::Water {
break;
}
}
};
// Try to pool or keep flowing sideways
match (ec_left, ec_right) {
(Some(l), Some(r)) => {
// Enclosed on both sides, pool up
for x in l..r {
self.data[last.0][x] = Tile::WaterAtRest;
}
},
_ => {
// Flow both directions until drop or stop
flow_left(&mut self.data, Tile::Water);
flow_right(&mut self.data, Tile::Water);
}
}
// Extend spouts left and right that can be dropped in the next iteration if not fully enclosed
if let Some(l) = ec_left {
if self.data[last.0 + 1][l - 1] == Tile::Sand {
self.data[last.0][l - 1] = Tile::Water;
}
}
if let Some(r) = ec_right {
if self.data[last.0 + 1][r] == Tile::Sand {
self.data[last.0][r] = Tile::Water;
}
}
true
}
fn count_water(&self, ystart: usize, yend: usize) -> (usize, usize) {
let (mut flowing, mut at_rest) = (0, 0);
for y in ystart..=yend {
for x in 0..self.data[0].len() {
match self.data[y][x] {
Tile::Water => flowing += 1,
Tile::WaterAtRest => at_rest += 1,
_ => ()
}
}
}
(flowing, at_rest)
}
}
impl sg::Grid for Map {
type Coord = sg::Coord;
type Tile = Tile;
fn bounds(&self) -> (Self::Coord, Self::Coord) {
(sg::Coord::new(0, 0),
sg::Coord::new(self.data.len(), self.data[0].len()))
}
fn tile_at(&self, c: &Self::Coord) -> &Self::Tile {
&self.data[c.y()][c.x()]
}
}
impl sg::GridTile for Tile {
fn to_char(&self) -> char {
match self {
Tile::Clay => '#',
Tile::Sand => '.',
Tile::Spring => '+',
Tile::Water => '|',
Tile::WaterAtRest => '~'
}
}
fn color(&self) -> sg::TileColor {
match self {
Tile::Clay => sg::TileColor::Foreground((sg::Color::Yellow, sg::Attribute::Bold)),
Tile::Sand => sg::TileColor::Foreground((sg::Color::Yellow, sg::Attribute::None)),
Tile::Spring => sg::TileColor::Foreground((sg::Color::Blue, sg::Attribute::Bold)),
Tile::Water | Tile::WaterAtRest => sg::TileColor::Foreground((sg::Color::Blue, sg::Attribute::None))
}
}
}
use std::ops::RangeInclusive;
#[derive(Debug)]
enum ScanEntry {
RangeY(RangeInclusive<usize>, usize),
RangeX(usize, RangeInclusive<usize>)
}
impl ScanEntry {
fn parse(e: &String) -> Option<ScanEntry> {
lazy_static! {
static ref PAT: Regex = Regex::new(r"(x|y)=(\d+)(?:..(\d+))?").unwrap();
}
let mut captures = PAT.captures_iter(e);
let first_coord = captures.next()?;
let second_coord = captures.next()?;
if first_coord.get(1)?.as_str() == "x" {
let x: usize = first_coord.get(2)?.as_str().parse().ok()?;
let ys = second_coord.get(2)?.as_str().parse().ok()?;
let ye = second_coord.get(3)?.as_str().parse().ok()?;
Some(ScanEntry::RangeY(ys..=ye, x + 1))
} else if first_coord.get(1)?.as_str() == "y" {
let y = first_coord.get(2)?.as_str().parse().ok()?;
let xs: usize = second_coord.get(2)?.as_str().parse().ok()?;
let xe: usize = second_coord.get(3)?.as_str().parse().ok()?;
Some(ScanEntry::RangeX(y, (xs + 1)..=(xe + 1 )))
} else {
None
}
}
}
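// A small sanity check for the parser above; note the `+ 1` x-offsets mirror
// the horizontal padding applied when the grid is built below.
#[cfg(test)]
mod scan_entry_tests {
    use super::*;

    #[test]
    fn parses_both_axis_orders() {
        match ScanEntry::parse(&"x=495, y=2..7".to_string()) {
            Some(ScanEntry::RangeY(ys, x)) => assert_eq!((ys, x), (2..=7, 496)),
            other => panic!("unexpected parse: {:?}", other),
        }
        match ScanEntry::parse(&"y=7, x=495..501".to_string()) {
            Some(ScanEntry::RangeX(y, xs)) => assert_eq!((y, xs), (7, 496..=502)),
            other => panic!("unexpected parse: {:?}", other),
        }
    }
}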
use std::collections::HashSet;
fn main() {
let input = read_stdin_lines().expect("could not lock stdin");
let mut results = input.iter().filter_map(ScanEntry::parse).collect::<Vec<_>>();
let (cmin, cmax) = sg::Coord::numeric_limits();
let (mut min, mut max) = (sg::Coord::new(cmax, cmax), sg::Coord::new(cmin, cmin));
let clay = results.iter_mut().fold(HashSet::new(), |mut hs, res| {
let mut update = |c: sg::Coord| {
hs.insert(c);
if c.0 > max.0 { max.0 = c.0; }
if c.1 > max.1 { max.1 = c.1; }
if c.0 < min.0 { min.0 = c.0; }
if c.1 < min.1 { min.1 = c.1; }
};
match res {
ScanEntry::RangeX(y, xr) => {
for x in xr {
update(sg::Coord::new(*y, x));
}
},
ScanEntry::RangeY(yr, x) => {
for y in yr {
update(sg::Coord::new(y, *x));
}
}
}
hs
});
let mut grid = Vec::with_capacity(max.y());
for y in 0..=max.y() {
let mut r = Vec::with_capacity(max.x() - min.x());
for x in min.x() - 1..= max.x() + 1 {
if clay.contains(&sg::Coord::new(y, x)) {
r.push(Tile::Clay);
} else {
r.push(Tile::Sand);
}
}
grid.push(r);
}
let mut grid = Map::new(grid, min.x());
// Step the simulation until no more water can flow.
while grid.update() {}
grid.draw();
let (f, r) = grid.count_water(min.y(), max.y());
println!("Part 1: Total water tiles: {}", f + r);
println!("Part 2: Remaining tiles: {}", r);
}
// lib.rs
ProgramError::AccountBorrowFailed => InstructionError::AccountBorrowFailed,
ProgramError::MaxSeedLengthExceeded => InstructionError::MaxSeedLengthExceeded,
ProgramError::InvalidSeeds => InstructionError::InvalidSeeds,
}
}
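// Thread-local slot holding the invoke context of the currently-executing
// program, so the free-standing syscall stubs below can reach it.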
thread_local! {
static INVOKE_CONTEXT: RefCell<Rc<MockInvokeContext>> = RefCell::new(Rc::new(MockInvokeContext::default()));
}
pub fn builtin_process_instruction(
process_instruction: solana_program::entrypoint::ProcessInstruction,
program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
input: &[u8],
invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
let mut mock_invoke_context = MockInvokeContext::default();
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
mock_invoke_context.key = *program_id;
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
let local_invoke_context = RefCell::new(Rc::new(mock_invoke_context));
swap_invoke_context(&local_invoke_context);
// Copy all the accounts into a HashMap to ensure there are no duplicates
let mut accounts: HashMap<Pubkey, Account> = keyed_accounts
.iter()
.map(|ka| (*ka.unsigned_key(), ka.account.borrow().clone()))
.collect();
// Create shared references to each account's lamports/data/owner
let account_refs: HashMap<_, _> = accounts
.iter_mut()
.map(|(key, account)| {
(
*key,
(
Rc::new(RefCell::new(&mut account.lamports)),
Rc::new(RefCell::new(&mut account.data[..])),
&account.owner,
),
)
})
.collect();
// Create AccountInfos
let account_infos: Vec<AccountInfo> = keyed_accounts
.iter()
.map(|keyed_account| {
let key = keyed_account.unsigned_key();
let (lamports, data, owner) = &account_refs[key];
AccountInfo {
key,
is_signer: keyed_account.signer_key().is_some(),
is_writable: keyed_account.is_writable(),
lamports: lamports.clone(),
data: data.clone(),
owner,
executable: keyed_account.executable().unwrap(),
rent_epoch: keyed_account.rent_epoch().unwrap(),
}
})
.collect();
// Execute the BPF entrypoint
let result =
process_instruction(program_id, &account_infos, input).map_err(to_instruction_error);
if result.is_ok() {
// Commit changes to the KeyedAccounts
for keyed_account in keyed_accounts {
let mut account = keyed_account.account.borrow_mut();
let key = keyed_account.unsigned_key();
let (lamports, data, _owner) = &account_refs[key];
account.lamports = **lamports.borrow();
account.data = data.borrow().to_vec();
}
}
swap_invoke_context(&local_invoke_context);
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
for message in local_invoke_context.borrow().logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
result
}
/// Converts a `solana-program`-style entrypoint into the runtime's entrypoint style, for
/// use with `ProgramTest::add_program`
#[macro_export]
macro_rules! processor {
($process_instruction:expr) => {
Some(
|program_id: &Pubkey,
keyed_accounts: &[solana_sdk::keyed_account::KeyedAccount],
input: &[u8],
invoke_context: &mut dyn solana_sdk::process_instruction::InvokeContext| {
$crate::builtin_process_instruction(
$process_instruction,
program_id,
keyed_accounts,
input,
invoke_context,
)
},
)
};
}
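// Usage sketch (the crate name and entrypoint symbol are placeholders):
//
//     let program_test = ProgramTest::new(
//         "my_program",
//         my_program::id(),
//         processor!(process_instruction),
//     );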
pub fn swap_invoke_context(other_invoke_context: &RefCell<Rc<MockInvokeContext>>) {
INVOKE_CONTEXT.with(|invoke_context| {
invoke_context.swap(&other_invoke_context);
});
}
struct SyscallStubs {}
impl program_stubs::SyscallStubs for SyscallStubs {
fn sol_log(&self, message: &str) {
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
if logger.log_enabled() {
logger.log(&format!("Program log: {}", message));
}
});
}
fn sol_invoke_signed(
&self,
instruction: &Instruction,
account_infos: &[AccountInfo],
signers_seeds: &[&[&[u8]]],
) -> ProgramResult {
//
// TODO: Merge the business logic between here and the BPF invoke path in
// programs/bpf_loader/src/syscalls.rs
//
info!("SyscallStubs::sol_invoke_signed()");
let mut caller = Pubkey::default();
let mut mock_invoke_context = MockInvokeContext::default();
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
caller = *invoke_context.get_caller().expect("get_caller");
invoke_context.record_instruction(&instruction);
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
});
if instruction.accounts.len() + 1 != account_infos.len() {
panic!(
"Instruction accounts mismatch. Instruction contains {} accounts, with {}
AccountInfos provided",
instruction.accounts.len(),
account_infos.len()
);
}
let message = Message::new(&[instruction.clone()], None);
let program_id_index = message.instructions[0].program_id_index as usize;
let program_id = message.account_keys[program_id_index];
let program_account_info = &account_infos[program_id_index];
if !program_account_info.executable {
panic!("Program account is not executable");
}
if program_account_info.is_writable {
panic!("Program account is writable");
}
fn ai_to_a(ai: &AccountInfo) -> Account {
Account {
lamports: ai.lamports(),
data: ai.try_borrow_data().unwrap().to_vec(),
owner: *ai.owner,
executable: ai.executable,
rent_epoch: ai.rent_epoch,
}
}
let executable_accounts = vec![(program_id, RefCell::new(ai_to_a(program_account_info)))];
let mut accounts = vec![];
for instruction_account in &instruction.accounts {
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
if instruction_account.is_writable && !account_info.is_writable {
panic!("Writeable mismatch for {}", instruction_account.pubkey);
}
if instruction_account.is_signer && !account_info.is_signer {
let mut program_signer = false;
for seeds in signers_seeds.iter() {
let signer = Pubkey::create_program_address(&seeds, &caller).unwrap();
if instruction_account.pubkey == signer {
program_signer = true;
break;
}
}
if !program_signer {
panic!("Signer mismatch for {}", instruction_account.pubkey);
}
}
accounts.push(Rc::new(RefCell::new(ai_to_a(account_info))));
break;
}
}
}
assert_eq!(accounts.len(), instruction.accounts.len());
solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction(
&message,
&executable_accounts,
&accounts,
&mut mock_invoke_context,
)
.map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?;
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
INVOKE_CONTEXT.with(|invoke_context| {
let logger = invoke_context.borrow().get_logger();
let logger = logger.borrow_mut();
for message in mock_invoke_context.logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
});
// Copy writeable account modifications back into the caller's AccountInfos
for (i, instruction_account) in instruction.accounts.iter().enumerate() {
if !instruction_account.is_writable {
continue;
}
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
let account = &accounts[i];
**account_info.try_borrow_mut_lamports().unwrap() = account.borrow().lamports;
let mut data = account_info.try_borrow_mut_data()?;
let new_data = &account.borrow().data;
if data.len() != new_data.len() {
// TODO: Figure out how to change the callers account data size
panic!(
"Account resizing ({} -> {}) not supported yet",
data.len(),
new_data.len()
);
}
data.clone_from_slice(new_data);
}
}
}
Ok(())
}
}
fn find_file(filename: &str) -> Option<PathBuf> {
for path in &["", "tests/fixtures"] {
let candidate = Path::new(path).join(&filename);
if candidate.exists() {
return Some(candidate);
}
}
None
}
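// Note: lookup order matters: "my_program.so" (an illustrative name) resolves
// to ./my_program.so if present, then tests/fixtures/my_program.so, else None.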
fn read_file<P: AsRef<Path>>(path: P) -> Vec<u8> {
let path = path.as_ref();
let mut file = File::open(path)
.unwrap_or_else(|err| panic!("Failed to open \"{}\": {}", path.display(), err));
let mut file_data = Vec::new();
file.read_to_end(&mut file_data)
.unwrap_or_else(|err| panic!("Failed to read \"{}\": {}", path.display(), err));
file_data
}
pub struct ProgramTest {
accounts: Vec<(Pubkey, Account)>,
builtins: Vec<Builtin>,
bpf_compute_max_units: Option<u64>,
prefer_bpf: bool,
}
impl Default for ProgramTest {
/// Initialize a new ProgramTest
///
/// The `bpf` environment variable controls how BPF programs are selected during operation:
/// `export bpf=1` -- use BPF programs if present, otherwise fall back to the
/// native instruction processors provided with the test
/// `export bpf=0` -- use native instruction processor if present, otherwise fall back to
/// the BPF program
/// (default)
/// and the `ProgramTest::prefer_bpf()` method may be used to override the selection at runtime
///
/// BPF program shared objects and account data files are searched for in
/// * the current working directory (the default output location for `cargo build-bpf`),
/// * the `tests/fixtures` sub-directory
///
fn default() -> Self {
solana_logger::setup_with_default(
"solana_bpf_loader=debug,\
solana_rbpf::vm=debug,\
solana_runtime::message_processor=info,\
solana_runtime::system_instruction_processor=trace,\
solana_program_test=info",
);
let prefer_bpf = match std::env::var("bpf") {
Ok(val) => !matches!(val.as_str(), "0" | ""),
Err(_err) => false,
};
Self {
accounts: vec![],
builtins: vec![],
bpf_compute_max_units: None,
prefer_bpf,
}
}
}
// Values returned by `ProgramTest::start`
pub struct StartOutputs {
pub banks_client: BanksClient,
pub payer: Keypair,
pub recent_blockhash: Hash,
pub rent: Rent,
}
impl ProgramTest {
pub fn new(
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) -> Self {
let mut me = Self::default();
me.add_program(program_name, program_id, process_instruction);
me
}
/// Override default BPF program selection
pub fn prefer_bpf(&mut self, prefer_bpf: bool) {
self.prefer_bpf = prefer_bpf;
}
/// Override the BPF compute budget
pub fn set_bpf_compute_max_units(&mut self, bpf_compute_max_units: u64) {
self.bpf_compute_max_units = Some(bpf_compute_max_units);
}
/// Add an account to the test environment
pub fn add_account(&mut self, address: Pubkey, account: Account) {
self.accounts.push((address, account));
}
/// Add an account to the test environment with the account data from the provided `filename`
pub fn add_account_with_file_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
filename: &str,
) {
self.add_account(
address,
Account {
lamports,
data: read_file(find_file(filename).unwrap_or_else(|| {
panic!("Unable to locate {}", filename);
})),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add an account to the test environment with the account data provided as a base64
/// string
pub fn add_account_with_base64_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
data_base64: &str,
) {
self.add_account(
address,
Account {
lamports,
data: base64::decode(data_base64)
.unwrap_or_else(|err| panic!("Failed to base64 decode: {}", err)),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add a BPF program to the test environment.
///
/// `program_name` will also be used to locate the BPF shared object in the current or fixtures
/// directory.
///
/// If `process_instruction` is provided, the natively-built program may be used instead of the
/// BPF shared object depending on the `bpf` environment variable.
pub fn add_program(
&mut self,
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) {
let loader = solana_program::bpf_loader::id();
let program_file = find_file(&format!("{}.so", program_name));
if process_instruction.is_none() && program_file.is_none() {
panic!("Unable to add program {} ({})", program_name, program_id);
}
if (program_file.is_some() && self.prefer_bpf) || process_instruction.is_none() {
let program_file = program_file.unwrap_or_else(|| {
panic!(
"Program file data not available for {} ({})",
program_name, program_id
);
});
let data = read_file(&program_file);
info!(
"\"{}\" BPF program from {}{}",
program_name,
program_file.display(),
std::fs::metadata(&program_file)
.map(|metadata| {
metadata
.modified()
.map(|time| {
format!(
", modified {}",
HumanTime::from(time)
.to_text_en(Accuracy::Precise, Tense::Past)
)
})
.ok()
})
.ok()
.flatten()
.unwrap_or_else(|| "".to_string())
);
self.add_account(
program_id,
Account {
lamports: Rent::default().minimum_balance(data.len()).max(1),
data,
owner: loader,
executable: true,
rent_epoch: 0,
},
);
} else {
| swap_invoke_context | identifier_name |
lib.rs | Error::AccountBorrowFailed,
ProgramError::MaxSeedLengthExceeded => InstructionError::MaxSeedLengthExceeded,
ProgramError::InvalidSeeds => InstructionError::InvalidSeeds,
}
}
thread_local! {
static INVOKE_CONTEXT: RefCell<Rc<MockInvokeContext>> = RefCell::new(Rc::new(MockInvokeContext::default()));
}
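/// Adapts a `solana-program` entrypoint to the runtime's `KeyedAccount`
/// interface: accounts are cloned into `AccountInfo`s, the entrypoint runs,
/// and on success the lamport/data changes are committed back.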
pub fn builtin_process_instruction(
process_instruction: solana_program::entrypoint::ProcessInstruction,
program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
input: &[u8],
invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
let mut mock_invoke_context = MockInvokeContext::default();
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
mock_invoke_context.key = *program_id;
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
let local_invoke_context = RefCell::new(Rc::new(mock_invoke_context));
swap_invoke_context(&local_invoke_context);
// Copy all the accounts into a HashMap to ensure there are no duplicates
let mut accounts: HashMap<Pubkey, Account> = keyed_accounts
.iter()
.map(|ka| (*ka.unsigned_key(), ka.account.borrow().clone()))
.collect();
// Create shared references to each account's lamports/data/owner
let account_refs: HashMap<_, _> = accounts
.iter_mut()
.map(|(key, account)| {
(
*key,
(
Rc::new(RefCell::new(&mut account.lamports)),
Rc::new(RefCell::new(&mut account.data[..])),
&account.owner,
),
)
})
.collect();
// Create AccountInfos
let account_infos: Vec<AccountInfo> = keyed_accounts
.iter()
.map(|keyed_account| {
let key = keyed_account.unsigned_key();
let (lamports, data, owner) = &account_refs[key];
AccountInfo {
key,
is_signer: keyed_account.signer_key().is_some(),
is_writable: keyed_account.is_writable(),
lamports: lamports.clone(),
data: data.clone(),
owner,
executable: keyed_account.executable().unwrap(),
rent_epoch: keyed_account.rent_epoch().unwrap(),
}
})
.collect();
// Execute the BPF entrypoint
let result =
process_instruction(program_id, &account_infos, input).map_err(to_instruction_error);
if result.is_ok() {
// Commit changes to the KeyedAccounts
for keyed_account in keyed_accounts {
let mut account = keyed_account.account.borrow_mut();
let key = keyed_account.unsigned_key();
let (lamports, data, _owner) = &account_refs[key];
account.lamports = **lamports.borrow();
account.data = data.borrow().to_vec();
}
}
swap_invoke_context(&local_invoke_context);
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
for message in local_invoke_context.borrow().logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
result
}
/// Converts a `solana-program`-style entrypoint into the runtime's entrypoint style, for
/// use with `ProgramTest::add_program`
#[macro_export]
macro_rules! processor {
($process_instruction:expr) => {
Some(
|program_id: &Pubkey,
keyed_accounts: &[solana_sdk::keyed_account::KeyedAccount],
input: &[u8],
invoke_context: &mut dyn solana_sdk::process_instruction::InvokeContext| {
$crate::builtin_process_instruction(
$process_instruction,
program_id,
keyed_accounts,
input,
invoke_context,
)
},
)
};
}
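/// Swap the thread-local invoke context with `other_invoke_context` for the
/// duration of a call; a second swap with the same argument restores it.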
pub fn swap_invoke_context(other_invoke_context: &RefCell<Rc<MockInvokeContext>>) {
INVOKE_CONTEXT.with(|invoke_context| {
invoke_context.swap(&other_invoke_context);
});
}
struct SyscallStubs {}
impl program_stubs::SyscallStubs for SyscallStubs {
fn sol_log(&self, message: &str) {
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
if logger.log_enabled() {
logger.log(&format!("Program log: {}", message));
}
});
}
fn sol_invoke_signed(
&self,
instruction: &Instruction,
account_infos: &[AccountInfo],
signers_seeds: &[&[&[u8]]],
) -> ProgramResult {
//
// TODO: Merge the business logic between here and the BPF invoke path in
// programs/bpf_loader/src/syscalls.rs
//
info!("SyscallStubs::sol_invoke_signed()");
let mut caller = Pubkey::default();
let mut mock_invoke_context = MockInvokeContext::default();
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
caller = *invoke_context.get_caller().expect("get_caller");
invoke_context.record_instruction(&instruction);
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
});
if instruction.accounts.len() + 1 != account_infos.len() {
panic!(
"Instruction accounts mismatch. Instruction contains {} accounts, with {}
AccountInfos provided",
instruction.accounts.len(),
account_infos.len()
);
}
let message = Message::new(&[instruction.clone()], None);
let program_id_index = message.instructions[0].program_id_index as usize;
let program_id = message.account_keys[program_id_index];
let program_account_info = &account_infos[program_id_index];
if !program_account_info.executable {
panic!("Program account is not executable");
}
if program_account_info.is_writable {
panic!("Program account is writable");
}
fn ai_to_a(ai: &AccountInfo) -> Account {
Account {
lamports: ai.lamports(),
data: ai.try_borrow_data().unwrap().to_vec(),
owner: *ai.owner,
executable: ai.executable,
rent_epoch: ai.rent_epoch,
}
}
let executable_accounts = vec![(program_id, RefCell::new(ai_to_a(program_account_info)))];
let mut accounts = vec![];
for instruction_account in &instruction.accounts {
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
if instruction_account.is_writable && !account_info.is_writable {
panic!("Writeable mismatch for {}", instruction_account.pubkey);
}
if instruction_account.is_signer && !account_info.is_signer {
let mut program_signer = false;
for seeds in signers_seeds.iter() {
let signer = Pubkey::create_program_address(&seeds, &caller).unwrap();
if instruction_account.pubkey == signer {
program_signer = true;
break;
}
}
if !program_signer {
panic!("Signer mismatch for {}", instruction_account.pubkey);
}
}
accounts.push(Rc::new(RefCell::new(ai_to_a(account_info))));
break;
}
}
}
assert_eq!(accounts.len(), instruction.accounts.len());
solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction(
&message,
&executable_accounts,
&accounts,
&mut mock_invoke_context,
)
.map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?;
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
INVOKE_CONTEXT.with(|invoke_context| {
let logger = invoke_context.borrow().get_logger();
let logger = logger.borrow_mut();
for message in mock_invoke_context.logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
});
// Copy writeable account modifications back into the caller's AccountInfos
for (i, instruction_account) in instruction.accounts.iter().enumerate() {
if !instruction_account.is_writable {
continue;
}
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
let account = &accounts[i];
**account_info.try_borrow_mut_lamports().unwrap() = account.borrow().lamports;
let mut data = account_info.try_borrow_mut_data()?;
let new_data = &account.borrow().data;
if data.len() != new_data.len() {
// TODO: Figure out how to change the callers account data size
panic!(
"Account resizing ({} -> {}) not supported yet",
data.len(),
new_data.len()
);
}
data.clone_from_slice(new_data);
}
}
}
Ok(())
}
}
fn find_file(filename: &str) -> Option<PathBuf> {
for path in &["", "tests/fixtures"] {
let candidate = Path::new(path).join(&filename);
if candidate.exists() {
return Some(candidate);
}
}
None
}
fn read_file<P: AsRef<Path>>(path: P) -> Vec<u8> {
let path = path.as_ref();
let mut file = File::open(path)
.unwrap_or_else(|err| panic!("Failed to open \"{}\": {}", path.display(), err));
let mut file_data = Vec::new();
file.read_to_end(&mut file_data)
.unwrap_or_else(|err| panic!("Failed to read \"{}\": {}", path.display(), err));
file_data
}
pub struct ProgramTest {
accounts: Vec<(Pubkey, Account)>,
builtins: Vec<Builtin>,
bpf_compute_max_units: Option<u64>,
prefer_bpf: bool,
}
impl Default for ProgramTest {
/// Initialize a new ProgramTest
///
/// The `bpf` environment variable controls how BPF programs are selected during operation:
/// `export bpf=1` -- use BPF programs if present, otherwise fall back to the
/// native instruction processors provided with the test
/// `export bpf=0` -- use native instruction processor if present, otherwise fall back to
/// the BPF program
/// (default)
/// and the `ProgramTest::prefer_bpf()` method may be used to override the selection at runtime
///
/// BPF program shared objects and account data files are searched for in
/// * the current working directory (the default output location for `cargo build-bpf`),
/// * the `tests/fixtures` sub-directory
///
fn default() -> Self {
solana_logger::setup_with_default(
"solana_bpf_loader=debug,\
solana_rbpf::vm=debug,\
solana_runtime::message_processor=info,\
solana_runtime::system_instruction_processor=trace,\
solana_program_test=info",
);
let prefer_bpf = match std::env::var("bpf") {
Ok(val) => !matches!(val.as_str(), "0" | ""),
Err(_err) => false,
};
Self {
accounts: vec![],
builtins: vec![],
bpf_compute_max_units: None,
prefer_bpf,
}
}
}
// Values returned by `ProgramTest::start`
pub struct StartOutputs {
pub banks_client: BanksClient,
pub payer: Keypair,
pub recent_blockhash: Hash,
pub rent: Rent,
}
impl ProgramTest {
pub fn new(
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) -> Self |
/// Override default BPF program selection
pub fn prefer_bpf(&mut self, prefer_bpf: bool) {
self.prefer_bpf = prefer_bpf;
}
/// Override the BPF compute budget
pub fn set_bpf_compute_max_units(&mut self, bpf_compute_max_units: u64) {
self.bpf_compute_max_units = Some(bpf_compute_max_units);
}
/// Add an account to the test environment
pub fn add_account(&mut self, address: Pubkey, account: Account) {
self.accounts.push((address, account));
}
/// Add an account to the test environment with the account data from the provided `filename`
pub fn add_account_with_file_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
filename: &str,
) {
self.add_account(
address,
Account {
lamports,
data: read_file(find_file(filename).unwrap_or_else(|| {
panic!("Unable to locate {}", filename);
})),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add an account to the test environment with the account data provided as a base64
/// string
pub fn add_account_with_base64_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
data_base64: &str,
) {
self.add_account(
address,
Account {
lamports,
data: base64::decode(data_base64)
.unwrap_or_else(|err| panic!("Failed to base64 decode: {}", err)),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add a BPF program to the test environment.
///
/// `program_name` will also be used to locate the BPF shared object in the current or fixtures
/// directory.
///
/// If `process_instruction` is provided, the natively-built program may be used instead of the
/// BPF shared object depending on the `bpf` environment variable.
pub fn add_program(
&mut self,
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) {
let loader = solana_program::bpf_loader::id();
let program_file = find_file(&format!("{}.so", program_name));
if process_instruction.is_none() && program_file.is_none() {
panic!("Unable to add program {} ({})", program_name, program_id);
}
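// Use the on-disk BPF shared object when it exists and BPF is preferred, or
// when no native processor was supplied; otherwise register the native
// processor.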
if (program_file.is_some() && self.prefer_bpf) || process_instruction.is_none() {
let program_file = program_file.unwrap_or_else(|| {
panic!(
"Program file data not available for {} ({})",
program_name, program_id
);
});
let data = read_file(&program_file);
info!(
"\"{}\" BPF program from {}{}",
program_name,
program_file.display(),
std::fs::metadata(&program_file)
.map(|metadata| {
metadata
.modified()
.map(|time| {
format!(
", modified {}",
HumanTime::from(time)
.to_text_en(Accuracy::Precise, Tense::Past)
)
})
.ok()
})
.ok()
.flatten()
.unwrap_or_else(|| "".to_string())
);
self.add_account(
program_id,
Account {
lamports: Rent::default().minimum_balance(data.len()).max(1),
data,
owner: loader,
executable: true,
rent_epoch: 0,
},
);
} else {
| {
let mut me = Self::default();
me.add_program(program_name, program_id, process_instruction);
me
} | identifier_body |
lib.rs | InstructionError::AccountBorrowFailed,
ProgramError::MaxSeedLengthExceeded => InstructionError::MaxSeedLengthExceeded,
ProgramError::InvalidSeeds => InstructionError::InvalidSeeds,
}
}
thread_local! {
static INVOKE_CONTEXT: RefCell<Rc<MockInvokeContext>> = RefCell::new(Rc::new(MockInvokeContext::default()));
}
pub fn builtin_process_instruction(
process_instruction: solana_program::entrypoint::ProcessInstruction,
program_id: &Pubkey,
keyed_accounts: &[KeyedAccount],
input: &[u8],
invoke_context: &mut dyn InvokeContext,
) -> Result<(), InstructionError> {
let mut mock_invoke_context = MockInvokeContext::default();
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
mock_invoke_context.key = *program_id;
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
let local_invoke_context = RefCell::new(Rc::new(mock_invoke_context));
swap_invoke_context(&local_invoke_context);
// Copy all the accounts into a HashMap to ensure there are no duplicates
let mut accounts: HashMap<Pubkey, Account> = keyed_accounts
.iter()
.map(|ka| (*ka.unsigned_key(), ka.account.borrow().clone()))
.collect();
// Create shared references to each account's lamports/data/owner
let account_refs: HashMap<_, _> = accounts
.iter_mut()
.map(|(key, account)| {
(
*key,
(
Rc::new(RefCell::new(&mut account.lamports)),
Rc::new(RefCell::new(&mut account.data[..])),
&account.owner,
),
)
})
.collect();
// Create AccountInfos
let account_infos: Vec<AccountInfo> = keyed_accounts
.iter()
.map(|keyed_account| {
let key = keyed_account.unsigned_key();
let (lamports, data, owner) = &account_refs[key];
AccountInfo {
key,
is_signer: keyed_account.signer_key().is_some(),
is_writable: keyed_account.is_writable(),
lamports: lamports.clone(),
data: data.clone(),
owner,
executable: keyed_account.executable().unwrap(),
rent_epoch: keyed_account.rent_epoch().unwrap(),
}
})
.collect();
// Execute the BPF entrypoint
let result =
process_instruction(program_id, &account_infos, input).map_err(to_instruction_error);
if result.is_ok() {
// Commit changes to the KeyedAccounts
for keyed_account in keyed_accounts {
let mut account = keyed_account.account.borrow_mut();
let key = keyed_account.unsigned_key();
let (lamports, data, _owner) = &account_refs[key];
account.lamports = **lamports.borrow();
account.data = data.borrow().to_vec();
}
}
swap_invoke_context(&local_invoke_context);
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
for message in local_invoke_context.borrow().logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
result
}
/// Converts a `solana-program`-style entrypoint into the runtime's entrypoint style, for
/// use with `ProgramTest::add_program`
#[macro_export]
macro_rules! processor {
($process_instruction:expr) => {
Some(
|program_id: &Pubkey,
keyed_accounts: &[solana_sdk::keyed_account::KeyedAccount],
input: &[u8],
invoke_context: &mut dyn solana_sdk::process_instruction::InvokeContext| {
$crate::builtin_process_instruction(
$process_instruction,
program_id,
keyed_accounts,
input,
invoke_context,
)
},
)
};
}
pub fn swap_invoke_context(other_invoke_context: &RefCell<Rc<MockInvokeContext>>) {
INVOKE_CONTEXT.with(|invoke_context| {
invoke_context.swap(&other_invoke_context);
});
}
struct SyscallStubs {}
impl program_stubs::SyscallStubs for SyscallStubs {
fn sol_log(&self, message: &str) {
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
let logger = invoke_context.get_logger();
let logger = logger.borrow_mut();
if logger.log_enabled() {
logger.log(&format!("Program log: {}", message));
}
});
}
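/// Native-path CPI: validates the caller's `AccountInfo`s against the
/// instruction's account metas (honoring program-derived signer seeds), runs
/// the instruction through the message processor, and copies lamport and
/// data changes back into the writable accounts.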
fn sol_invoke_signed(
&self,
instruction: &Instruction,
account_infos: &[AccountInfo],
signers_seeds: &[&[&[u8]]],
) -> ProgramResult {
//
// TODO: Merge the business logic between here and the BPF invoke path in
// programs/bpf_loader/src/syscalls.rs
//
info!("SyscallStubs::sol_invoke_signed()");
let mut caller = Pubkey::default();
let mut mock_invoke_context = MockInvokeContext::default();
INVOKE_CONTEXT.with(|invoke_context| {
let invoke_context = invoke_context.borrow_mut();
caller = *invoke_context.get_caller().expect("get_caller");
invoke_context.record_instruction(&instruction);
mock_invoke_context.programs = invoke_context.get_programs().to_vec();
// TODO: Populate MockInvokeContext more, or rework to avoid MockInvokeContext entirely.
// The context being passed into the program is incomplete...
});
if instruction.accounts.len() + 1 != account_infos.len() {
panic!(
"Instruction accounts mismatch. Instruction contains {} accounts, with {}
AccountInfos provided",
instruction.accounts.len(),
account_infos.len()
);
}
let message = Message::new(&[instruction.clone()], None);
let program_id_index = message.instructions[0].program_id_index as usize;
let program_id = message.account_keys[program_id_index];
let program_account_info = &account_infos[program_id_index];
if !program_account_info.executable {
panic!("Program account is not executable");
}
if program_account_info.is_writable {
panic!("Program account is writable");
}
fn ai_to_a(ai: &AccountInfo) -> Account {
Account {
lamports: ai.lamports(),
data: ai.try_borrow_data().unwrap().to_vec(),
owner: *ai.owner,
executable: ai.executable,
rent_epoch: ai.rent_epoch,
}
}
let executable_accounts = vec![(program_id, RefCell::new(ai_to_a(program_account_info)))];
let mut accounts = vec![];
for instruction_account in &instruction.accounts {
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
if instruction_account.is_writable && !account_info.is_writable {
panic!("Writeable mismatch for {}", instruction_account.pubkey);
}
if instruction_account.is_signer && !account_info.is_signer {
let mut program_signer = false;
for seeds in signers_seeds.iter() {
let signer = Pubkey::create_program_address(&seeds, &caller).unwrap();
if instruction_account.pubkey == signer {
program_signer = true;
break;
}
}
if !program_signer {
panic!("Signer mismatch for {}", instruction_account.pubkey);
}
}
accounts.push(Rc::new(RefCell::new(ai_to_a(account_info))));
break;
}
}
}
assert_eq!(accounts.len(), instruction.accounts.len());
solana_runtime::message_processor::MessageProcessor::process_cross_program_instruction(
&message,
&executable_accounts,
&accounts,
&mut mock_invoke_context,
)
.map_err(|err| ProgramError::try_from(err).unwrap_or_else(|err| panic!("{}", err)))?;
// Propagate logs back to caller's invoke context
// (TODO: This goes away if MockInvokeContext usage can be removed)
INVOKE_CONTEXT.with(|invoke_context| {
let logger = invoke_context.borrow().get_logger();
let logger = logger.borrow_mut();
for message in mock_invoke_context.logger.log.borrow_mut().iter() {
if logger.log_enabled() {
logger.log(message);
}
}
});
// Copy writeable account modifications back into the caller's AccountInfos
for (i, instruction_account) in instruction.accounts.iter().enumerate() {
if !instruction_account.is_writable {
continue;
}
for account_info in account_infos {
if *account_info.unsigned_key() == instruction_account.pubkey {
let account = &accounts[i];
**account_info.try_borrow_mut_lamports().unwrap() = account.borrow().lamports;
let mut data = account_info.try_borrow_mut_data()?;
let new_data = &account.borrow().data;
if data.len() != new_data.len() {
// TODO: Figure out how to change the callers account data size
panic!(
"Account resizing ({} -> {}) not supported yet",
data.len(),
new_data.len()
);
}
data.clone_from_slice(new_data);
}
}
}
Ok(())
}
}
fn find_file(filename: &str) -> Option<PathBuf> {
for path in &["", "tests/fixtures"] {
let candidate = Path::new(path).join(&filename);
if candidate.exists() {
return Some(candidate);
}
}
None
}
fn read_file<P: AsRef<Path>>(path: P) -> Vec<u8> {
let path = path.as_ref();
let mut file = File::open(path)
.unwrap_or_else(|err| panic!("Failed to open \"{}\": {}", path.display(), err));
let mut file_data = Vec::new();
file.read_to_end(&mut file_data)
.unwrap_or_else(|err| panic!("Failed to read \"{}\": {}", path.display(), err));
file_data
}
pub struct ProgramTest {
accounts: Vec<(Pubkey, Account)>,
builtins: Vec<Builtin>,
bpf_compute_max_units: Option<u64>,
prefer_bpf: bool,
}
impl Default for ProgramTest {
/// Initialize a new ProgramTest
///
/// The `bpf` environment variable controls how BPF programs are selected during operation:
/// `export bpf=1` -- use BPF programs if present, otherwise fall back to the
/// native instruction processors provided with the test
/// `export bpf=0` -- use native instruction processor if present, otherwise fall back to
/// the BPF program
/// (default)
/// and the `ProgramTest::prefer_bpf()` method may be used to override the selection at runtime
///
/// BPF program shared objects and account data files are searched for in
/// * the current working directory (the default output location for `cargo build-bpf`),
/// * the `tests/fixtures` sub-directory
///
fn default() -> Self {
solana_logger::setup_with_default(
"solana_bpf_loader=debug,\
solana_rbpf::vm=debug,\
solana_runtime::message_processor=info,\
solana_runtime::system_instruction_processor=trace,\
solana_program_test=info",
);
let prefer_bpf = match std::env::var("bpf") {
Ok(val) => !matches!(val.as_str(), "0" | ""),
Err(_err) => false,
};
Self {
accounts: vec![],
builtins: vec![],
bpf_compute_max_units: None,
prefer_bpf,
}
}
}
// Values returned by `ProgramTest::start`
pub struct StartOutputs {
pub banks_client: BanksClient,
pub payer: Keypair,
pub recent_blockhash: Hash,
pub rent: Rent,
}
impl ProgramTest {
pub fn new(
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) -> Self {
let mut me = Self::default();
me.add_program(program_name, program_id, process_instruction);
me
}
/// Override default BPF program selection
pub fn prefer_bpf(&mut self, prefer_bpf: bool) {
self.prefer_bpf = prefer_bpf;
}
/// Override the BPF compute budget
pub fn set_bpf_compute_max_units(&mut self, bpf_compute_max_units: u64) {
self.bpf_compute_max_units = Some(bpf_compute_max_units);
}
/// Add an account to the test environment
pub fn add_account(&mut self, address: Pubkey, account: Account) {
self.accounts.push((address, account));
}
/// Add an account to the test environment with the account data from the provided `filename`
pub fn add_account_with_file_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
filename: &str,
) {
self.add_account(
address,
Account {
lamports,
data: read_file(find_file(filename).unwrap_or_else(|| {
panic!("Unable to locate {}", filename);
})),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add an account to the test environment with the account data provided as a base64
/// string
pub fn add_account_with_base64_data(
&mut self,
address: Pubkey,
lamports: u64,
owner: Pubkey,
data_base64: &str,
) {
self.add_account(
address,
Account {
lamports,
data: base64::decode(data_base64)
.unwrap_or_else(|err| panic!("Failed to base64 decode: {}", err)),
owner,
executable: false,
rent_epoch: 0,
},
);
}
/// Add a BPF program to the test environment.
///
/// `program_name` will also be used to locate the BPF shared object in the current or fixtures
/// directory.
///
/// If `process_instruction` is provided, the natively-built program may be used instead of the
/// BPF shared object depending on the `bpf` environment variable.
pub fn add_program(
&mut self,
program_name: &str,
program_id: Pubkey,
process_instruction: Option<ProcessInstructionWithContext>,
) {
let loader = solana_program::bpf_loader::id();
let program_file = find_file(&format!("{}.so", program_name));
if process_instruction.is_none() && program_file.is_none() {
panic!("Unable to add program {} ({})", program_name, program_id); | if (program_file.is_some() && self.prefer_bpf) || process_instruction.is_none() {
let program_file = program_file.unwrap_or_else(|| {
panic!(
"Program file data not available for {} ({})",
program_name, program_id
);
});
let data = read_file(&program_file);
info!(
"\"{}\" BPF program from {}{}",
program_name,
program_file.display(),
std::fs::metadata(&program_file)
.map(|metadata| {
metadata
.modified()
.map(|time| {
format!(
", modified {}",
HumanTime::from(time)
.to_text_en(Accuracy::Precise, Tense::Past)
)
})
.ok()
})
.ok()
.flatten()
.unwrap_or_else(|| "".to_string())
);
self.add_account(
program_id,
Account {
lamports: Rent::default().minimum_balance(data.len()).max(1),
data,
owner: loader,
executable: true,
rent_epoch: 0,
},
);
} else {
| }
| random_line_split |
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
/// Convert linear energy to logarithmic loudness.
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / f64::ln(10.0)) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
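// A small sanity sketch of the mapping above (assumes only the formula as
// written: unit energy hits the -0.691 offset, a 10x energy step adds 10).
#[cfg(test)]
mod energy_to_loudness_sketch {
    use super::energy_to_loudness;

    #[test]
    fn offset_and_decade_step() {
        assert!((energy_to_loudness(1.0) + 0.691).abs() < 1e-9);
        let step = energy_to_loudness(10.0) - energy_to_loudness(1.0);
        assert!((step - 10.0).abs() < 1e-9);
    }
}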
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, T: 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a T));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a T, U),
);
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, T> {
/// Interleaved sample data.
data: &'a [T],
/// Number of channels.
channels: usize,
}
impl<'a, T> Interleaved<'a, T> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [T], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, T> Samples<'a, T> for Interleaved<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.channels);
for (v, u) in self.data.chunks_exact(self.channels).zip(iter) {
func(&v[channel], u)
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
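// Illustrative only: summing one channel of interleaved stereo data through
// the `Samples` abstraction (layout assumed L R L R ...; `.ok().expect(...)`
// sidesteps any `Debug` bound on the error type).
#[test]
fn interleaved_channel_sum_sketch() {
    let data: [i16; 6] = [1, -1, 2, -2, 3, -3];
    let samples = Interleaved::new(&data[..], 2).ok().expect("even length");
    assert_eq!(samples.frames(), 3);
    let mut left = 0i32;
    samples.foreach_sample(0, |s| left += i32::from(*s));
    assert_eq!(left, 6);
}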
/// Struct representing planar samples.
pub struct Planar<'a, T> {
data: &'a [&'a [T]],
start: usize,
end: usize,
}
impl<'a, T> Planar<'a, T> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [T]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, T> Samples<'a, T> for Planar<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.data.len());
for (v, u) in self.data[channel][self.start..self.end].iter().zip(iter) {
func(v, u)
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline] | self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
/// Trait for converting samples into f32 in the range [-1, 1].
pub trait AsF32: Copy {
fn as_f32_scaled(self) -> f32;
}
impl AsF32 for i16 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i16::MIN as f32))
}
}
impl AsF32 for i32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i32::MIN as f32))
}
}
impl AsF32 for f32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self
}
}
impl AsF32 for f64 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32
}
}
/// Trait for converting samples into f64 in the range [-1, 1].
pub trait AsF64: AsF32 + Copy + PartialOrd {
const MAX: f64;
fn as_f64(self) -> f64;
#[inline]
fn as_f64_scaled(self) -> f64 {
self.as_f64() / Self::MAX
}
}
impl AsF64 for i16 {
const MAX: f64 = -(std::i16::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for i32 {
const MAX: f64 = -(std::i32::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f32 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f64 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self
}
}
#[cfg(test)]
pub mod tests {
#[derive(Clone, Debug)]
pub struct Signal<T: FromF32> {
pub data: Vec<T>,
pub channels: u32,
pub rate: u32,
}
pub trait FromF32: Copy + Clone + std::fmt::Debug + Send + Sync + 'static {
fn from_f32(val: f32) -> Self;
}
impl FromF32 for i16 {
fn from_f32(val: f32) -> Self {
(val * (std::i16::MAX - 1) as f32) as i16
}
}
impl FromF32 for i32 {
fn from_f32(val: f32) -> Self {
(val * (std::i32::MAX - 1) as f32) as i32
}
}
impl FromF32 for f32 {
fn from_f32(val: f32) -> Self {
val
}
}
impl FromF32 for f64 {
fn from_f32(val: f32) -> Self {
val as f64
}
}
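// The `Arbitrary` impl below synthesizes a random test signal: four sine
// tones with random frequencies and volumes, mixed at a random peak level
// and written identically to every channel of an interleaved buffer.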
impl<T: FromF32 + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<T> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![T::from_f32(0.0); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = T::from_f32(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
struct SignalShrinker<A: FromF32> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromF32 + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromF32 + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by removing size elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
} | fn channels(&self) -> usize { | random_line_split |
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
/// Convert linear energy to logarithmic loudness.
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / f64::ln(10.0)) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, T: 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a T));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a T, U),
);
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, T> {
/// Interleaved sample data.
data: &'a [T],
/// Number of channels.
channels: usize,
}
impl<'a, T> Interleaved<'a, T> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [T], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, T> Samples<'a, T> for Interleaved<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.channels);
for (v, u) in self.data.chunks_exact(self.channels).zip(iter) {
func(&v[channel], u)
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, T> {
data: &'a [&'a [T]],
start: usize,
end: usize,
}
impl<'a, T> Planar<'a, T> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [T]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, T> Samples<'a, T> for Planar<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.data.len());
for (v, u) in self.data[channel][self.start..self.end].iter().zip(iter) {
func(v, u)
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
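// Illustrative only: `split_at` partitions planar data by frame index
// without copying; both halves borrow the same channel slices.
#[test]
fn planar_split_sketch() {
    let ch0 = [0i16, 1, 2, 3];
    let ch1 = [10i16, 11, 12, 13];
    let channels: [&[i16]; 2] = [&ch0, &ch1];
    let samples = Planar::new(&channels[..]).ok().expect("equal lengths");
    let (head, tail) = samples.split_at(1);
    assert_eq!((head.frames(), tail.frames()), (1, 3));
    assert_eq!(tail.channels(), 2);
}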
/// Trait for converting samples into f32 in the range [-1, 1].
pub trait AsF32: Copy {
fn as_f32_scaled(self) -> f32;
}
impl AsF32 for i16 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i16::MIN as f32))
}
}
impl AsF32 for i32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i32::MIN as f32))
}
}
impl AsF32 for f32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self
}
}
impl AsF32 for f64 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32
}
}
/// Trait for converting samples into f64 in the range [-1, 1].
pub trait AsF64: AsF32 + Copy + PartialOrd {
const MAX: f64;
fn as_f64(self) -> f64;
#[inline]
fn as_f64_scaled(self) -> f64 {
self.as_f64() / Self::MAX
}
}
impl AsF64 for i16 {
const MAX: f64 = -(std::i16::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for i32 {
const MAX: f64 = -(std::i32::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f32 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f64 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self
}
}
#[cfg(test)]
pub mod tests {
#[derive(Clone, Debug)]
pub struct Signal<T: FromF32> {
pub data: Vec<T>,
pub channels: u32,
pub rate: u32,
}
pub trait FromF32: Copy + Clone + std::fmt::Debug + Send + Sync + 'static {
fn from_f32(val: f32) -> Self;
}
impl FromF32 for i16 {
fn from_f32(val: f32) -> Self {
(val * (std::i16::MAX - 1) as f32) as i16
}
}
impl FromF32 for i32 {
fn from_f32(val: f32) -> Self {
(val * (std::i32::MAX - 1) as f32) as i32
}
}
impl FromF32 for f32 {
fn from_f32(val: f32) -> Self {
val
}
}
impl FromF32 for f64 {
fn from_f32(val: f32) -> Self {
val as f64
}
}
impl<T: FromF32 + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<T> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![T::from_f32(0.0); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = T::from_f32(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
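// Shrinking strategy (see the iterator below): first yield prefixes of the
// failing signal with doubling length; once exhausted, restart the same
// search collapsed to a single channel.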
struct SignalShrinker<A: FromF32> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromF32 + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromF32 + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn ne | mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by removing size elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
}
| xt(& | identifier_name |
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
/// Convert linear energy to logarithmic loudness.
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / f64::ln(10.0)) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, T: 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a T));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and because of that we wouldn't get nice optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a T, U),
);
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, T> {
/// Interleaved sample data.
data: &'a [T],
/// Number of channels.
channels: usize,
}
impl<'a, T> Interleaved<'a, T> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [T], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, T> Samples<'a, T> for Interleaved<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.channels);
for (v, u) in self.data.chunks_exact(self.channels).zip(iter) {
func(&v[channel], u)
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, T> {
data: &'a [&'a [T]],
start: usize,
end: usize,
}
impl<'a, T> Planar<'a, T> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [T]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, T> Samples<'a, T> for Planar<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.data.len());
for (v, u) in self.data[channel][self.start..self.end].iter().zip(iter) {
func(v, u)
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
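// Illustrative sketch (not part of the original source): `split_at` keeps the
// underlying channel slices shared and only moves the start/end window, so
// splitting four frames at 1 yields windows of one and three frames per channel.
fn _planar_split_example() {
let left = [0.0f32, 1.0, 2.0, 3.0];
let right = [4.0f32, 5.0, 6.0, 7.0];
let channels: [&[f32]; 2] = [&left, &right];
let planar = Planar::new(&channels).unwrap();
let (head, tail) = planar.split_at(1);
assert_eq!((head.frames(), tail.frames()), (1, 3));
}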
/// Trait for converting samples into f32 in the range [-1, 1].
pub trait AsF32: Copy {
fn as_f32_scaled(self) -> f32;
}
impl AsF32 for i16 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i16::MIN as f32))
}
}
impl AsF32 for i32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i32::MIN as f32))
}
}
impl AsF32 for f32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
|
impl AsF32 for f64 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32
}
}
/// Trait for converting samples into f64 in the range [-1, 1].
pub trait AsF64: AsF32 + Copy + PartialOrd {
const MAX: f64;
fn as_f64(self) -> f64;
#[inline]
fn as_f64_scaled(self) -> f64 {
self.as_f64() / Self::MAX
}
}
impl AsF64 for i16 {
const MAX: f64 = -(std::i16::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for i32 {
const MAX: f64 = -(std::i32::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f32 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f64 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self
}
}
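// Illustrative sketch (not part of the original source): integer samples are
// divided by -(MIN) so that the most negative value lands exactly on -1.0,
// while float samples pass through unscaled because their MAX is 1.0.
fn _scaling_example() {
assert_eq!(std::i16::MIN.as_f32_scaled(), -1.0);
assert_eq!(std::i32::MIN.as_f64_scaled(), -1.0);
assert_eq!(0.5f32.as_f64_scaled(), 0.5);
}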
#[cfg(test)]
pub mod tests {
#[derive(Clone, Debug)]
pub struct Signal<T: FromF32> {
pub data: Vec<T>,
pub channels: u32,
pub rate: u32,
}
pub trait FromF32: Copy + Clone + std::fmt::Debug + Send + Sync + 'static {
fn from_f32(val: f32) -> Self;
}
impl FromF32 for i16 {
fn from_f32(val: f32) -> Self {
(val * (std::i16::MAX - 1) as f32) as i16
}
}
impl FromF32 for i32 {
fn from_f32(val: f32) -> Self {
(val * (std::i32::MAX - 1) as f32) as i32
}
}
impl FromF32 for f32 {
fn from_f32(val: f32) -> Self {
val
}
}
impl FromF32 for f64 {
fn from_f32(val: f32) -> Self {
val as f64
}
}
impl<T: FromF32 + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<T> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![T::from_f32(0.0); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = T::from_f32(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
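// Illustrative sketch (not part of the original source): a property test can
// consume `Signal` directly; the property below holds by construction because
// the generator allocates exactly `num_frames * channels` samples.
fn _signal_property_example() {
fn frames_times_channels_matches(signal: Signal<i16>) -> bool {
signal.data.len() % signal.channels as usize == 0
}
quickcheck::quickcheck(frames_times_channels_matches as fn(Signal<i16>) -> bool);
}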
struct SignalShrinker<A: FromF32> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromF32 + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromF32 + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by keeping only `size` elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
self.seed.channels as usize
} else {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
}
| self
}
} | identifier_body |
utils.rs | // Copyright (c) 2011 Jan Kokemüller
// Copyright (c) 2020 Sebastian Dröge <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
/// Convert linear energy to logarithmic loudness.
pub fn energy_to_loudness(energy: f64) -> f64 {
// The non-test version is faster and more accurate but gives
// slightly different results than the C version and fails the
// tests because of that.
#[cfg(test)]
{
10.0 * (f64::ln(energy) / f64::ln(10.0)) - 0.691
}
#[cfg(not(test))]
{
10.0 * f64::log10(energy) - 0.691
}
}
/// Trait for abstracting over interleaved and planar samples.
pub trait Samples<'a, T: 'a>: Sized {
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and without them we wouldn't get nice iterator optimizations
fn foreach_sample(&self, channel: usize, func: impl FnMut(&'a T));
/// Call the given closure for each sample of the given channel.
// FIXME: Workaround for TrustedLen / TrustedRandomAccess being unstable
// and without them we wouldn't get nice iterator optimizations
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
func: impl FnMut(&'a T, U),
);
/// Number of frames.
fn frames(&self) -> usize;
/// Number of channels.
fn channels(&self) -> usize;
/// Split into two at the given sample.
fn split_at(self, sample: usize) -> (Self, Self);
}
/// Struct representing interleaved samples.
pub struct Interleaved<'a, T> {
/// Interleaved sample data.
data: &'a [T],
/// Number of channels.
channels: usize,
}
impl<'a, T> Interleaved<'a, T> {
/// Create a new wrapper around the interleaved channels and do a sanity check.
pub fn new(data: &'a [T], channels: usize) -> Result<Self, crate::Error> {
if channels == 0 {
return Err(crate::Error::NoMem);
}
if data.len() % channels != 0 {
return Err(crate::Error::NoMem);
}
Ok(Interleaved { data, channels })
}
}
impl<'a, T> Samples<'a, T> for Interleaved<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.channels);
for v in self.data.chunks_exact(self.channels) {
func(&v[channel])
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.channels);
for (v, u) in self.data.chunks_exact(self.channels).zip(iter) {
func(&v[channel], u)
}
}
#[inline]
fn frames(&self) -> usize {
self.data.len() / self.channels
}
#[inline]
fn channels(&self) -> usize {
self.channels
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(sample * self.channels <= self.data.len());
let (fst, snd) = self.data.split_at(sample * self.channels);
(
Interleaved {
data: fst,
channels: self.channels,
},
Interleaved {
data: snd,
channels: self.channels,
},
)
}
}
/// Struct representing planar samples.
pub struct Planar<'a, T> {
data: &'a [&'a [T]],
start: usize,
end: usize,
}
impl<'a, T> Planar<'a, T> {
/// Create a new wrapper around the planar channels and do a sanity check.
pub fn new(data: &'a [&'a [T]]) -> Result<Self, crate::Error> {
if data.is_empty() {
return Err(crate::Error::NoMem);
}
if data.iter().any(|d| data[0].len() != d.len()) {
return Err(crate::Error::NoMem);
}
Ok(Planar {
data,
start: 0,
end: data[0].len(),
})
}
}
impl<'a, T> Samples<'a, T> for Planar<'a, T> {
#[inline]
fn foreach_sample(&self, channel: usize, mut func: impl FnMut(&'a T)) {
assert!(channel < self.data.len());
for v in &self.data[channel][self.start..self.end] {
func(v)
}
}
#[inline]
fn foreach_sample_zipped<U>(
&self,
channel: usize,
iter: impl Iterator<Item = U>,
mut func: impl FnMut(&'a T, U),
) {
assert!(channel < self.data.len());
for (v, u) in self.data[channel][self.start..self.end].iter().zip(iter) {
func(v, u)
}
}
#[inline]
fn frames(&self) -> usize {
self.end - self.start
}
#[inline]
fn channels(&self) -> usize {
self.data.len()
}
#[inline]
fn split_at(self, sample: usize) -> (Self, Self) {
assert!(self.start + sample <= self.end);
(
Planar {
data: self.data,
start: self.start,
end: self.start + sample,
},
Planar {
data: self.data,
start: self.start + sample,
end: self.end,
},
)
}
}
/// Trait for converting samples into f32 in the range [-1, 1].
pub trait AsF32: Copy {
fn as_f32_scaled(self) -> f32;
}
impl AsF32 for i16 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i16::MIN as f32))
}
}
impl AsF32 for i32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32 / (-(std::i32::MIN as f32))
}
}
impl AsF32 for f32 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self
}
}
impl AsF32 for f64 {
#[inline]
fn as_f32_scaled(self) -> f32 {
self as f32
}
}
/// Trait for converting samples into f64 in the range [-1, 1].
pub trait AsF64: AsF32 + Copy + PartialOrd {
const MAX: f64;
fn as_f64(self) -> f64;
#[inline]
fn as_f64_scaled(self) -> f64 {
self.as_f64() / Self::MAX
}
}
impl AsF64 for i16 {
const MAX: f64 = -(std::i16::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for i32 {
const MAX: f64 = -(std::i32::MIN as f64);
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f32 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self as f64
}
}
impl AsF64 for f64 {
const MAX: f64 = 1.0;
#[inline]
fn as_f64(self) -> f64 {
self
}
}
#[cfg(test)]
pub mod tests {
#[derive(Clone, Debug)]
pub struct Signal<T: FromF32> {
pub data: Vec<T>,
pub channels: u32,
pub rate: u32,
}
pub trait FromF32: Copy + Clone + std::fmt::Debug + Send + Sync + 'static {
fn from_f32(val: f32) -> Self;
}
impl FromF32 for i16 {
fn from_f32(val: f32) -> Self {
(val * (std::i16::MAX - 1) as f32) as i16
}
}
impl FromF32 for i32 {
fn from_f32(val: f32) -> Self {
(val * (std::i32::MAX - 1) as f32) as i32
}
}
impl FromF32 for f32 {
fn from_f32(val: f32) -> Self {
val
}
}
impl FromF32 for f64 {
fn from_f32(val: f32) -> Self {
val as f64
}
}
impl<T: FromF32 + quickcheck::Arbitrary> quickcheck::Arbitrary for Signal<T> {
fn arbitrary<G: quickcheck::Gen>(g: &mut G) -> Self {
use rand::Rng;
let channels = g.gen_range(1, 16);
let rate = g.gen_range(16_000, 224_000);
let num_frames = (rate as f64 * g.gen_range(0.0, 5.0)) as usize;
let max = g.gen_range(0.0, 1.0);
let freqs = [
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
g.gen_range(20.0, 16_000.0),
];
let volumes = [
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
g.gen_range(0.0, 1.0),
];
let volume_scale = 1.0 / volumes.iter().sum::<f32>();
let mut accumulators = [0.0; 4];
let steps = [
2.0 * std::f32::consts::PI * freqs[0] / rate as f32,
2.0 * std::f32::consts::PI * freqs[1] / rate as f32,
2.0 * std::f32::consts::PI * freqs[2] / rate as f32,
2.0 * std::f32::consts::PI * freqs[3] / rate as f32,
];
let mut data = vec![T::from_f32(0.0); num_frames * channels as usize];
for frame in data.chunks_exact_mut(channels as usize) {
let val = max
* (f32::sin(accumulators[0]) * volumes[0]
+ f32::sin(accumulators[1]) * volumes[1]
+ f32::sin(accumulators[2]) * volumes[2]
+ f32::sin(accumulators[3]) * volumes[3])
/ volume_scale;
for sample in frame.iter_mut() {
*sample = T::from_f32(val);
}
for (acc, step) in accumulators.iter_mut().zip(steps.iter()) {
*acc += step;
}
}
Signal {
data,
channels,
rate,
}
}
fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
SignalShrinker::boxed(self.clone())
}
}
struct SignalShrinker<A: FromF32> {
seed: Signal<A>,
/// How many elements to take
size: usize,
/// Whether we tried with one channel already
tried_one_channel: bool,
}
impl<A: FromF32 + quickcheck::Arbitrary> SignalShrinker<A> {
fn boxed(seed: Signal<A>) -> Box<dyn Iterator<Item = Signal<A>>> {
let channels = seed.channels;
Box::new(SignalShrinker {
seed,
size: 0,
tried_one_channel: channels == 1,
})
}
}
impl<A> Iterator for SignalShrinker<A>
where
A: FromF32 + quickcheck::Arbitrary,
{
type Item = Signal<A>;
fn next(&mut self) -> Option<Signal<A>> {
if self.size < self.seed.data.len() {
// Generate a smaller vector by keeping only `size` elements
let xs1 = if self.tried_one_channel {
Vec::from(&self.seed.data[..self.size])
} else {
self.seed
.data
.iter()
.cloned()
.step_by(self.seed.channels as usize)
.take(self.size)
.collect()
};
if self.size == 0 {
self.size = if self.tried_one_channel {
| lse {
1
};
} else {
self.size *= 2;
}
Some(Signal {
data: xs1,
channels: if self.tried_one_channel {
self.seed.channels
} else {
1
},
rate: self.seed.rate,
})
} else if !self.tried_one_channel {
self.tried_one_channel = true;
self.size = 0;
self.next()
} else {
None
}
}
}
}
| self.seed.channels as usize
} e | conditional_block |
router.rs | use anyhow::{bail, Result};
use indexmap::indexmap;
use serde::Deserialize;
use serde_json::json;
use turbo_tasks::{
primitives::{JsonValueVc, StringsVc},
CompletionVc, CompletionsVc, Value,
};
use turbo_tasks_fs::{
json::parse_json_rope_with_source_context, to_sys_path, File, FileSystemPathVc,
};
use turbopack::{evaluate_context::node_evaluate_asset_context, transition::TransitionsByNameVc};
use turbopack_core::{
asset::AssetVc,
changed::any_content_changed,
chunk::ChunkingContext,
context::{AssetContext, AssetContextVc},
environment::{EnvironmentIntention::Middleware, ServerAddrVc},
ident::AssetIdentVc,
issue::IssueVc,
reference_type::{EcmaScriptModulesReferenceSubType, ReferenceType},
resolve::{find_context_file, FindContextFileResult},
source_asset::SourceAssetVc,
virtual_asset::VirtualAssetVc,
};
use turbopack_dev::DevChunkingContextVc;
use turbopack_ecmascript::{
EcmascriptInputTransform, EcmascriptInputTransformsVc, EcmascriptModuleAssetType,
EcmascriptModuleAssetVc, InnerAssetsVc, OptionEcmascriptModuleAssetVc,
};
use turbopack_node::{
evaluate::{evaluate, JavaScriptValue},
execution_context::{ExecutionContext, ExecutionContextVc},
StructuredError,
};
use crate::{
embed_js::{next_asset, next_js_file},
next_config::NextConfigVc,
next_edge::{
context::{get_edge_compile_time_info, get_edge_resolve_options_context},
transition::NextEdgeTransition,
},
next_import_map::get_next_build_import_map,
next_server::context::{get_server_module_options_context, ServerContextType},
util::{parse_config_from_source, NextSourceConfigVc},
};
#[turbo_tasks::function]
fn next_configs() -> StringsVc {
StringsVc::cell(
["next.config.mjs", "next.config.js"]
.into_iter()
.map(ToOwned::to_owned)
.collect(),
)
}
#[turbo_tasks::function]
async fn middleware_files(page_extensions: StringsVc) -> Result<StringsVc> {
let extensions = page_extensions.await?;
let files = ["middleware.", "src/middleware."]
.into_iter()
.flat_map(|f| {
extensions
.iter()
.map(move |ext| String::from(f) + ext.as_str())
})
.collect();
Ok(StringsVc::cell(files))
}
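// Illustrative sketch (not part of the original source): the same expansion
// logic without the turbo_tasks wrapper, to make the ordering explicit. Every
// extension is appended to "middleware." before "src/middleware.", so
// hypothetical extensions ["js", "ts"] would give ["middleware.js",
// "middleware.ts", "src/middleware.js", "src/middleware.ts"].
fn _middleware_files_expansion(extensions: &[String]) -> Vec<String> {
["middleware.", "src/middleware."]
.into_iter()
.flat_map(|f| {
extensions
.iter()
.map(move |ext| String::from(f) + ext.as_str())
})
.collect()
}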
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct RouterRequest {
pub method: String,
pub pathname: String,
pub raw_query: String,
pub raw_headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct RewriteResponse {
pub url: String,
pub headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct MiddlewareHeadersResponse {
pub status_code: u16,
pub headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
pub struct MiddlewareBodyResponse(pub Vec<u8>);
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
pub struct FullMiddlewareResponse {
pub headers: MiddlewareHeadersResponse,
pub body: Vec<u8>,
}
#[derive(Deserialize)]
#[serde(tag = "type", rename_all = "kebab-case")]
enum RouterIncomingMessage {
Rewrite {
data: RewriteResponse,
},
// TODO: Implement
#[allow(dead_code)]
MiddlewareHeaders {
data: MiddlewareHeadersResponse,
},
// TODO: Implement
#[allow(dead_code)]
MiddlewareBody {
data: MiddlewareBodyResponse,
},
FullMiddleware {
data: FullMiddlewareResponse,
},
None,
Error(StructuredError),
}
#[derive(Debug)]
#[turbo_tasks::value]
pub enum RouterResult {
Rewrite(RewriteResponse),
FullMiddleware(FullMiddlewareResponse),
None,
Error,
}
impl From<RouterIncomingMessage> for RouterResult {
fn from(value: RouterIncomingMessage) -> Self {
match value {
RouterIncomingMessage::Rewrite { data } => Self::Rewrite(data),
RouterIncomingMessage::FullMiddleware { data } => Self::FullMiddleware(data),
RouterIncomingMessage::None => Self::None,
_ => Self::Error,
}
}
}
#[turbo_tasks::function]
async fn get_config(
context: AssetContextVc,
project_path: FileSystemPathVc,
configs: StringsVc,
) -> Result<OptionEcmascriptModuleAssetVc> {
let find_config_result = find_context_file(project_path, configs);
let config_asset = match &*find_config_result.await? {
FindContextFileResult::Found(config_path, _) => Some(as_es_module_asset(
SourceAssetVc::new(*config_path).as_asset(),
context,
)),
FindContextFileResult::NotFound(_) => None,
};
Ok(OptionEcmascriptModuleAssetVc::cell(config_asset))
}
fn as_es_module_asset(asset: AssetVc, context: AssetContextVc) -> EcmascriptModuleAssetVc {
EcmascriptModuleAssetVc::new(
asset,
context,
Value::new(EcmascriptModuleAssetType::Typescript),
EcmascriptInputTransformsVc::cell(vec![EcmascriptInputTransform::TypeScript {
use_define_for_class_fields: false,
}]),
context.compile_time_info(),
)
}
#[turbo_tasks::function]
async fn next_config_changed(
context: AssetContextVc,
project_path: FileSystemPathVc,
) -> Result<CompletionVc> {
let next_config = get_config(context, project_path, next_configs()).await?;
Ok(if let Some(c) = *next_config {
any_content_changed(c.into())
} else {
CompletionVc::immutable()
})
}
#[turbo_tasks::function]
async fn config_assets(
context: AssetContextVc,
project_path: FileSystemPathVc,
page_extensions: StringsVc,
) -> Result<InnerAssetsVc> {
let middleware_config =
get_config(context, project_path, middleware_files(page_extensions)).await?;
// The router.ts file expects a manifest of chunks for the middleware. If there
// is no middleware file, then we need to generate a default empty manifest
// and we cannot process it with the next-edge transition because it
// requires a real file for some reason.
let (manifest, config) = match &*middleware_config {
Some(c) => {
let manifest = context.with_transition("next-edge").process(
c.as_asset(),
Value::new(ReferenceType::EcmaScriptModules(
EcmaScriptModulesReferenceSubType::Undefined,
)),
);
let config = parse_config_from_source(c.as_asset());
(manifest, config)
}
None => {
let manifest = as_es_module_asset(
VirtualAssetVc::new(
project_path.join("middleware.js"),
File::from("export default [];").into(),
)
.as_asset(),
context,
)
.as_asset();
let config = NextSourceConfigVc::default();
(manifest, config)
}
};
let config_asset = as_es_module_asset(
VirtualAssetVc::new(
project_path.join("middleware_config.js"),
File::from(format!(
"export default {};",
json!({ "matcher": &config.await?.matcher })
))
.into(),
)
.as_asset(),
context,
)
.as_asset();
Ok(InnerAssetsVc::cell(indexmap! {
"MIDDLEWARE_CHUNK_GROUP".to_string() => manifest,
"MIDDLEWARE_CONFIG".to_string() => config_asset,
}))
}
#[turbo_tasks::function]
fn route_executor(context: AssetContextVc, configs: InnerAssetsVc) -> AssetVc {
EcmascriptModuleAssetVc::new_with_inner_assets(
next_asset("entry/router.ts"),
context,
Value::new(EcmascriptModuleAssetType::Typescript),
EcmascriptInputTransformsVc::cell(vec![EcmascriptInputTransform::TypeScript {
use_define_for_class_fields: false,
}]),
context.compile_time_info(),
configs,
)
.into()
}
#[turbo_tasks::function]
fn edge_transition_map(
server_addr: ServerAddrVc,
project_path: FileSystemPathVc,
output_path: FileSystemPathVc,
next_config: NextConfigVc,
execution_context: ExecutionContextVc,
) -> TransitionsByNameVc {
let edge_compile_time_info = get_edge_compile_time_info(server_addr, Value::new(Middleware));
let edge_chunking_context = DevChunkingContextVc::builder(
project_path,
output_path.join("edge"),
output_path.join("edge/chunks"),
output_path.join("edge/assets"),
edge_compile_time_info.environment(),
)
.build();
let edge_resolve_options_context = get_edge_resolve_options_context(
project_path,
Value::new(ServerContextType::Middleware),
next_config,
execution_context,
);
let server_module_options_context = get_server_module_options_context(
project_path,
execution_context,
Value::new(ServerContextType::Middleware),
next_config,
);
let next_edge_transition = NextEdgeTransition {
edge_compile_time_info,
edge_chunking_context,
edge_module_options_context: Some(server_module_options_context),
edge_resolve_options_context,
output_path: output_path.root(),
base_path: project_path,
bootstrap_file: next_js_file("entry/edge-bootstrap.ts"),
entry_name: "middleware".to_string(),
}
.cell()
.into();
TransitionsByNameVc::cell(
[("next-edge".to_string(), next_edge_transition)]
.into_iter()
.collect(),
)
}
#[turbo_tasks::function]
pub async fn route(
execution_context: ExecutionContextVc,
request: RouterRequestVc,
next_config: NextConfigVc,
server_addr: ServerAddrVc,
routes_changed: CompletionVc,
) -> Result<RouterResultVc> {
let RouterRequest {
ref method,
ref pathname,
..
} = *request.await?;
IssueVc::attach_description(
format!("Next.js Routing for {} {}", method, pathname),
route_internal(
execution_context,
request,
next_config,
server_addr,
routes_changed,
),
)
.await
}
#[turbo_tasks::function]
async fn | (
execution_context: ExecutionContextVc,
request: RouterRequestVc,
next_config: NextConfigVc,
server_addr: ServerAddrVc,
routes_changed: CompletionVc,
) -> Result<RouterResultVc> {
let ExecutionContext {
project_path,
chunking_context,
env,
} = *execution_context.await?;
let context = node_evaluate_asset_context(
project_path,
Some(get_next_build_import_map()),
Some(edge_transition_map(
server_addr,
project_path,
chunking_context.output_root(),
next_config,
execution_context,
)),
);
let configs = config_assets(context, project_path, next_config.page_extensions());
let router_asset = route_executor(context, configs);
// This invalidates the router when the next config changes
let next_config_changed = next_config_changed(context, project_path);
let request = serde_json::value::to_value(&*request.await?)?;
let Some(dir) = to_sys_path(project_path).await? else {
bail!("Next.js requires a disk path to check for valid routes");
};
let result = evaluate(
router_asset,
project_path,
env,
AssetIdentVc::from_path(project_path),
context,
chunking_context.with_layer("router"),
None,
vec![
JsonValueVc::cell(request),
JsonValueVc::cell(dir.to_string_lossy().into()),
],
CompletionsVc::all(vec![next_config_changed, routes_changed]),
/* debug */ false,
)
.await?;
match &*result {
JavaScriptValue::Value(val) => {
let result: RouterIncomingMessage = parse_json_rope_with_source_context(val)?;
Ok(RouterResult::from(result).cell())
}
JavaScriptValue::Error => Ok(RouterResult::Error.cell()),
JavaScriptValue::Stream(_) => {
unimplemented!("Streams are not supported yet");
}
}
}
| route_internal | identifier_name |
router.rs | use anyhow::{bail, Result};
use indexmap::indexmap;
use serde::Deserialize;
use serde_json::json;
use turbo_tasks::{
primitives::{JsonValueVc, StringsVc},
CompletionVc, CompletionsVc, Value,
};
use turbo_tasks_fs::{
json::parse_json_rope_with_source_context, to_sys_path, File, FileSystemPathVc,
};
use turbopack::{evaluate_context::node_evaluate_asset_context, transition::TransitionsByNameVc};
use turbopack_core::{
asset::AssetVc,
changed::any_content_changed,
chunk::ChunkingContext,
context::{AssetContext, AssetContextVc},
environment::{EnvironmentIntention::Middleware, ServerAddrVc},
ident::AssetIdentVc,
issue::IssueVc,
reference_type::{EcmaScriptModulesReferenceSubType, ReferenceType},
resolve::{find_context_file, FindContextFileResult},
source_asset::SourceAssetVc,
virtual_asset::VirtualAssetVc,
};
use turbopack_dev::DevChunkingContextVc;
use turbopack_ecmascript::{
EcmascriptInputTransform, EcmascriptInputTransformsVc, EcmascriptModuleAssetType,
EcmascriptModuleAssetVc, InnerAssetsVc, OptionEcmascriptModuleAssetVc,
};
use turbopack_node::{
evaluate::{evaluate, JavaScriptValue},
execution_context::{ExecutionContext, ExecutionContextVc},
StructuredError,
};
use crate::{
embed_js::{next_asset, next_js_file},
next_config::NextConfigVc,
next_edge::{
context::{get_edge_compile_time_info, get_edge_resolve_options_context},
transition::NextEdgeTransition,
},
next_import_map::get_next_build_import_map,
next_server::context::{get_server_module_options_context, ServerContextType},
util::{parse_config_from_source, NextSourceConfigVc},
};
#[turbo_tasks::function]
fn next_configs() -> StringsVc {
StringsVc::cell(
["next.config.mjs", "next.config.js"]
.into_iter()
.map(ToOwned::to_owned)
.collect(),
)
}
#[turbo_tasks::function]
async fn middleware_files(page_extensions: StringsVc) -> Result<StringsVc> {
let extensions = page_extensions.await?;
let files = ["middleware.", "src/middleware."]
.into_iter()
.flat_map(|f| {
extensions
.iter()
.map(move |ext| String::from(f) + ext.as_str())
})
.collect();
Ok(StringsVc::cell(files))
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct RouterRequest {
pub method: String,
pub pathname: String,
pub raw_query: String,
pub raw_headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct RewriteResponse {
pub url: String,
pub headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
#[serde(rename_all = "camelCase")]
pub struct MiddlewareHeadersResponse {
pub status_code: u16,
pub headers: Vec<(String, String)>,
}
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
pub struct MiddlewareBodyResponse(pub Vec<u8>);
#[turbo_tasks::value(shared)]
#[derive(Debug, Clone, Default)]
pub struct FullMiddlewareResponse {
pub headers: MiddlewareHeadersResponse,
pub body: Vec<u8>,
}
#[derive(Deserialize)]
#[serde(tag = "type", rename_all = "kebab-case")]
enum RouterIncomingMessage {
Rewrite {
data: RewriteResponse,
},
// TODO: Implement
#[allow(dead_code)]
MiddlewareHeaders {
data: MiddlewareHeadersResponse,
},
// TODO: Implement
#[allow(dead_code)]
MiddlewareBody {
data: MiddlewareBodyResponse,
},
FullMiddleware {
data: FullMiddlewareResponse,
},
None,
Error(StructuredError),
}
#[derive(Debug)]
#[turbo_tasks::value]
pub enum RouterResult {
Rewrite(RewriteResponse),
FullMiddleware(FullMiddlewareResponse),
None,
Error,
}
impl From<RouterIncomingMessage> for RouterResult {
fn from(value: RouterIncomingMessage) -> Self {
match value {
RouterIncomingMessage::Rewrite { data } => Self::Rewrite(data),
RouterIncomingMessage::FullMiddleware { data } => Self::FullMiddleware(data),
RouterIncomingMessage::None => Self::None,
_ => Self::Error,
}
}
}
#[turbo_tasks::function]
async fn get_config(
context: AssetContextVc,
project_path: FileSystemPathVc,
configs: StringsVc,
) -> Result<OptionEcmascriptModuleAssetVc> {
let find_config_result = find_context_file(project_path, configs);
let config_asset = match &*find_config_result.await? {
FindContextFileResult::Found(config_path, _) => Some(as_es_module_asset(
SourceAssetVc::new(*config_path).as_asset(),
context,
)),
FindContextFileResult::NotFound(_) => None,
};
Ok(OptionEcmascriptModuleAssetVc::cell(config_asset))
}
fn as_es_module_asset(asset: AssetVc, context: AssetContextVc) -> EcmascriptModuleAssetVc {
EcmascriptModuleAssetVc::new(
asset,
context,
Value::new(EcmascriptModuleAssetType::Typescript),
EcmascriptInputTransformsVc::cell(vec![EcmascriptInputTransform::TypeScript {
use_define_for_class_fields: false,
}]),
context.compile_time_info(),
)
}
#[turbo_tasks::function]
async fn next_config_changed(
context: AssetContextVc,
project_path: FileSystemPathVc,
) -> Result<CompletionVc> {
let next_config = get_config(context, project_path, next_configs()).await?;
Ok(if let Some(c) = *next_config {
any_content_changed(c.into())
} else {
CompletionVc::immutable()
})
}
#[turbo_tasks::function]
async fn config_assets(
context: AssetContextVc,
project_path: FileSystemPathVc,
page_extensions: StringsVc,
) -> Result<InnerAssetsVc> {
let middleware_config =
get_config(context, project_path, middleware_files(page_extensions)).await?;
// The router.ts file expects a manifest of chunks for the middleware. If there
// is no middleware file, then we need to generate a default empty manifest
// and we cannot process it with the next-edge transition because it
// requires a real file for some reason.
let (manifest, config) = match &*middleware_config {
Some(c) => {
let manifest = context.with_transition("next-edge").process(
c.as_asset(),
Value::new(ReferenceType::EcmaScriptModules(
EcmaScriptModulesReferenceSubType::Undefined,
)),
);
let config = parse_config_from_source(c.as_asset());
(manifest, config)
}
None => {
let manifest = as_es_module_asset(
VirtualAssetVc::new(
project_path.join("middleware.js"),
File::from("export default [];").into(),
)
.as_asset(),
context,
)
.as_asset();
let config = NextSourceConfigVc::default();
(manifest, config)
}
};
let config_asset = as_es_module_asset(
VirtualAssetVc::new(
project_path.join("middleware_config.js"),
File::from(format!(
"export default {};",
json!({ "matcher": &config.await?.matcher })
))
.into(),
)
.as_asset(),
context,
)
.as_asset();
Ok(InnerAssetsVc::cell(indexmap! {
"MIDDLEWARE_CHUNK_GROUP".to_string() => manifest,
"MIDDLEWARE_CONFIG".to_string() => config_asset,
}))
}
#[turbo_tasks::function]
fn route_executor(context: AssetContextVc, configs: InnerAssetsVc) -> AssetVc {
EcmascriptModuleAssetVc::new_with_inner_assets(
next_asset("entry/router.ts"),
context,
Value::new(EcmascriptModuleAssetType::Typescript),
EcmascriptInputTransformsVc::cell(vec![EcmascriptInputTransform::TypeScript {
use_define_for_class_fields: false,
}]),
context.compile_time_info(),
configs,
)
.into()
}
#[turbo_tasks::function]
fn edge_transition_map(
server_addr: ServerAddrVc,
project_path: FileSystemPathVc,
output_path: FileSystemPathVc,
next_config: NextConfigVc,
execution_context: ExecutionContextVc,
) -> TransitionsByNameVc {
let edge_compile_time_info = get_edge_compile_time_info(server_addr, Value::new(Middleware));
let edge_chunking_context = DevChunkingContextVc::builder(
project_path,
output_path.join("edge"),
output_path.join("edge/chunks"),
output_path.join("edge/assets"),
edge_compile_time_info.environment(),
)
.build();
let edge_resolve_options_context = get_edge_resolve_options_context(
project_path,
Value::new(ServerContextType::Middleware),
next_config,
execution_context,
);
let server_module_options_context = get_server_module_options_context(
project_path,
execution_context,
Value::new(ServerContextType::Middleware),
next_config,
);
let next_edge_transition = NextEdgeTransition {
edge_compile_time_info,
edge_chunking_context,
edge_module_options_context: Some(server_module_options_context),
edge_resolve_options_context,
output_path: output_path.root(),
base_path: project_path,
bootstrap_file: next_js_file("entry/edge-bootstrap.ts"),
entry_name: "middleware".to_string(),
}
.cell()
.into();
TransitionsByNameVc::cell(
[("next-edge".to_string(), next_edge_transition)]
.into_iter()
.collect(),
)
}
#[turbo_tasks::function]
pub async fn route(
execution_context: ExecutionContextVc,
request: RouterRequestVc,
next_config: NextConfigVc,
server_addr: ServerAddrVc,
routes_changed: CompletionVc,
) -> Result<RouterResultVc> {
let RouterRequest {
ref method,
ref pathname,
..
} = *request.await?;
IssueVc::attach_description(
format!("Next.js Routing for {} {}", method, pathname),
route_internal(
execution_context,
request,
next_config,
server_addr,
routes_changed,
),
)
.await
}
#[turbo_tasks::function]
async fn route_internal(
execution_context: ExecutionContextVc,
request: RouterRequestVc,
next_config: NextConfigVc,
server_addr: ServerAddrVc,
routes_changed: CompletionVc,
) -> Result<RouterResultVc> {
let ExecutionContext {
project_path,
chunking_context,
env,
} = *execution_context.await?;
let context = node_evaluate_asset_context(
project_path,
Some(get_next_build_import_map()),
Some(edge_transition_map(
server_addr,
project_path,
chunking_context.output_root(),
next_config,
execution_context,
)),
);
let configs = config_assets(context, project_path, next_config.page_extensions());
let router_asset = route_executor(context, configs);
// This invalidates the router when the next config changes
let next_config_changed = next_config_changed(context, project_path);
let request = serde_json::value::to_value(&*request.await?)?;
let Some(dir) = to_sys_path(project_path).await? else {
bail!("Next.js requires a disk path to check for valid routes"); | AssetIdentVc::from_path(project_path),
context,
chunking_context.with_layer("router"),
None,
vec![
JsonValueVc::cell(request),
JsonValueVc::cell(dir.to_string_lossy().into()),
],
CompletionsVc::all(vec![next_config_changed, routes_changed]),
/* debug */ false,
)
.await?;
match &*result {
JavaScriptValue::Value(val) => {
let result: RouterIncomingMessage = parse_json_rope_with_source_context(val)?;
Ok(RouterResult::from(result).cell())
}
JavaScriptValue::Error => Ok(RouterResult::Error.cell()),
JavaScriptValue::Stream(_) => {
unimplemented!("Streams are not supported yet");
}
}
} | };
let result = evaluate(
router_asset,
project_path,
env, | random_line_split |
minimize.rs | : bool) -> Self {
Self {
delta,
allow_nondet,
}
}
pub fn with_delta(self, delta: f32) -> Self {
Self { delta,..self }
}
pub fn with_allow_nondet(self, allow_nondet: bool) -> Self {
Self {
allow_nondet,
..self
}
}
}
impl Default for MinimizeConfig {
fn default() -> Self {
Self {
delta: KSHORTESTDELTA,
allow_nondet: false,
}
}
}
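// Illustrative usage sketch (not part of the original source): the builders
// above consume and return `self`, so a custom configuration chains into a
// single expression.
fn _minimize_config_example() -> MinimizeConfig {
MinimizeConfig::default()
.with_delta(1e-5)
.with_allow_nondet(true)
}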
/// In place minimization of deterministic weighted automata and transducers,
/// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize<W, F>(ifst: &mut F) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
minimize_with_config(ifst, MinimizeConfig::default())
}
/// In place minimization of deterministic weighted automata and transducers, | W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
let delta = config.delta;
let allow_nondet = config.allow_nondet;
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR
| FstProperties::I_DETERMINISTIC
| FstProperties::WEIGHTED
| FstProperties::UNWEIGHTED,
)?;
let allow_acyclic_minimization = if props.contains(FstProperties::I_DETERMINISTIC) {
true
} else {
if !W::properties().contains(SemiringProperties::IDEMPOTENT) {
bail!("Cannot minimize a non-deterministic FST over a non-idempotent semiring")
} else if !allow_nondet {
bail!("Refusing to minimize a non-deterministic FST with allow_nondet = false")
}
false
};
if !props.contains(FstProperties::ACCEPTOR) {
// Weighted transducer
let mut to_gallic = ToGallicConverter {};
let mut gfst: VectorFst<GallicWeightLeft<W>> = weight_convert(ifst, &mut to_gallic)?;
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(
&mut gfst,
ReweightType::ReweightToInitial,
push_weights_config,
)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(&mut gfst, &quantize_mapper)?;
let encode_table = encode(&mut gfst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(&mut gfst, allow_acyclic_minimization)?;
decode(&mut gfst, encode_table)?;
let factor_opts: FactorWeightOptions = FactorWeightOptions {
delta: KDELTA,
mode: FactorWeightType::FACTOR_FINAL_WEIGHTS | FactorWeightType::FACTOR_ARC_WEIGHTS,
final_ilabel: 0,
final_olabel: 0,
increment_final_ilabel: false,
increment_final_olabel: false,
};
let fwfst: VectorFst<_> =
factor_weight::<_, VectorFst<GallicWeightLeft<W>>, _, _, GallicFactorLeft<W>>(
&gfst,
factor_opts,
)?;
let mut from_gallic = FromGallicConverter {
superfinal_label: EPS_LABEL,
};
*ifst = weight_convert(&fwfst, &mut from_gallic)?;
Ok(())
} else if props.contains(FstProperties::WEIGHTED) {
// Weighted acceptor
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(ifst, ReweightType::ReweightToInitial, push_weights_config)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(ifst, &quantize_mapper)?;
let encode_table = encode(ifst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(ifst, allow_acyclic_minimization)?;
decode(ifst, encode_table)
} else {
// Unweighted acceptor
acceptor_minimize(ifst, allow_acyclic_minimization)
}
}
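// Illustrative usage sketch (not part of the original source): a caller that
// wraps `minimize_with_config` with a custom delta; the bounds simply mirror
// those declared on `minimize` above.
fn _minimize_usage<W, F>(ifst: &mut F) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
minimize_with_config(ifst, MinimizeConfig::default().with_delta(KSHORTESTDELTA))
}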
/// In place minimization for weighted final state acceptor.
/// If `allow_acyclic_minimization` is true and the input is acyclic, then a specific
/// minimization is applied.
///
/// An error is returned if the input fst is not an unweighted acceptor.
pub fn acceptor_minimize<W: Semiring, F: MutableFst<W> + ExpandedFst<W>>(
ifst: &mut F,
allow_acyclic_minimization: bool,
) -> Result<()> {
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED | FstProperties::ACYCLIC,
)?;
if !props.contains(FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED) {
bail!("FST is not an unweighted acceptor");
}
connect(ifst)?;
if ifst.num_states() == 0 {
return Ok(());
}
if allow_acyclic_minimization && props.contains(FstProperties::ACYCLIC) {
// Acyclic minimization
tr_sort(ifst, ILabelCompare {});
let minimizer = AcyclicMinimizer::new(ifst)?;
merge_states(minimizer.get_partition(), ifst)?;
} else {
let p = cyclic_minimize(ifst)?;
merge_states(p, ifst)?;
}
tr_unique(ifst);
Ok(())
}
fn merge_states<W: Semiring, F: MutableFst<W>>(partition: Partition, fst: &mut F) -> Result<()> {
let mut state_map = vec![None; partition.num_classes()];
for (i, s) in state_map
.iter_mut()
.enumerate()
.take(partition.num_classes())
{
*s = partition.iter(i).next();
}
for c in 0..partition.num_classes() {
for s in partition.iter(c) {
if s == state_map[c].unwrap() {
let mut it_tr = fst.tr_iter_mut(s as StateId)?;
for idx_tr in 0..it_tr.len() {
let tr = unsafe { it_tr.get_unchecked(idx_tr) };
let nextstate =
state_map[partition.get_class_id(tr.nextstate as usize)].unwrap();
unsafe { it_tr.set_nextstate_unchecked(idx_tr, nextstate as StateId) };
}
} else {
let trs: Vec<_> = fst
.get_trs(s as StateId)?
.trs()
.iter()
.cloned()
.map(|mut tr| {
tr.nextstate = state_map[partition.get_class_id(tr.nextstate as usize)]
.unwrap() as StateId;
tr
})
.collect();
for tr in trs.into_iter() {
fst.add_tr(state_map[c].unwrap() as StateId, tr)?;
}
}
}
}
fst.set_start(
state_map[partition.get_class_id(fst.start().unwrap() as usize) as usize].unwrap()
as StateId,
)?;
connect(fst)?;
Ok(())
}
// Compute the height (distance) to the final state
pub fn fst_depth<W: Semiring, F: Fst<W>>(
fst: &F,
state_id_cour: StateId,
accessible_states: &mut HashSet<StateId>,
fully_examined_states: &mut HashSet<StateId>,
heights: &mut Vec<i32>,
) -> Result<()> {
accessible_states.insert(state_id_cour);
for _ in heights.len()..=(state_id_cour as usize) {
heights.push(-1);
}
let mut height_cur_state = 0;
for tr in fst.get_trs(state_id_cour)?.trs() {
let nextstate = tr.nextstate;
if !accessible_states.contains(&nextstate) {
fst_depth(
fst,
nextstate,
accessible_states,
fully_examined_states,
heights,
)?;
}
height_cur_state = max(height_cur_state, 1 + heights[nextstate as usize]);
}
fully_examined_states.insert(state_id_cour);
heights[state_id_cour as usize] = height_cur_state;
Ok(())
}
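// Illustrative sketch (not part of the original source): the same recurrence
// on a plain adjacency list. Each state's height is one more than the maximum
// height among its successors, and sinks (final-only states) sit at height 0.
fn _depth_recurrence_example() {
fn height(s: usize, adj: &[Vec<usize>], memo: &mut Vec<Option<i32>>) -> i32 {
if let Some(h) = memo[s] {
return h;
}
let h = adj[s]
.iter()
.map(|&t| 1 + height(t, adj, memo))
.max()
.unwrap_or(0);
memo[s] = Some(h);
h
}
// Chain 0 -> 1 -> 2 with 2 final: heights are [2, 1, 0].
let adj = vec![vec![1], vec![2], vec![]];
let mut memo = vec![None; adj.len()];
assert_eq!(height(0, &adj, &mut memo), 2);
}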
struct AcyclicMinimizer {
partition: Partition,
}
impl AcyclicMinimizer {
pub fn new<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Self> {
let mut c = Self {
partition: Partition::empty_new(),
};
c.initialize(fst)?;
c.refine(fst);
Ok(c)
}
fn initialize<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) -> Result<()> {
let mut accessible_state = HashSet::new();
let mut fully_examined_states = HashSet::new();
let mut heights = Vec::new();
fst_depth(
fst,
fst.start().unwrap(),
&mut accessible_state,
&mut fully_examined_states,
&mut heights,
)?;
self.partition.initialize(heights.len());
self.partition
.allocate_classes((heights.iter().max().unwrap() + 1) as usize);
for (s, h) in heights.iter().enumerate() {
self.partition.add(s, *h as usize);
}
Ok(())
}
fn refine<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) {
let state_cmp = StateComparator {
fst,
// This clone is necessary for the moment because the partition is modified while
// still needing the StateComparator.
// TODO: Find a way to remove the clone.
partition: self.partition.clone(),
w: PhantomData,
};
let height = self.partition.num_classes();
for h in 0..height {
// We need a binary search tree here in order to sort the state ids and create a partition.
// For now this uses the `stable_bst` crate, which is quite old but seems to do the job.
// TODO: Benchmark the performance of the implementation. Maybe re-write it.
let mut equiv_classes =
TreeMap::<StateId, StateId, _>::with_comparator(|a: &StateId, b: &StateId| {
state_cmp.compare(*a, *b).unwrap()
});
let it_partition: Vec<_> = self.partition.iter(h).collect();
equiv_classes.insert(it_partition[0] as StateId, h as StateId);
let mut classes_to_add = vec![];
for e in it_partition.iter().skip(1) {
// TODO: Remove double lookup
if equiv_classes.contains_key(&(*e as StateId)) {
equiv_classes.insert(*e as StateId, NO_STATE_ID);
} else {
classes_to_add.push(e);
equiv_classes.insert(*e as StateId, NO_STATE_ID);
}
}
for v in classes_to_add {
equiv_classes.insert(*v as StateId, self.partition.add_class() as StateId);
}
for s in it_partition {
let old_class = self.partition.get_class_id(s);
let new_class = *equiv_classes.get(&(s as StateId)).unwrap();
if new_class == NO_STATE_ID {
// The behaviour here differs slightly from the C++ version: here, inserting
// an equivalent key modifies the key, which is not the case in C++.
continue;
}
if old_class != (new_class as usize) {
self.partition.move_element(s, new_class as usize);
}
}
}
}
pub fn get_partition(self) -> Partition {
self.partition
}
}
struct StateComparator<'a, W: Semiring, F: MutableFst<W>> {
fst: &'a F,
partition: Partition,
w: PhantomData<W>,
}
impl<'a, W: Semiring, F: MutableFst<W>> StateComparator<'a, W, F> {
fn do_compare(&self, x: StateId, y: StateId) -> Result<bool> {
let xfinal = self.fst.final_weight(x)?.unwrap_or_else(W::zero);
let yfinal = self.fst.final_weight(y)?.unwrap_or_else(W::zero);
if xfinal < yfinal {
return Ok(true);
} else if xfinal > yfinal {
return Ok(false);
}
if self.fst.num_trs(x)? < self.fst.num_trs(y)? {
return Ok(true);
}
if self.fst.num_trs(x)? > self.fst.num_trs(y)? {
return Ok(false);
}
let it_x_owner = self.fst.get_trs(x)?;
let it_x = it_x_owner.trs().iter();
let it_y_owner = self.fst.get_trs(y)?;
let it_y = it_y_owner.trs().iter();
for (arc1, arc2) in it_x.zip(it_y) {
if arc1.ilabel < arc2.ilabel {
return Ok(true);
}
if arc1.ilabel > arc2.ilabel {
return Ok(false);
}
let id_1 = self.partition.get_class_id(arc1.nextstate as usize);
let id_2 = self.partition.get_class_id(arc2.nextstate as usize);
if id_1 < id_2 {
return Ok(true);
}
if id_1 > id_2 {
return Ok(false);
}
}
Ok(false)
}
pub fn compare(&self, x: StateId, y: StateId) -> Result<Ordering> {
if x == y {
return Ok(Ordering::Equal);
}
let x_y = self.do_compare(x, y).unwrap();
let y_x = self.do_compare(y, x).unwrap();
if !(x_y) && !(y_x) {
return Ok(Ordering::Equal);
}
if x_y {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
}
}
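// Illustrative sketch (not part of the original source): `compare` builds a
// total order out of the two one-sided checks. Less if x precedes y, greater
// if y precedes x, and equal when neither holds (same final weight, arity,
// labels, and successor classes).
fn _ordering_from_one_sided_checks(x_before_y: bool, y_before_x: bool) -> Ordering {
match (x_before_y, y_before_x) {
(true, _) => Ordering::Less,
(_, true) => Ordering::Greater,
_ => Ordering::Equal,
}
}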
fn pre_partition<W: Semiring, F: MutableFst<W>>(
fst: &F,
partition: &mut Partition,
queue: &mut LifoQueue,
) {
let mut next_class: StateId = 0;
let num_states = fst.num_states();
let mut state_to_initial_class: Vec<StateId> = vec![0; num_states];
{
let mut hash_to_class_nonfinal = HashMap::<Vec<Label>, StateId>::new();
let mut hash_to_class_final = HashMap::<Vec<Label>, StateId>::new();
for (s, state_to_initial_class_s) in state_to_initial_class
.iter_mut()
.enumerate()
.take(num_states)
{
let this_map = if unsafe { fst.is_final_unchecked(s as StateId) } {
&mut hash_to_class_final
} else {
&mut hash_to_class_nonfinal
};
let ilabels = fst
.get_trs(s as StateId)
.unwrap()
.trs()
.iter()
.map(|e| e.ilabel)
.dedup()
.collect_vec();
match this_map.entry(ilabels) {
Entry::Occupied(e) => {
*state_to_initial_class_s = *e.get();
}
Entry::Vacant(e) => {
e.insert(next_class);
*state_to_initial_class_s = next_class;
next_class += 1;
}
};
}
}
partition.allocate_classes(next_class as usize);
for (s, c) in state_to_initial_class.iter().enumerate().take(num_states) {
partition.add(s, *c as usize);
}
for c in 0..next_class {
queue.enqueue(c);
}
}
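// Illustrative sketch (not part of the original source): the grouping logic on
// plain data. States are keyed by (is_final, deduped ilabel sequence), so the
// two non-final states below share class 0 while the final one gets class 1.
fn _pre_partition_grouping_example() {
let states = [(false, vec![1u32, 2]), (false, vec![1, 2]), (true, vec![1, 2])];
let mut classes: HashMap<(bool, Vec<u32>), usize> = HashMap::new();
let mut next = 0;
let ids: Vec<usize> = states
.iter()
.map(|key| {
*classes.entry(key.clone()).or_insert_with(|| {
let c = next;
next += 1;
c
})
})
.collect();
assert_eq!(ids, vec![0, 0, 1]);
}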
fn cyclic_minimize<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Partition> {
// Initialize
let mut tr: VectorFst<W::ReverseWeight> = reverse(fst)?;
tr_sort(&mut tr, ILabelCompare {});
let mut partition = Partition::new(tr.num_states() - 1);
let mut queue = LifoQueue::default();
pre_partition(fst, &mut partition, &mut queue);
// Compute
while let Some(c) = queue.head() {
queue.dequeue();
// Split
// TODO: Avoid this clone :o
// Here we need a pointer to the partition that stays valid even if the partition changes.
let comp = TrIterCompare {
partition: partition.clone(),
};
let mut aiter_queue = BinaryHeap::new_by(|v1, v2| {
if comp.compare(v1, v2) {
Ordering::Less
} else {
Ordering::Greater
}
});
// Split
for s in partition.iter(c as usize) {
if tr.num_trs(s as StateId + 1)? > 0 {
aiter_queue.push(TrsIterCollected {
idx: 0,
trs: tr.get_trs(s as StateId + 1)?,
w: PhantomData,
});
}
}
let mut prev_label = -1;
while !aiter_queue.is_empty() {
let mut aiter = aiter_queue.pop().unwrap();
if aiter.done() {
continue;
}
let tr = aiter.peek().unwrap();
let from_state = tr.nextstate - 1;
let from_label = tr.ilabel;
if prev_label != from_label as i32 {
partition.finalize_split(&mut Some(&mut queue));
}
let from_class = partition.get_class_id(from_state as usize);
| /// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize_with_config<W, F>(ifst: &mut F, config: MinimizeConfig) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>, | random_line_split |
minimize.rs | bool) -> Self {
Self {
delta,
allow_nondet,
}
}
pub fn with_delta(self, delta: f32) -> Self {
Self { delta,..self }
}
pub fn with_allow_nondet(self, allow_nondet: bool) -> Self {
Self {
allow_nondet,
..self
}
}
}
impl Default for MinimizeConfig {
fn default() -> Self {
Self {
delta: KSHORTESTDELTA,
allow_nondet: false,
}
}
}
/// In place minimization of deterministic weighted automata and transducers,
/// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize<W, F>(ifst: &mut F) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
minimize_with_config(ifst, MinimizeConfig::default())
}
/// In place minimization of deterministic weighted automata and transducers,
/// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize_with_config<W, F>(ifst: &mut F, config: MinimizeConfig) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
let delta = config.delta;
let allow_nondet = config.allow_nondet;
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR
| FstProperties::I_DETERMINISTIC
| FstProperties::WEIGHTED
| FstProperties::UNWEIGHTED,
)?;
let allow_acyclic_minimization = if props.contains(FstProperties::I_DETERMINISTIC) {
true
} else {
if !W::properties().contains(SemiringProperties::IDEMPOTENT) {
bail!("Cannot minimize a non-deterministic FST over a non-idempotent semiring")
} else if !allow_nondet {
bail!("Refusing to minimize a non-deterministic FST with allow_nondet = false")
}
false
};
if !props.contains(FstProperties::ACCEPTOR) {
// Weighted transducer
let mut to_gallic = ToGallicConverter {};
let mut gfst: VectorFst<GallicWeightLeft<W>> = weight_convert(ifst, &mut to_gallic)?;
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(
&mut gfst,
ReweightType::ReweightToInitial,
push_weights_config,
)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(&mut gfst, &quantize_mapper)?;
let encode_table = encode(&mut gfst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(&mut gfst, allow_acyclic_minimization)?;
decode(&mut gfst, encode_table)?;
let factor_opts: FactorWeightOptions = FactorWeightOptions {
delta: KDELTA,
mode: FactorWeightType::FACTOR_FINAL_WEIGHTS | FactorWeightType::FACTOR_ARC_WEIGHTS,
final_ilabel: 0,
final_olabel: 0,
increment_final_ilabel: false,
increment_final_olabel: false,
};
let fwfst: VectorFst<_> =
factor_weight::<_, VectorFst<GallicWeightLeft<W>>, _, _, GallicFactorLeft<W>>(
&gfst,
factor_opts,
)?;
let mut from_gallic = FromGallicConverter {
superfinal_label: EPS_LABEL,
};
*ifst = weight_convert(&fwfst, &mut from_gallic)?;
Ok(())
} else if props.contains(FstProperties::WEIGHTED) {
// Weighted acceptor
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(ifst, ReweightType::ReweightToInitial, push_weights_config)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(ifst, &quantize_mapper)?;
let encode_table = encode(ifst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(ifst, allow_acyclic_minimization)?;
decode(ifst, encode_table)
} else {
// Unweighted acceptor
acceptor_minimize(ifst, allow_acyclic_minimization)
}
}
/// In place minimization for weighted final state acceptor.
/// If `allow_acyclic_minimization` is true and the input is acyclic, then a specific
/// minimization is applied.
///
/// An error is returned if the input fst is not an unweighted acceptor.
pub fn | <W: Semiring, F: MutableFst<W> + ExpandedFst<W>>(
ifst: &mut F,
allow_acyclic_minimization: bool,
) -> Result<()> {
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED | FstProperties::ACYCLIC,
)?;
if !props.contains(FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED) {
bail!("FST is not an unweighted acceptor");
}
connect(ifst)?;
if ifst.num_states() == 0 {
return Ok(());
}
if allow_acyclic_minimization && props.contains(FstProperties::ACYCLIC) {
// Acyclic minimization
tr_sort(ifst, ILabelCompare {});
let minimizer = AcyclicMinimizer::new(ifst)?;
merge_states(minimizer.get_partition(), ifst)?;
} else {
let p = cyclic_minimize(ifst)?;
merge_states(p, ifst)?;
}
tr_unique(ifst);
Ok(())
}
fn merge_states<W: Semiring, F: MutableFst<W>>(partition: Partition, fst: &mut F) -> Result<()> {
let mut state_map = vec![None; partition.num_classes()];
for (i, s) in state_map
.iter_mut()
.enumerate()
.take(partition.num_classes())
{
*s = partition.iter(i).next();
}
for c in 0..partition.num_classes() {
for s in partition.iter(c) {
if s == state_map[c].unwrap() {
let mut it_tr = fst.tr_iter_mut(s as StateId)?;
for idx_tr in 0..it_tr.len() {
let tr = unsafe { it_tr.get_unchecked(idx_tr) };
let nextstate =
state_map[partition.get_class_id(tr.nextstate as usize)].unwrap();
unsafe { it_tr.set_nextstate_unchecked(idx_tr, nextstate as StateId) };
}
} else {
let trs: Vec<_> = fst
.get_trs(s as StateId)?
.trs()
.iter()
.cloned()
.map(|mut tr| {
tr.nextstate = state_map[partition.get_class_id(tr.nextstate as usize)]
.unwrap() as StateId;
tr
})
.collect();
for tr in trs.into_iter() {
fst.add_tr(state_map[c].unwrap() as StateId, tr)?;
}
}
}
}
fst.set_start(
state_map[partition.get_class_id(fst.start().unwrap() as usize) as usize].unwrap()
as StateId,
)?;
connect(fst)?;
Ok(())
}
// Compute the height (distance) to the final state
pub fn fst_depth<W: Semiring, F: Fst<W>>(
fst: &F,
state_id_cour: StateId,
accessible_states: &mut HashSet<StateId>,
fully_examined_states: &mut HashSet<StateId>,
heights: &mut Vec<i32>,
) -> Result<()> {
accessible_states.insert(state_id_cour);
for _ in heights.len()..=(state_id_cour as usize) {
heights.push(-1);
}
let mut height_cur_state = 0;
for tr in fst.get_trs(state_id_cour)?.trs() {
let nextstate = tr.nextstate;
if !accessible_states.contains(&nextstate) {
fst_depth(
fst,
nextstate,
accessible_states,
fully_examined_states,
heights,
)?;
}
height_cur_state = max(height_cur_state, 1 + heights[nextstate as usize]);
}
fully_examined_states.insert(state_id_cour);
heights[state_id_cour as usize] = height_cur_state;
Ok(())
}
struct AcyclicMinimizer {
partition: Partition,
}
impl AcyclicMinimizer {
pub fn new<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Self> {
let mut c = Self {
partition: Partition::empty_new(),
};
c.initialize(fst)?;
c.refine(fst);
Ok(c)
}
fn initialize<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) -> Result<()> {
let mut accessible_state = HashSet::new();
let mut fully_examined_states = HashSet::new();
let mut heights = Vec::new();
fst_depth(
fst,
fst.start().unwrap(),
&mut accessible_state,
&mut fully_examined_states,
&mut heights,
)?;
self.partition.initialize(heights.len());
self.partition
.allocate_classes((heights.iter().max().unwrap() + 1) as usize);
for (s, h) in heights.iter().enumerate() {
self.partition.add(s, *h as usize);
}
Ok(())
}
fn refine<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) {
let state_cmp = StateComparator {
fst,
// This clone is necessary for the moment because the partition is modified while
// still needing the StateComparator.
// TODO: Find a way to remove the clone.
partition: self.partition.clone(),
w: PhantomData,
};
let height = self.partition.num_classes();
for h in 0..height {
// We need a binary search tree here in order to sort the state ids and create a partition.
// For now this uses the `stable_bst` crate, which is quite old but seems to do the job.
// TODO: Benchmark the performance of the implementation. Maybe re-write it.
let mut equiv_classes =
TreeMap::<StateId, StateId, _>::with_comparator(|a: &StateId, b: &StateId| {
state_cmp.compare(*a, *b).unwrap()
});
let it_partition: Vec<_> = self.partition.iter(h).collect();
equiv_classes.insert(it_partition[0] as StateId, h as StateId);
let mut classes_to_add = vec![];
for e in it_partition.iter().skip(1) {
// TODO: Remove double lookup
if equiv_classes.contains_key(&(*e as StateId)) {
equiv_classes.insert(*e as StateId, NO_STATE_ID);
} else {
classes_to_add.push(e);
equiv_classes.insert(*e as StateId, NO_STATE_ID);
}
}
for v in classes_to_add {
equiv_classes.insert(*v as StateId, self.partition.add_class() as StateId);
}
for s in it_partition {
let old_class = self.partition.get_class_id(s);
let new_class = *equiv_classes.get(&(s as StateId)).unwrap();
if new_class == NO_STATE_ID {
// The behaviour here differs slightly from the C++ version: here, inserting
// an equivalent key modifies the key, which is not the case in C++.
continue;
}
if old_class != (new_class as usize) {
self.partition.move_element(s, new_class as usize);
}
}
}
}
pub fn get_partition(self) -> Partition {
self.partition
}
}
struct StateComparator<'a, W: Semiring, F: MutableFst<W>> {
fst: &'a F,
partition: Partition,
w: PhantomData<W>,
}
impl<'a, W: Semiring, F: MutableFst<W>> StateComparator<'a, W, F> {
fn do_compare(&self, x: StateId, y: StateId) -> Result<bool> {
let xfinal = self.fst.final_weight(x)?.unwrap_or_else(W::zero);
let yfinal = self.fst.final_weight(y)?.unwrap_or_else(W::zero);
if xfinal < yfinal {
return Ok(true);
} else if xfinal > yfinal {
return Ok(false);
}
if self.fst.num_trs(x)? < self.fst.num_trs(y)? {
return Ok(true);
}
if self.fst.num_trs(x)? > self.fst.num_trs(y)? {
return Ok(false);
}
let it_x_owner = self.fst.get_trs(x)?;
let it_x = it_x_owner.trs().iter();
let it_y_owner = self.fst.get_trs(y)?;
let it_y = it_y_owner.trs().iter();
for (arc1, arc2) in it_x.zip(it_y) {
if arc1.ilabel < arc2.ilabel {
return Ok(true);
}
if arc1.ilabel > arc2.ilabel {
return Ok(false);
}
let id_1 = self.partition.get_class_id(arc1.nextstate as usize);
let id_2 = self.partition.get_class_id(arc2.nextstate as usize);
if id_1 < id_2 {
return Ok(true);
}
if id_1 > id_2 {
return Ok(false);
}
}
Ok(false)
}
pub fn compare(&self, x: StateId, y: StateId) -> Result<Ordering> {
if x == y {
return Ok(Ordering::Equal);
}
let x_y = self.do_compare(x, y).unwrap();
let y_x = self.do_compare(y, x).unwrap();
if !(x_y) && !(y_x) {
return Ok(Ordering::Equal);
}
if x_y {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
}
}
fn pre_partition<W: Semiring, F: MutableFst<W>>(
fst: &F,
partition: &mut Partition,
queue: &mut LifoQueue,
) {
let mut next_class: StateId = 0;
let num_states = fst.num_states();
let mut state_to_initial_class: Vec<StateId> = vec![0; num_states];
{
let mut hash_to_class_nonfinal = HashMap::<Vec<Label>, StateId>::new();
let mut hash_to_class_final = HashMap::<Vec<Label>, StateId>::new();
for (s, state_to_initial_class_s) in state_to_initial_class
.iter_mut()
.enumerate()
.take(num_states)
{
let this_map = if unsafe { fst.is_final_unchecked(s as StateId) } {
&mut hash_to_class_final
} else {
&mut hash_to_class_nonfinal
};
let ilabels = fst
.get_trs(s as StateId)
.unwrap()
.trs()
.iter()
.map(|e| e.ilabel)
.dedup()
.collect_vec();
match this_map.entry(ilabels) {
Entry::Occupied(e) => {
*state_to_initial_class_s = *e.get();
}
Entry::Vacant(e) => {
e.insert(next_class);
*state_to_initial_class_s = next_class;
next_class += 1;
}
};
}
}
partition.allocate_classes(next_class as usize);
for (s, c) in state_to_initial_class.iter().enumerate().take(num_states) {
partition.add(s, *c as usize);
}
for c in 0..next_class {
queue.enqueue(c);
}
}
fn cyclic_minimize<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Partition> {
// Initialize
let mut tr: VectorFst<W::ReverseWeight> = reverse(fst)?;
tr_sort(&mut tr, ILabelCompare {});
let mut partition = Partition::new(tr.num_states() - 1);
let mut queue = LifoQueue::default();
pre_partition(fst, &mut partition, &mut queue);
// Compute
while let Some(c) = queue.head() {
queue.dequeue();
// Split
// TODO: Avoid this clone :o
// Here we need a pointer to the partition that remains valid even if the partition changes.
let comp = TrIterCompare {
partition: partition.clone(),
};
let mut aiter_queue = BinaryHeap::new_by(|v1, v2| {
if comp.compare(v1, v2) {
Ordering::Less
} else {
Ordering::Greater
}
});
// Split
for s in partition.iter(c as usize) {
if tr.num_trs(s as StateId + 1)? > 0 {
aiter_queue.push(TrsIterCollected {
idx: 0,
trs: tr.get_trs(s as StateId + 1)?,
w: PhantomData,
});
}
}
let mut prev_label = -1;
while !aiter_queue.is_empty() {
let mut aiter = aiter_queue.pop().unwrap();
if aiter.done() {
continue;
}
let tr = aiter.peek().unwrap();
let from_state = tr.nextstate - 1;
let from_label = tr.ilabel;
if prev_label != from_label as i32 {
partition.finalize_split(&mut Some(&mut queue));
}
let from_class = partition.get_class_id(from_state as usize);
| acceptor_minimize | identifier_name |
minimize.rs | bool) -> Self {
Self {
delta,
allow_nondet,
}
}
pub fn with_delta(self, delta: f32) -> Self {
Self { delta,..self }
}
pub fn with_allow_nondet(self, allow_nondet: bool) -> Self {
Self {
allow_nondet,
..self
}
}
}
impl Default for MinimizeConfig {
fn default() -> Self {
Self {
delta: KSHORTESTDELTA,
allow_nondet: false,
}
}
}
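// A minimal usage sketch for the builder above. The values are illustrative
// only, and the assertions rely on the private `delta` / `allow_nondet`
// fields being visible to a child module.
#[cfg(test)]
mod minimize_config_sketch {
    use super::*;

    #[test]
    fn builder_chains() {
        let config = MinimizeConfig::default()
            .with_delta(1e-5)
            .with_allow_nondet(true);
        assert!(config.allow_nondet);
        assert_eq!(config.delta, 1e-5);
    }
}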
/// In-place minimization of deterministic weighted automata and transducers,
/// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize<W, F>(ifst: &mut F) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
minimize_with_config(ifst, MinimizeConfig::default())
}
/// In-place minimization of deterministic weighted automata and transducers,
/// and also non-deterministic ones if they use an idempotent semiring.
/// For transducers, the algorithm produces a compact factorization of the minimal transducer.
pub fn minimize_with_config<W, F>(ifst: &mut F, config: MinimizeConfig) -> Result<()>
where
F: MutableFst<W> + ExpandedFst<W> + AllocableFst<W>,
W: WeaklyDivisibleSemiring + WeightQuantize,
W::ReverseWeight: WeightQuantize,
{
let delta = config.delta;
let allow_nondet = config.allow_nondet;
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR
| FstProperties::I_DETERMINISTIC
| FstProperties::WEIGHTED
| FstProperties::UNWEIGHTED,
)?;
let allow_acyclic_minimization = if props.contains(FstProperties::I_DETERMINISTIC) | else {
if !W::properties().contains(SemiringProperties::IDEMPOTENT) {
bail!("Cannot minimize a non-deterministic FST over a non-idempotent semiring")
} else if !allow_nondet {
bail!("Refusing to minimize a non-deterministic FST with allow_nondet = false")
}
false
};
if !props.contains(FstProperties::ACCEPTOR) {
// Weighted transducer
let mut to_gallic = ToGallicConverter {};
let mut gfst: VectorFst<GallicWeightLeft<W>> = weight_convert(ifst, &mut to_gallic)?;
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(
&mut gfst,
ReweightType::ReweightToInitial,
push_weights_config,
)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(&mut gfst, &quantize_mapper)?;
let encode_table = encode(&mut gfst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(&mut gfst, allow_acyclic_minimization)?;
decode(&mut gfst, encode_table)?;
let factor_opts: FactorWeightOptions = FactorWeightOptions {
delta: KDELTA,
mode: FactorWeightType::FACTOR_FINAL_WEIGHTS | FactorWeightType::FACTOR_ARC_WEIGHTS,
final_ilabel: 0,
final_olabel: 0,
increment_final_ilabel: false,
increment_final_olabel: false,
};
let fwfst: VectorFst<_> =
factor_weight::<_, VectorFst<GallicWeightLeft<W>>, _, _, GallicFactorLeft<W>>(
&gfst,
factor_opts,
)?;
let mut from_gallic = FromGallicConverter {
superfinal_label: EPS_LABEL,
};
*ifst = weight_convert(&fwfst, &mut from_gallic)?;
Ok(())
} else if props.contains(FstProperties::WEIGHTED) {
// Weighted acceptor
let push_weights_config = PushWeightsConfig::default().with_delta(delta);
push_weights_with_config(ifst, ReweightType::ReweightToInitial, push_weights_config)?;
let quantize_mapper = QuantizeMapper::new(delta);
tr_map(ifst, &quantize_mapper)?;
let encode_table = encode(ifst, EncodeType::EncodeWeightsAndLabels)?;
acceptor_minimize(ifst, allow_acyclic_minimization)?;
decode(ifst, encode_table)
} else {
// Unweighted acceptor
acceptor_minimize(ifst, allow_acyclic_minimization)
}
}
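// A small end-to-end sketch of `minimize` on a toy acceptor in which two
// states have identical futures and should be merged. The import paths for
// `VectorFst`, `TropicalWeight` and `Tr` are assumptions about the crate
// layout; adjust them to the actual module tree if they differ.
#[cfg(test)]
mod minimize_sketch {
    use super::*;
    use crate::fst_impls::VectorFst;
    use crate::semirings::TropicalWeight;
    use crate::Tr;

    #[test]
    fn merges_equivalent_states() -> Result<()> {
        let mut fst = VectorFst::<TropicalWeight>::new();
        let s0 = fst.add_state();
        let s1 = fst.add_state();
        let s2 = fst.add_state();
        let s3 = fst.add_state();
        fst.set_start(s0)?;
        fst.set_final(s3, TropicalWeight::one())?;
        // s1 and s2 accept exactly the same suffix language with the same
        // weights, so minimization should merge them.
        fst.add_tr(s0, Tr::new(1, 1, TropicalWeight::one(), s1))?;
        fst.add_tr(s0, Tr::new(2, 2, TropicalWeight::one(), s2))?;
        fst.add_tr(s1, Tr::new(3, 3, TropicalWeight::one(), s3))?;
        fst.add_tr(s2, Tr::new(3, 3, TropicalWeight::one(), s3))?;
        minimize(&mut fst)?;
        assert!(fst.num_states() <= 3);
        Ok(())
    }
}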
/// In-place minimization for an unweighted finite-state acceptor.
/// If `allow_acyclic_minimization` is true and the input is acyclic, a dedicated
/// acyclic minimization is applied.
///
/// An error is returned if the input FST is not an unweighted acceptor.
pub fn acceptor_minimize<W: Semiring, F: MutableFst<W> + ExpandedFst<W>>(
ifst: &mut F,
allow_acyclic_minimization: bool,
) -> Result<()> {
let props = ifst.compute_and_update_properties(
FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED | FstProperties::ACYCLIC,
)?;
if !props.contains(FstProperties::ACCEPTOR | FstProperties::UNWEIGHTED) {
bail!("FST is not an unweighted acceptor");
}
connect(ifst)?;
if ifst.num_states() == 0 {
return Ok(());
}
if allow_acyclic_minimization && props.contains(FstProperties::ACYCLIC) {
// Acyclic minimization
tr_sort(ifst, ILabelCompare {});
let minimizer = AcyclicMinimizer::new(ifst)?;
merge_states(minimizer.get_partition(), ifst)?;
} else {
let p = cyclic_minimize(ifst)?;
merge_states(p, ifst)?;
}
tr_unique(ifst);
Ok(())
}
fn merge_states<W: Semiring, F: MutableFst<W>>(partition: Partition, fst: &mut F) -> Result<()> {
let mut state_map = vec![None; partition.num_classes()];
for (i, s) in state_map
.iter_mut()
.enumerate()
.take(partition.num_classes())
{
*s = partition.iter(i).next();
}
for c in 0..partition.num_classes() {
for s in partition.iter(c) {
if s == state_map[c].unwrap() {
let mut it_tr = fst.tr_iter_mut(s as StateId)?;
for idx_tr in 0..it_tr.len() {
let tr = unsafe { it_tr.get_unchecked(idx_tr) };
let nextstate =
state_map[partition.get_class_id(tr.nextstate as usize)].unwrap();
unsafe { it_tr.set_nextstate_unchecked(idx_tr, nextstate as StateId) };
}
} else {
let trs: Vec<_> = fst
.get_trs(s as StateId)?
.trs()
.iter()
.cloned()
.map(|mut tr| {
tr.nextstate = state_map[partition.get_class_id(tr.nextstate as usize)]
.unwrap() as StateId;
tr
})
.collect();
for tr in trs.into_iter() {
fst.add_tr(state_map[c].unwrap() as StateId, tr)?;
}
}
}
}
fst.set_start(
state_map[partition.get_class_id(fst.start().unwrap() as usize) as usize].unwrap()
as StateId,
)?;
connect(fst)?;
Ok(())
}
// Compute the height (distance) to the final state.
pub fn fst_depth<W: Semiring, F: Fst<W>>(
fst: &F,
state_id_cour: StateId,
accessible_states: &mut HashSet<StateId>,
fully_examined_states: &mut HashSet<StateId>,
heights: &mut Vec<i32>,
) -> Result<()> {
accessible_states.insert(state_id_cour);
for _ in heights.len()..=(state_id_cour as usize) {
heights.push(-1);
}
let mut height_cur_state = 0;
for tr in fst.get_trs(state_id_cour)?.trs() {
let nextstate = tr.nextstate;
if !accessible_states.contains(&nextstate) {
fst_depth(
fst,
nextstate,
accessible_states,
fully_examined_states,
heights,
)?;
}
height_cur_state = max(height_cur_state, 1 + heights[nextstate as usize]);
}
fully_examined_states.insert(state_id_cour);
heights[state_id_cour as usize] = height_cur_state;
Ok(())
}
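// A sanity sketch for `fst_depth`: in a linear chain s0 -> s1 -> s2, the
// height of a state is its distance to the last state, so `heights` comes
// out as [2, 1, 0]. Import paths are assumptions, as above.
#[cfg(test)]
mod fst_depth_sketch {
    use super::*;
    use crate::fst_impls::VectorFst;
    use crate::semirings::TropicalWeight;
    use crate::Tr;
    use std::collections::HashSet;

    #[test]
    fn heights_of_a_chain() -> Result<()> {
        let mut fst = VectorFst::<TropicalWeight>::new();
        let s0 = fst.add_state();
        let s1 = fst.add_state();
        let s2 = fst.add_state();
        fst.set_start(s0)?;
        fst.set_final(s2, TropicalWeight::one())?;
        fst.add_tr(s0, Tr::new(1, 1, TropicalWeight::one(), s1))?;
        fst.add_tr(s1, Tr::new(1, 1, TropicalWeight::one(), s2))?;
        let mut accessible = HashSet::new();
        let mut examined = HashSet::new();
        let mut heights = Vec::new();
        fst_depth(&fst, s0, &mut accessible, &mut examined, &mut heights)?;
        assert_eq!(heights, vec![2, 1, 0]);
        Ok(())
    }
}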
struct AcyclicMinimizer {
partition: Partition,
}
impl AcyclicMinimizer {
pub fn new<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Self> {
let mut c = Self {
partition: Partition::empty_new(),
};
c.initialize(fst)?;
c.refine(fst);
Ok(c)
}
fn initialize<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) -> Result<()> {
let mut accessible_state = HashSet::new();
let mut fully_examined_states = HashSet::new();
let mut heights = Vec::new();
fst_depth(
fst,
fst.start().unwrap(),
&mut accessible_state,
&mut fully_examined_states,
&mut heights,
)?;
self.partition.initialize(heights.len());
self.partition
.allocate_classes((heights.iter().max().unwrap() + 1) as usize);
for (s, h) in heights.iter().enumerate() {
self.partition.add(s, *h as usize);
}
Ok(())
}
fn refine<W: Semiring, F: MutableFst<W>>(&mut self, fst: &mut F) {
let state_cmp = StateComparator {
fst,
// This clone is necessary for the moment because the partition is modified while
// still needing the StateComparator.
// TODO: Find a way to remove the clone.
partition: self.partition.clone(),
w: PhantomData,
};
let height = self.partition.num_classes();
for h in 0..height {
// Here we need a binary search tree in order to order the state ids and create a partition.
// For now we use the crate `stable_bst`, which is quite old but seems to do the job.
// TODO: Benchmark the performance of this implementation. Maybe rewrite it.
let mut equiv_classes =
TreeMap::<StateId, StateId, _>::with_comparator(|a: &StateId, b: &StateId| {
state_cmp.compare(*a, *b).unwrap()
});
let it_partition: Vec<_> = self.partition.iter(h).collect();
equiv_classes.insert(it_partition[0] as StateId, h as StateId);
let mut classes_to_add = vec![];
for e in it_partition.iter().skip(1) {
// TODO: Remove double lookup
if equiv_classes.contains_key(&(*e as StateId)) {
equiv_classes.insert(*e as StateId, NO_STATE_ID);
} else {
classes_to_add.push(e);
equiv_classes.insert(*e as StateId, NO_STATE_ID);
}
}
for v in classes_to_add {
equiv_classes.insert(*v as StateId, self.partition.add_class() as StateId);
}
for s in it_partition {
let old_class = self.partition.get_class_id(s);
let new_class = *equiv_classes.get(&(s as StateId)).unwrap();
if new_class == NO_STATE_ID {
// The behaviour here is slightly different from the C++ version: here,
// inserting an equivalent key overwrites the existing key, which is not
// the case in C++.
continue;
}
if old_class != (new_class as usize) {
self.partition.move_element(s, new_class as usize);
}
}
}
}
pub fn get_partition(self) -> Partition {
self.partition
}
}
struct StateComparator<'a, W: Semiring, F: MutableFst<W>> {
fst: &'a F,
partition: Partition,
w: PhantomData<W>,
}
impl<'a, W: Semiring, F: MutableFst<W>> StateComparator<'a, W, F> {
fn do_compare(&self, x: StateId, y: StateId) -> Result<bool> {
let xfinal = self.fst.final_weight(x)?.unwrap_or_else(W::zero);
let yfinal = self.fst.final_weight(y)?.unwrap_or_else(W::zero);
if xfinal < yfinal {
return Ok(true);
} else if xfinal > yfinal {
return Ok(false);
}
if self.fst.num_trs(x)? < self.fst.num_trs(y)? {
return Ok(true);
}
if self.fst.num_trs(x)? > self.fst.num_trs(y)? {
return Ok(false);
}
let it_x_owner = self.fst.get_trs(x)?;
let it_x = it_x_owner.trs().iter();
let it_y_owner = self.fst.get_trs(y)?;
let it_y = it_y_owner.trs().iter();
for (arc1, arc2) in it_x.zip(it_y) {
if arc1.ilabel < arc2.ilabel {
return Ok(true);
}
if arc1.ilabel > arc2.ilabel {
return Ok(false);
}
let id_1 = self.partition.get_class_id(arc1.nextstate as usize);
let id_2 = self.partition.get_class_id(arc2.nextstate as usize);
if id_1 < id_2 {
return Ok(true);
}
if id_1 > id_2 {
return Ok(false);
}
}
Ok(false)
}
pub fn compare(&self, x: StateId, y: StateId) -> Result<Ordering> {
if x == y {
return Ok(Ordering::Equal);
}
let x_y = self.do_compare(x, y).unwrap();
let y_x = self.do_compare(y, x).unwrap();
if !(x_y) && !(y_x) {
return Ok(Ordering::Equal);
}
if x_y {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
}
}
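// The comparison above induces a total preorder on states: order first by
// final weight, then by out-degree, then lexicographically by
// (ilabel, next-state class) pairs. A standalone restatement on plain
// tuples, for intuition only:
#[cfg(test)]
mod state_order_sketch {
    #[test]
    fn lexicographic_intuition() {
        // (final weight, out-degree, [(ilabel, class), ...])
        let x = (0.0_f32, 2_usize, vec![(1_u32, 0_usize), (2, 1)]);
        let y = (0.0_f32, 2_usize, vec![(1_u32, 0_usize), (2, 2)]);
        assert!(x < y); // they differ first in the second pair's class id
    }
}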
fn pre_partition<W: Semiring, F: MutableFst<W>>(
fst: &F,
partition: &mut Partition,
queue: &mut LifoQueue,
) {
let mut next_class: StateId = 0;
let num_states = fst.num_states();
let mut state_to_initial_class: Vec<StateId> = vec![0; num_states];
{
let mut hash_to_class_nonfinal = HashMap::<Vec<Label>, StateId>::new();
let mut hash_to_class_final = HashMap::<Vec<Label>, StateId>::new();
for (s, state_to_initial_class_s) in state_to_initial_class
.iter_mut()
.enumerate()
.take(num_states)
{
let this_map = if unsafe { fst.is_final_unchecked(s as StateId) } {
&mut hash_to_class_final
} else {
&mut hash_to_class_nonfinal
};
let ilabels = fst
.get_trs(s as StateId)
.unwrap()
.trs()
.iter()
.map(|e| e.ilabel)
.dedup()
.collect_vec();
match this_map.entry(ilabels) {
Entry::Occupied(e) => {
*state_to_initial_class_s = *e.get();
}
Entry::Vacant(e) => {
e.insert(next_class);
*state_to_initial_class_s = next_class;
next_class += 1;
}
};
}
}
partition.allocate_classes(next_class as usize);
for (s, c) in state_to_initial_class.iter().enumerate().take(num_states) {
partition.add(s, *c as usize);
}
for c in 0..next_class {
queue.enqueue(c);
}
}
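// The initial-class key built above is a state's input-label sequence with
// adjacent duplicates collapsed, tracked separately for final and non-final
// states. An illustration of the key computation alone:
#[cfg(test)]
mod pre_partition_key_sketch {
    use itertools::Itertools;

    #[test]
    fn ilabel_signature_collapses_runs() {
        let ilabels = vec![1_u32, 1, 2, 3, 3];
        let key = ilabels.into_iter().dedup().collect_vec();
        assert_eq!(key, vec![1, 2, 3]);
    }
}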
fn cyclic_minimize<W: Semiring, F: MutableFst<W>>(fst: &mut F) -> Result<Partition> {
// Initialize
let mut tr: VectorFst<W::ReverseWeight> = reverse(fst)?;
tr_sort(&mut tr, ILabelCompare {});
let mut partition = Partition::new(tr.num_states() - 1);
let mut queue = LifoQueue::default();
pre_partition(fst, &mut partition, &mut queue);
// Compute
while let Some(c) = queue.head() {
queue.dequeue();
// Split
// TODO: Avoid this clone :o
// Here we need a pointer to the partition that remains valid even if the partition changes.
let comp = TrIterCompare {
partition: partition.clone(),
};
let mut aiter_queue = BinaryHeap::new_by(|v1, v2| {
if comp.compare(v1, v2) {
Ordering::Less
} else {
Ordering::Greater
}
});
// Split
for s in partition.iter(c as usize) {
if tr.num_trs(s as StateId + 1)? > 0 {
aiter_queue.push(TrsIterCollected {
idx: 0,
trs: tr.get_trs(s as StateId + 1)?,
w: PhantomData,
});
}
}
let mut prev_label = -1;
while !aiter_queue.is_empty() {
let mut aiter = aiter_queue.pop().unwrap();
if aiter.done() {
continue;
}
let tr = aiter.peek().unwrap();
let from_state = tr.nextstate - 1;
let from_label = tr.ilabel;
if prev_label != from_label as i32 {
partition.finalize_split(&mut Some(&mut queue));
}
let from_class = partition.get_class_id(from_state as usize);
| {
true
} | conditional_block |
diag.rs | // Diagnostics engine
use super::{Span, SrcMgr};
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::rc::Rc;
use colored::{Color, Colorize};
/// Severity of the diagnostic message.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Severity {
Remark,
Info,
Warning,
Error,
Fatal,
}
impl Severity {
/// Get the color corresponding to this severity.
fn color(&self) -> Color {
match self {
Severity::Remark => Color::Blue,
Severity::Info => Color::Black,
Severity::Warning => Color::Magenta,
Severity::Error | Severity::Fatal => Color::Red,
}
}
}
impl fmt::Display for Severity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str = match self {
Severity::Remark => "remark",
Severity::Info => "info",
Severity::Warning => "warning",
Severity::Error => "error",
Severity::Fatal => "fatal error",
};
write!(f, "{}", str)
}
}
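// A quick check of the mapping above.
#[cfg(test)]
mod severity_display_sketch {
    use super::Severity;

    #[test]
    fn renders_lowercase_names() {
        assert_eq!(format!("{}", Severity::Warning), "warning");
        assert_eq!(Severity::Fatal.to_string(), "fatal error");
    }
}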
/// A note carrying a detailed message or a suggestion for how to fix the issue.
pub struct Note {
pub span: Span,
pub fix: Option<String>,
pub message: Option<String>,
}
/// A diagnostic message.
pub struct Diagnostic {
pub severity: Severity,
pub message: String,
/// This is the primary span that causes the issue. The field itself is not displayed;
/// the `new` function automatically adds this span to `notes` so that it is displayed.
pub span: Option<Span>,
pub notes: Vec<Note>,
}
/// Helpers for building diagnostic message. Intended to be called in chains.
impl Diagnostic {
pub fn new(severity: Severity, msg: impl Into<String>, span: Span) -> Self {
Diagnostic {
severity,
message: msg.into(),
span: Some(span),
notes: vec![Note {
span,
fix: None,
message: None,
}],
}
}
pub fn fix_primary(mut self, fix: impl Into<String>) -> Self {
self.notes[0].fix = Some(fix.into());
self
}
pub fn fix(mut self, span: Span, fix: impl Into<String>) -> Self {
self.notes.push(Note {
span,
fix: Some(fix.into()),
message: None,
});
self
}
}
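// A minimal sketch of the intended chaining style; `Span` construction is
// crate-specific, so this hypothetical helper simply takes spans the caller
// already has.
#[allow(dead_code)]
fn example_missing_semicolon(primary: Span, stray: Span) -> Diagnostic {
    Diagnostic::new(Severity::Error, "expected `;` after expression", primary)
        .fix_primary("insert `;`")
        .fix(stray, "consider removing this token")
}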
// Helper struct for computing visual column numbers in a line containing tabs and non-ASCII characters.
struct VisualString {
str: String,
columns: Vec<usize>,
}
impl VisualString {
fn new(str: &str, tab: usize) -> VisualString {
let mut columns = Vec::with_capacity(str.len() + 1);
columns.push(0);
// Current visual string and visual length
let mut vstr = String::new();
let mut vlen = 0;
for ch in str.chars() {
match ch {
'\r' | '\n' => (),
'\t' => {
let newlen = (vlen + tab) / tab * tab;
for _ in vlen..newlen {
vstr.push(' ');
}
vlen = newlen
}
_ => {
vstr.push(ch);
vlen += 1
}
}
for _ in 0..ch.len_utf8() {
columns.push(vlen);
}
}
// Reserve a column for end-of-line character
columns.push(vlen + 1);
VisualString {
str: vstr,
columns: columns,
}
}
fn visual_column(&self, pos: usize) -> usize {
self.columns[pos]
}
fn visual_length(&self) -> usize {
self.columns[self.columns.len() - 1]
}
fn visual_text(&self) -> &str {
&self.str
}
}
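// A worked example of the visual-column mapping: with a tab width of 4, the
// byte after "a\t" lands at visual column 4, because the tab pads to the
// next multiple of 4.
#[cfg(test)]
mod visual_string_sketch {
    use super::VisualString;

    #[test]
    fn tab_expansion() {
        let vs = VisualString::new("a\tb", 4);
        assert_eq!(vs.visual_column(0), 0); // byte 0: 'a'
        assert_eq!(vs.visual_column(2), 4); // byte 2: 'b', after the expanded tab
        assert_eq!(vs.visual_text(), "a   b");
    }
}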
impl Diagnostic {
pub fn print(&self, mgr: &SrcMgr, color: bool, tab: usize) {
// Stringify and color severity
let mut severity = format!("{}: ", self.severity);
if color {
severity = severity.color(self.severity.color()).to_string();
}
// Convert spans to fat spans
let primary_span = match self.notes.first().and_then(|x| mgr.find_span(x.span)) {
None => {
// If the message has no associated file, just print it
if color {
eprintln!("{}{}", severity.bold(), self.message.bold());
} else {
eprintln!("{}{}", severity, self.message);
}
return;
}
Some(v) => v,
};
// Obtain line map
let src = &primary_span.source;
let linemap = src.linemap();
// Get line number (starting from 0)
let line = linemap.line_number(primary_span.start);
// Get position within the line
let line_start = linemap.line_start_pos(line);
// Get source code line for handling
let line_text = linemap.line(src, line);
let vstr = VisualString::new(line_text, tab);
// Get colored severity string
// Generate the error message line
let mut msg = format!(
"{}:{}: {}{}",
src.filename(),
line + 1,
severity,
self.message
);
if color {
msg = msg.bold().to_string();
}
// Allocate char vectors to hold indicators and hints
// Make this 1 longer so it is possible to point at the line break character.
let mut indicators = vec![' '; vstr.visual_length() + 1];
let mut fixes = vec![' '; vstr.visual_length()];
let mut character = '^';
let mut has_fix = false;
// Fill in ^ and ~ characters for all spans
for note in &self.notes {
let span = match mgr.find_span(note.span) {
// The span is non-existent, continue instead
None => continue,
Some(v) => v,
};
// Unlikely event, we cannot display this
if !Rc::ptr_eq(&span.source, &primary_span.source) {
continue;
}
// Get start and end position, clamped within the line.
let start = span.start as isize - line_start as isize;
let start_clamp = cmp::min(cmp::max(start, 0) as usize, line_text.len());
let end = span.end as isize - line_start as isize;
let end_clamp = cmp::min(cmp::max(end, 0) as usize, line_text.len() + 1);
for i in vstr.visual_column(start_clamp)..vstr.visual_column(end_clamp) {
indicators[i] = character;
}
// We can only display it if it partially covers this line
if note.fix.is_some() && end >= 0 && start <= line_text.len() as isize {
let mut vptr = cmp::min(cmp::max(start, 0) as usize, line_text.len());
// Now replace the part in vector with the replacement suggestion
for ch in note.fix.as_ref().unwrap().chars() {
if vptr >= fixes.len() {
fixes.push(ch);
} else {
fixes[vptr] = ch;
}
vptr += 1;
}
has_fix = true;
}
// For non-primary notes, the character is different.
character = '~';
}
let mut indicator_line: String = indicators.into_iter().collect();
if color {
indicator_line = indicator_line.green().bold().to_string();
}
if has_fix {
let mut line: String = fixes.into_iter().collect();
if color {
line = line.green().to_string();
}
eprintln!(
"{}\n{}\n{}\n{}",
msg,
vstr.visual_text(),
indicator_line,
line
);
} else {
eprintln!("{}\n{}\n{}", msg, vstr.visual_text(), indicator_line);
}
}
}
/// Diagnostic manager
struct DiagMgrMut {
src: Rc<SrcMgr>,
diagnostics: Vec<Diagnostic>,
}
pub struct DiagMgr {
mutable: RefCell<DiagMgrMut>,
}
impl DiagMgr {
/// Create a new diagnostics manager
pub fn new(mgr: Rc<SrcMgr>) -> Self {
Self {
mutable: RefCell::new(DiagMgrMut {
src: mgr,
diagnostics: Vec::new(),
}),
}
}
/// Add a new diagnostic.
pub fn report(&self, diag: Diagnostic) {
let mut m = self.mutable.borrow_mut();
diag.print(&m.src, true, 4);
m.diagnostics.push(diag);
}
/// Create a diagnostic with the given severity from a message and span, and report it.
pub fn report_span<M: Into<String>>(&self, severity: Severity, msg: M, span: Span) {
self.report(Diagnostic::new(severity, msg.into(), span));
}
/// Create an error diagnostic from a message and span, and report it.
pub fn report_error<M: Into<String>>(&self, msg: M, span: Span) {
self.report(Diagnostic::new(Severity::Error, msg.into(), span));
}
/// Create a fatal diagnostic from message and span and report it. In addition, abort
/// execution with a panic.
pub fn report_fatal<M: Into<String>>(&self, msg: M, span: Span) ->! |
/// Clear existing diagnostics.
pub fn clear(&self) {
let mut m = self.mutable.borrow_mut();
m.diagnostics.clear();
}
/// Check if there is any fatal error.
pub fn has_fatal(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Fatal)
}
/// Check if there is any error.
pub fn has_error(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Error || diag.severity == Severity::Fatal)
}
}
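// A hedged usage sketch: `SrcMgr` construction lives elsewhere in the crate,
// so this hypothetical helper takes one as a parameter.
#[allow(dead_code)]
fn report_and_check(src: Rc<SrcMgr>, span: Span) -> bool {
    let mgr = DiagMgr::new(src);
    mgr.report_error("something went wrong", span);
    mgr.has_error()
}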
| {
self.report(Diagnostic::new(Severity::Fatal, msg.into(), span));
std::panic::panic_any(Severity::Fatal);
} | identifier_body |
diag.rs | // Diagnostics engine
use super::{Span, SrcMgr};
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::rc::Rc;
use colored::{Color, Colorize};
/// Severity of the diagnostic message.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Severity {
Remark,
Info,
Warning,
Error,
Fatal,
}
impl Severity {
/// Get the color corresponding to this severity.
fn color(&self) -> Color {
match self {
Severity::Remark => Color::Blue,
Severity::Info => Color::Black,
Severity::Warning => Color::Magenta,
Severity::Error | Severity::Fatal => Color::Red,
}
}
}
impl fmt::Display for Severity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str = match self {
Severity::Remark => "remark",
Severity::Info => "info",
Severity::Warning => "warning",
Severity::Error => "error",
Severity::Fatal => "fatal error",
};
write!(f, "{}", str)
}
}
/// A note carrying a detailed message or a suggestion for how to fix the issue.
pub struct Note {
pub span: Span,
pub fix: Option<String>,
pub message: Option<String>,
}
/// A diagnostic message.
pub struct Diagnostic {
pub severity: Severity,
pub message: String,
/// This is the primary span that causes the issue. The field itself is not displayed;
/// the `new` function automatically adds this span to `notes` so that it is displayed.
pub span: Option<Span>,
pub notes: Vec<Note>,
}
/// Helpers for building diagnostic message. Intended to be called in chains.
impl Diagnostic {
pub fn new(severity: Severity, msg: impl Into<String>, span: Span) -> Self {
Diagnostic {
severity,
message: msg.into(),
span: Some(span),
notes: vec![Note {
span,
fix: None,
message: None,
}],
}
}
pub fn fix_primary(mut self, fix: impl Into<String>) -> Self {
self.notes[0].fix = Some(fix.into());
self
}
pub fn fix(mut self, span: Span, fix: impl Into<String>) -> Self {
self.notes.push(Note {
span,
fix: Some(fix.into()),
message: None,
});
self
}
}
// Helper struct for computing visual column numbers in a line containing tabs and non-ASCII characters.
struct VisualString {
str: String,
columns: Vec<usize>,
}
impl VisualString {
fn new(str: &str, tab: usize) -> VisualString {
let mut columns = Vec::with_capacity(str.len() + 1);
columns.push(0);
// Current visual string and visual length
let mut vstr = String::new();
let mut vlen = 0;
for ch in str.chars() {
match ch {
'\r' | '\n' => (),
'\t' => {
let newlen = (vlen + tab) / tab * tab;
for _ in vlen..newlen {
vstr.push(' ');
}
vlen = newlen
}
_ => {
vstr.push(ch);
vlen += 1
} | }
}
// Reserve a column for end-of-line character
columns.push(vlen + 1);
VisualString {
str: vstr,
columns: columns,
}
}
fn visual_column(&self, pos: usize) -> usize {
self.columns[pos]
}
fn visual_length(&self) -> usize {
self.columns[self.columns.len() - 1]
}
fn visual_text(&self) -> &str {
&self.str
}
}
impl Diagnostic {
pub fn print(&self, mgr: &SrcMgr, color: bool, tab: usize) {
// Stringify and color severity
let mut severity = format!("{}: ", self.severity);
if color {
severity = severity.color(self.severity.color()).to_string();
}
// Convert spans to fat spans
let primary_span = match self.notes.first().and_then(|x| mgr.find_span(x.span)) {
None => {
// If the message has no associated file, just print it
if color {
eprintln!("{}{}", severity.bold(), self.message.bold());
} else {
eprintln!("{}{}", severity, self.message);
}
return;
}
Some(v) => v,
};
// Obtain line map
let src = &primary_span.source;
let linemap = src.linemap();
// Get line number (starting from 0)
let line = linemap.line_number(primary_span.start);
// Get position within the line
let line_start = linemap.line_start_pos(line);
// Get source code line for handling
let line_text = linemap.line(src, line);
let vstr = VisualString::new(line_text, tab);
// Get colored severity string
// Generate the error message line
let mut msg = format!(
"{}:{}: {}{}",
src.filename(),
line + 1,
severity,
self.message
);
if color {
msg = msg.bold().to_string();
}
// Allocate char vectors to hold indicators and hints
// Make this 1 longer so it is possible to point at the line break character.
let mut indicators = vec![' '; vstr.visual_length() + 1];
let mut fixes = vec![' '; vstr.visual_length()];
let mut character = '^';
let mut has_fix = false;
// Fill in ^ and ~ characters for all spans
for note in &self.notes {
let span = match mgr.find_span(note.span) {
// The span is non-existent, continue instead
None => continue,
Some(v) => v,
};
// Unlikely event, we cannot display this
if !Rc::ptr_eq(&span.source, &primary_span.source) {
continue;
}
// Get start and end position, clamped within the line.
let start = span.start as isize - line_start as isize;
let start_clamp = cmp::min(cmp::max(start, 0) as usize, line_text.len());
let end = span.end as isize - line_start as isize;
let end_clamp = cmp::min(cmp::max(end, 0) as usize, line_text.len() + 1);
for i in vstr.visual_column(start_clamp)..vstr.visual_column(end_clamp) {
indicators[i] = character;
}
// We can only display it if it partially covers this line
if note.fix.is_some() && end >= 0 && start <= line_text.len() as isize {
let mut vptr = cmp::min(cmp::max(start, 0) as usize, line_text.len());
// Now replace the part in vector with the replacement suggestion
for ch in note.fix.as_ref().unwrap().chars() {
if vptr >= fixes.len() {
fixes.push(ch);
} else {
fixes[vptr] = ch;
}
vptr += 1;
}
has_fix = true;
}
// For non-primary notes, the character is different.
character = '~';
}
let mut indicator_line: String = indicators.into_iter().collect();
if color {
indicator_line = indicator_line.green().bold().to_string();
}
if has_fix {
let mut line: String = fixes.into_iter().collect();
if color {
line = line.green().to_string();
}
eprintln!(
"{}\n{}\n{}\n{}",
msg,
vstr.visual_text(),
indicator_line,
line
);
} else {
eprintln!("{}\n{}\n{}", msg, vstr.visual_text(), indicator_line);
}
}
}
/// Diagnostic manager
struct DiagMgrMut {
src: Rc<SrcMgr>,
diagnostics: Vec<Diagnostic>,
}
pub struct DiagMgr {
mutable: RefCell<DiagMgrMut>,
}
impl DiagMgr {
/// Create a new diagnostics manager
pub fn new(mgr: Rc<SrcMgr>) -> Self {
Self {
mutable: RefCell::new(DiagMgrMut {
src: mgr,
diagnostics: Vec::new(),
}),
}
}
/// Add a new diagnostic.
pub fn report(&self, diag: Diagnostic) {
let mut m = self.mutable.borrow_mut();
diag.print(&m.src, true, 4);
m.diagnostics.push(diag);
}
/// Create a diagnostic with the given severity from a message and span, and report it.
pub fn report_span<M: Into<String>>(&self, severity: Severity, msg: M, span: Span) {
self.report(Diagnostic::new(severity, msg.into(), span));
}
/// Create an error diagnostic from a message and span, and report it.
pub fn report_error<M: Into<String>>(&self, msg: M, span: Span) {
self.report(Diagnostic::new(Severity::Error, msg.into(), span));
}
/// Create a fatal diagnostic from message and span and report it. In addition, abort
/// execution with a panic.
pub fn report_fatal<M: Into<String>>(&self, msg: M, span: Span) ->! {
self.report(Diagnostic::new(Severity::Fatal, msg.into(), span));
std::panic::panic_any(Severity::Fatal);
}
/// Clear existing diagnostics.
pub fn clear(&self) {
let mut m = self.mutable.borrow_mut();
m.diagnostics.clear();
}
/// Check if there is any fatal error.
pub fn has_fatal(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Fatal)
}
/// Check if there is any error.
pub fn has_error(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Error || diag.severity == Severity::Fatal)
}
} | }
for _ in 0..ch.len_utf8() {
columns.push(vlen); | random_line_split |
diag.rs | // Diagnostics engine
use super::{Span, SrcMgr};
use std::cell::RefCell;
use std::cmp;
use std::fmt;
use std::rc::Rc;
use colored::{Color, Colorize};
/// Severity of the diagnostic message.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Severity {
Remark,
Info,
Warning,
Error,
Fatal,
}
impl Severity {
/// Get the color corresponding to this severity.
fn color(&self) -> Color {
match self {
Severity::Remark => Color::Blue,
Severity::Info => Color::Black,
Severity::Warning => Color::Magenta,
Severity::Error | Severity::Fatal => Color::Red,
}
}
}
impl fmt::Display for Severity {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let str = match self {
Severity::Remark => "remark",
Severity::Info => "info",
Severity::Warning => "warning",
Severity::Error => "error",
Severity::Fatal => "fatal error",
};
write!(f, "{}", str)
}
}
/// A note carrying a detailed message or a suggestion for how to fix the issue.
pub struct Note {
pub span: Span,
pub fix: Option<String>,
pub message: Option<String>,
}
/// A diagnostic message.
pub struct Diagnostic {
pub severity: Severity,
pub message: String,
/// This is the primary span that causes the issue. The field itself is not displayed;
/// the `new` function automatically adds this span to `notes` so that it is displayed.
pub span: Option<Span>,
pub notes: Vec<Note>,
}
/// Helpers for building diagnostic message. Intended to be called in chains.
impl Diagnostic {
pub fn new(severity: Severity, msg: impl Into<String>, span: Span) -> Self {
Diagnostic {
severity,
message: msg.into(),
span: Some(span),
notes: vec![Note {
span,
fix: None,
message: None,
}],
}
}
pub fn fix_primary(mut self, fix: impl Into<String>) -> Self {
self.notes[0].fix = Some(fix.into());
self
}
pub fn fix(mut self, span: Span, fix: impl Into<String>) -> Self {
self.notes.push(Note {
span,
fix: Some(fix.into()),
message: None,
});
self
}
}
// Helper struct for computing visual column numbers in a line containing tabs and non-ASCII characters.
struct VisualString {
str: String,
columns: Vec<usize>,
}
impl VisualString {
fn new(str: &str, tab: usize) -> VisualString {
let mut columns = Vec::with_capacity(str.len() + 1);
columns.push(0);
// Current visual string and visual length
let mut vstr = String::new();
let mut vlen = 0;
for ch in str.chars() {
match ch {
'\r' | '\n' => (),
'\t' => {
let newlen = (vlen + tab) / tab * tab;
for _ in vlen..newlen {
vstr.push(' ');
}
vlen = newlen
}
_ => {
vstr.push(ch);
vlen += 1
}
}
for _ in 0..ch.len_utf8() {
columns.push(vlen);
}
}
// Reserve a column for end-of-line character
columns.push(vlen + 1);
VisualString {
str: vstr,
columns: columns,
}
}
fn visual_column(&self, pos: usize) -> usize {
self.columns[pos]
}
fn visual_length(&self) -> usize {
self.columns[self.columns.len() - 1]
}
fn visual_text(&self) -> &str {
&self.str
}
}
impl Diagnostic {
pub fn print(&self, mgr: &SrcMgr, color: bool, tab: usize) {
// Stringify and color severity
let mut severity = format!("{}: ", self.severity);
if color {
severity = severity.color(self.severity.color()).to_string();
}
// Convert spans to fat spans
let primary_span = match self.notes.first().and_then(|x| mgr.find_span(x.span)) {
None => {
// If the message has no associated file, just print it
if color {
eprintln!("{}{}", severity.bold(), self.message.bold());
} else {
eprintln!("{}{}", severity, self.message);
}
return;
}
Some(v) => v,
};
// Obtain line map
let src = &primary_span.source;
let linemap = src.linemap();
// Get line number (starting from 0)
let line = linemap.line_number(primary_span.start);
// Get position within the line
let line_start = linemap.line_start_pos(line);
// Get source code line for handling
let line_text = linemap.line(src, line);
let vstr = VisualString::new(line_text, tab);
// Get colored severity string
// Generate the error message line
let mut msg = format!(
"{}:{}: {}{}",
src.filename(),
line + 1,
severity,
self.message
);
if color {
msg = msg.bold().to_string();
}
// Allocate char vectors to hold indicators and hints
// Make this 1 longer so it is possible to point at the line break character.
let mut indicators = vec![' '; vstr.visual_length() + 1];
let mut fixes = vec![' '; vstr.visual_length()];
let mut character = '^';
let mut has_fix = false;
// Fill in ^ and ~ characters for all spans
for note in &self.notes {
let span = match mgr.find_span(note.span) {
// The span is non-existent, continue instead
None => continue,
Some(v) => v,
};
// Unlikely event, we cannot display this
if !Rc::ptr_eq(&span.source, &primary_span.source) {
continue;
}
// Get start and end position, clamped within the line.
let start = span.start as isize - line_start as isize;
let start_clamp = cmp::min(cmp::max(start, 0) as usize, line_text.len());
let end = span.end as isize - line_start as isize;
let end_clamp = cmp::min(cmp::max(end, 0) as usize, line_text.len() + 1);
for i in vstr.visual_column(start_clamp)..vstr.visual_column(end_clamp) {
indicators[i] = character;
}
// We can only display it if it partially covers this line
if note.fix.is_some() && end >= 0 && start <= line_text.len() as isize {
let mut vptr = cmp::min(cmp::max(start, 0) as usize, line_text.len());
// Now replace the part in vector with the replacement suggestion
for ch in note.fix.as_ref().unwrap().chars() {
if vptr >= fixes.len() {
fixes.push(ch);
} else {
fixes[vptr] = ch;
}
vptr += 1;
}
has_fix = true;
}
// For non-primary notes, the character is different.
character = '~';
}
let mut indicator_line: String = indicators.into_iter().collect();
if color {
indicator_line = indicator_line.green().bold().to_string();
}
if has_fix {
let mut line: String = fixes.into_iter().collect();
if color {
line = line.green().to_string();
}
eprintln!(
"{}\n{}\n{}\n{}",
msg,
vstr.visual_text(),
indicator_line,
line
);
} else {
eprintln!("{}\n{}\n{}", msg, vstr.visual_text(), indicator_line);
}
}
}
/// Diagnostic manager
struct DiagMgrMut {
src: Rc<SrcMgr>,
diagnostics: Vec<Diagnostic>,
}
pub struct DiagMgr {
mutable: RefCell<DiagMgrMut>,
}
impl DiagMgr {
/// Create a new diagnostics manager
pub fn new(mgr: Rc<SrcMgr>) -> Self {
Self {
mutable: RefCell::new(DiagMgrMut {
src: mgr,
diagnostics: Vec::new(),
}),
}
}
/// Add a new diagnostic.
pub fn | (&self, diag: Diagnostic) {
let mut m = self.mutable.borrow_mut();
diag.print(&m.src, true, 4);
m.diagnostics.push(diag);
}
/// Create a diagnostic with the given severity from a message and span, and report it.
pub fn report_span<M: Into<String>>(&self, severity: Severity, msg: M, span: Span) {
self.report(Diagnostic::new(severity, msg.into(), span));
}
/// Create an error diagnostic from a message and span, and report it.
pub fn report_error<M: Into<String>>(&self, msg: M, span: Span) {
self.report(Diagnostic::new(Severity::Error, msg.into(), span));
}
/// Create a fatal diagnostic from message and span and report it. In addition, abort
/// execution with a panic.
pub fn report_fatal<M: Into<String>>(&self, msg: M, span: Span) ->! {
self.report(Diagnostic::new(Severity::Fatal, msg.into(), span));
std::panic::panic_any(Severity::Fatal);
}
/// Clear existing diagnostics.
pub fn clear(&self) {
let mut m = self.mutable.borrow_mut();
m.diagnostics.clear();
}
/// Check if there is any fatal error.
pub fn has_fatal(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Fatal)
}
/// Check if there is any error.
pub fn has_error(&self) -> bool {
let m = self.mutable.borrow();
m.diagnostics
.iter()
.any(|diag| diag.severity == Severity::Error || diag.severity == Severity::Fatal)
}
}
| report | identifier_name |
lib.rs | // Copyright 2016 The android_logger Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A logger which writes to android output.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! /// Android code may not have obvious "main", this is just an example.
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! );
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//! }
//! ```
//!
//! ## Example with module path filter
//!
//! It is possible to limit log messages to output from a specific crate:
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! .with_allowed_module_path("hello::crate")
//! );
//!
//! //..
//! }
//! ```
#[cfg(target_os = "android")]
extern crate android_log_sys as log_ffi;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::sync::RwLock;
#[cfg(target_os = "android")]
use log_ffi::LogPriority;
use log::{Level, Log, Metadata, Record};
use std::ffi::CStr;
use std::mem;
use std::fmt;
use std::ptr;
/// Output a log message to the Android system.
#[cfg(target_os = "android")]
fn android_log(prio: log_ffi::LogPriority, tag: &CStr, msg: &CStr) {
unsafe {
log_ffi::__android_log_write(
prio as log_ffi::c_int,
tag.as_ptr() as *const log_ffi::c_char,
msg.as_ptr() as *const log_ffi::c_char,
)
};
}
/// Dummy output placeholder for tests.
#[cfg(not(target_os = "android"))]
fn android_log(_priority: Level, _tag: &CStr, _msg: &CStr) {}
/// Underlying android logger backend
pub struct AndroidLogger {
filter: RwLock<Filter>,
}
lazy_static! {
static ref ANDROID_LOGGER: AndroidLogger = AndroidLogger::default();
}
const LOGGING_TAG_MAX_LEN: usize = 23;
const LOGGING_MSG_MAX_LEN: usize = 4000;
impl Default for AndroidLogger {
/// Create a new logger with default filter
fn default() -> AndroidLogger {
AndroidLogger {
filter: RwLock::new(Filter::default()),
}
}
}
impl Log for AndroidLogger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
if let Some(module_path) = record.module_path() {
let filter = self.filter
.read()
.expect("failed to acquire android_log filter lock for read");
if !filter.is_module_path_allowed(module_path) {
return;
}
}
// tag must not exceed LOGGING_TAG_MAX_LEN
let mut tag_bytes: [u8; LOGGING_TAG_MAX_LEN + 1] = unsafe { mem::uninitialized() };
// truncate the tag here to fit into LOGGING_TAG_MAX_LEN
self.fill_tag_bytes(&mut tag_bytes, record);
// use stack array as C string
let tag: &CStr = unsafe { CStr::from_ptr(mem::transmute(tag_bytes.as_ptr())) };
// message must not exceed LOGGING_MSG_MAX_LEN
// therefore split log message into multiple log calls
let mut writer = PlatformLogWriter::new(record.level(), tag);
// use PlatformLogWriter to output chunks if they exceed max size
let _ = fmt::write(&mut writer, *record.args());
// output the remaining message (this would usually be the most common case)
writer.flush();
}
fn flush(&self) {}
}
impl AndroidLogger {
fn fill_tag_bytes(&self, array: &mut [u8], record: &Record) {
let tag_bytes_iter = record.module_path().unwrap_or_default().bytes();
if tag_bytes_iter.len() > LOGGING_TAG_MAX_LEN {
for (input, output) in tag_bytes_iter
.take(LOGGING_TAG_MAX_LEN - 2)
.chain(b"..\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
} else {
for (input, output) in tag_bytes_iter
.chain(b"\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
}
}
}
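// A standalone restatement of the truncation rule implemented above (not the
// code path itself): module paths longer than LOGGING_TAG_MAX_LEN (23) keep
// their first 21 bytes and end with "..".
#[cfg(test)]
mod tag_truncation_sketch {
    #[test]
    fn long_tags_get_elided() {
        let module_path = "some::very::long::module::path";
        let tag = if module_path.len() > 23 {
            format!("{}..", &module_path[..21])
        } else {
            module_path.to_string()
        };
        assert_eq!(tag, "some::very::long::mod..");
        assert_eq!(tag.len(), 23);
    }
}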
/// Filter for android logger.
pub struct Filter {
log_level: Option<Level>,
allow_module_paths: Vec<String>,
}
impl Default for Filter {
fn default() -> Self {
Filter {
log_level: None,
allow_module_paths: Vec::new(),
}
}
}
impl Filter {
/// Change the minimum log level.
///
/// All values above the set level are logged. For example, if
/// `Warn` is set, the `Error` is logged too, but `Info` isn't.
pub fn with_min_level(mut self, level: Level) -> Self {
self.log_level = Some(level);
self
}
/// Set allowed module path.
///
/// Allow log entry only if module path matches specified path exactly.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default().with_allowed_module_path("crate");
///
/// assert!(filter.is_module_path_allowed("crate"));
/// assert!(!filter.is_module_path_allowed("other_crate"));
/// assert!(!filter.is_module_path_allowed("crate::subcrate"));
/// ```
///
/// ## Multiple rules example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_path("A")
/// .with_allowed_module_path("B");
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_path<S: Into<String>>(mut self, path: S) -> Self |
/// Set multiple allowed module paths.
///
/// Same as `with_allowed_module_path`, but accepts list of paths.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_paths(["A", "B"].iter().map(|i| i.to_string()));
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_paths<I: IntoIterator<Item = String>>(mut self, paths: I) -> Self {
self.allow_module_paths.extend(paths.into_iter());
self
}
/// Check if module path is allowed by filter rules.
pub fn is_module_path_allowed(&self, path: &str) -> bool {
if self.allow_module_paths.is_empty() {
return true;
}
self.allow_module_paths
.iter()
.any(|allowed_path| allowed_path == path)
}
}
#[cfg(test)]
mod tests {
use super::Filter;
#[test]
fn with_allowed_module_path() {
assert!(Filter::default().is_module_path_allowed("random"));
}
}
struct PlatformLogWriter<'a> {
#[cfg(target_os = "android")] priority: LogPriority,
#[cfg(not(target_os = "android"))] priority: Level,
len: usize,
last_newline_index: usize,
tag: &'a CStr,
buffer: [u8; LOGGING_MSG_MAX_LEN + 1],
}
impl<'a> PlatformLogWriter<'a> {
#[cfg(target_os = "android")]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: match level {
Level::Warn => LogPriority::WARN,
Level::Info => LogPriority::INFO,
Level::Debug => LogPriority::DEBUG,
Level::Error => LogPriority::ERROR,
Level::Trace => LogPriority::VERBOSE,
},
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
#[cfg(not(target_os = "android"))]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: level,
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
/// Flush some bytes to android logger.
///
/// If there is a newline, flush up to it.
/// If there was no newline, flush everything.
///
/// Not guaranteed to flush everything.
fn temporal_flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
if self.last_newline_index > 0 {
let copy_from_index = self.last_newline_index;
let remaining_chunk_len = total_len - copy_from_index;
self.output_specified_len(copy_from_index);
self.copy_bytes_to_start(copy_from_index, remaining_chunk_len);
self.len = remaining_chunk_len;
} else {
self.output_specified_len(total_len);
self.len = 0;
}
self.last_newline_index = 0;
}
/// Flush everything remaining to android logger.
fn flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
self.output_specified_len(total_len);
self.len = 0;
self.last_newline_index = 0;
}
/// Output the buffer up to the \0 that is temporarily placed at position `len`.
fn output_specified_len(&mut self, len: usize) {
let mut last_byte: u8 = b'\0';
mem::swap(&mut last_byte, unsafe {
self.buffer.get_unchecked_mut(len)
});
let msg: &CStr = unsafe { CStr::from_ptr(mem::transmute(self.buffer.as_ptr())) };
android_log(self.priority, self.tag, msg);
*unsafe { self.buffer.get_unchecked_mut(len) } = last_byte;
}
/// Copy `len` bytes from `index` position to starting position.
fn copy_bytes_to_start(&mut self, index: usize, len: usize) {
let src = unsafe { self.buffer.as_ptr().offset(index as isize) };
let dst = self.buffer.as_mut_ptr();
unsafe { ptr::copy(src, dst, len) };
}
}
impl<'a> fmt::Write for PlatformLogWriter<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut incoming_bytes = s.as_bytes();
while !incoming_bytes.is_empty() {
let len = self.len;
// write everything possible to buffer and mark last \n
let new_len = len + incomming_bytes.len();
let last_newline = self.buffer[len..LOGGING_MSG_MAX_LEN]
.iter_mut()
.zip(incoming_bytes)
.enumerate()
.fold(None, |acc, (i, (output, input))| {
*output = *input;
if *input == b'\n' {
Some(i)
} else {
acc
}
});
// update last \n index
if let Some(newline) = last_newline {
self.last_newline_index = len + newline;
}
// calculate how many bytes were written
let written_len = if new_len <= LOGGING_MSG_MAX_LEN {
// if the maximum length was not exceeded
self.len = new_len;
new_len - len // written len
} else {
// if the maximum length was exceeded
self.len = LOGGING_MSG_MAX_LEN;
self.temporal_flush();
LOGGING_MSG_MAX_LEN - len // written len
};
incoming_bytes = &incoming_bytes[written_len..];
}
Ok(())
}
}
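// A sketch of the chunking contract above: a record longer than
// LOGGING_MSG_MAX_LEN is emitted through several android_log calls. On
// non-Android targets android_log is a no-op stub, so this test can only
// observe the writer's internal state.
#[cfg(all(test, not(target_os = "android")))]
mod chunking_sketch {
    use super::*;
    use std::ffi::CString;
    use std::fmt::Write;

    #[test]
    fn oversized_message_is_drained() {
        let tag = CString::new("tag").unwrap();
        let mut writer = PlatformLogWriter::new(Level::Info, &tag);
        let msg = "x".repeat(LOGGING_MSG_MAX_LEN * 2 + 100);
        writer.write_str(&msg).unwrap();
        writer.flush();
        assert_eq!(writer.len, 0);
    }
}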
/// Send a log record to the Android logging backend.
///
/// This action does not require initialization. However, without initialization it
/// will use the default filter, which allows all logs.
pub fn log(record: &Record) {
ANDROID_LOGGER.log(record)
}
/// Initializes the global logger with an android logger.
///
/// This can be called many times, but will only initialize logging once,
/// and will not replace any other previously initialized logger.
///
/// It is ok to call this at the activity creation, and it will be
/// repeatedly called on every lifecycle restart (i.e. screen rotation).
pub fn init_once(filter: Filter) {
if let Err(err) = log::set_logger(&*ANDROID_LOGGER) {
debug!("android_logger: log::set_logger failed: {}", err);
} else {
if let Some(level) = filter.log_level {
log::set_max_level(level.to_level_filter());
}
*ANDROID_LOGGER
.filter
.write()
.expect("failed to acquire android_log filter lock for write") = filter;
}
}
| {
self.allow_module_paths.push(path.into());
self
} | identifier_body |
lib.rs | // Copyright 2016 The android_logger Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A logger which writes to android output.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! /// Android code may not have obvious "main", this is just an example.
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! );
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//! }
//! ```
//!
//! ## Example with module path filter
//!
//! It is possible to limit log messages to output from a specific crate:
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! .with_allowed_module_path("hello::crate")
//! );
//!
//! //..
//! }
//! ```
#[cfg(target_os = "android")]
extern crate android_log_sys as log_ffi;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::sync::RwLock;
#[cfg(target_os = "android")]
use log_ffi::LogPriority;
use log::{Level, Log, Metadata, Record};
use std::ffi::CStr;
use std::mem;
use std::fmt;
use std::ptr;
/// Output a log message to the Android system.
#[cfg(target_os = "android")]
fn android_log(prio: log_ffi::LogPriority, tag: &CStr, msg: &CStr) {
unsafe {
log_ffi::__android_log_write(
prio as log_ffi::c_int,
tag.as_ptr() as *const log_ffi::c_char,
msg.as_ptr() as *const log_ffi::c_char,
)
};
}
/// Dummy output placeholder for tests.
#[cfg(not(target_os = "android"))]
fn | (_priority: Level, _tag: &CStr, _msg: &CStr) {}
/// Underlying android logger backend
pub struct AndroidLogger {
filter: RwLock<Filter>,
}
lazy_static! {
static ref ANDROID_LOGGER: AndroidLogger = AndroidLogger::default();
}
const LOGGING_TAG_MAX_LEN: usize = 23;
const LOGGING_MSG_MAX_LEN: usize = 4000;
impl Default for AndroidLogger {
/// Create a new logger with default filter
fn default() -> AndroidLogger {
AndroidLogger {
filter: RwLock::new(Filter::default()),
}
}
}
impl Log for AndroidLogger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
if let Some(module_path) = record.module_path() {
let filter = self.filter
.read()
.expect("failed to acquire android_log filter lock for read");
if !filter.is_module_path_allowed(module_path) {
return;
}
}
// tag must not exceed LOGGING_TAG_MAX_LEN
let mut tag_bytes: [u8; LOGGING_TAG_MAX_LEN + 1] = unsafe { mem::uninitialized() };
// truncate the tag here to fit into LOGGING_TAG_MAX_LEN
self.fill_tag_bytes(&mut tag_bytes, record);
// use stack array as C string
let tag: &CStr = unsafe { CStr::from_ptr(mem::transmute(tag_bytes.as_ptr())) };
// message must not exceed LOGGING_MSG_MAX_LEN
// therefore split log message into multiple log calls
let mut writer = PlatformLogWriter::new(record.level(), tag);
// use PlatformLogWriter to output chunks if they exceed max size
let _ = fmt::write(&mut writer, *record.args());
// output the remaining message (this would usually be the most common case)
writer.flush();
}
fn flush(&self) {}
}
impl AndroidLogger {
fn fill_tag_bytes(&self, array: &mut [u8], record: &Record) {
let tag_bytes_iter = record.module_path().unwrap_or_default().bytes();
if tag_bytes_iter.len() > LOGGING_TAG_MAX_LEN {
for (input, output) in tag_bytes_iter
.take(LOGGING_TAG_MAX_LEN - 2)
.chain(b"..\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
} else {
for (input, output) in tag_bytes_iter
.chain(b"\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
}
}
}
/// Filter for android logger.
pub struct Filter {
log_level: Option<Level>,
allow_module_paths: Vec<String>,
}
impl Default for Filter {
fn default() -> Self {
Filter {
log_level: None,
allow_module_paths: Vec::new(),
}
}
}
impl Filter {
/// Change the minimum log level.
///
/// All values above the set level are logged. For example, if
/// `Warn` is set, the `Error` is logged too, but `Info` isn't.
pub fn with_min_level(mut self, level: Level) -> Self {
self.log_level = Some(level);
self
}
/// Set allowed module path.
///
/// Allow log entry only if module path matches specified path exactly.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default().with_allowed_module_path("crate");
///
/// assert!(filter.is_module_path_allowed("crate"));
/// assert!(!filter.is_module_path_allowed("other_crate"));
/// assert!(!filter.is_module_path_allowed("crate::subcrate"));
/// ```
///
/// ## Multiple rules example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_path("A")
/// .with_allowed_module_path("B");
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_path<S: Into<String>>(mut self, path: S) -> Self {
self.allow_module_paths.push(path.into());
self
}
/// Set multiple allowed module paths.
///
/// Same as `with_allowed_module_path`, but accepts list of paths.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_paths(["A", "B"].iter().map(|i| i.to_string()));
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_paths<I: IntoIterator<Item = String>>(mut self, paths: I) -> Self {
self.allow_module_paths.extend(paths.into_iter());
self
}
/// Check if module path is allowed by filter rules.
pub fn is_module_path_allowed(&self, path: &str) -> bool {
if self.allow_module_paths.is_empty() {
return true;
}
self.allow_module_paths
.iter()
.any(|allowed_path| allowed_path == path)
}
}
#[cfg(test)]
mod tests {
use super::Filter;
#[test]
fn with_allowed_module_path() {
assert!(Filter::default().is_module_path_allowed("random"));
}
}
struct PlatformLogWriter<'a> {
#[cfg(target_os = "android")] priority: LogPriority,
#[cfg(not(target_os = "android"))] priority: Level,
len: usize,
last_newline_index: usize,
tag: &'a CStr,
buffer: [u8; LOGGING_MSG_MAX_LEN + 1],
}
impl<'a> PlatformLogWriter<'a> {
#[cfg(target_os = "android")]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: match level {
Level::Warn => LogPriority::WARN,
Level::Info => LogPriority::INFO,
Level::Debug => LogPriority::DEBUG,
Level::Error => LogPriority::ERROR,
Level::Trace => LogPriority::VERBOSE,
},
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
#[cfg(not(target_os = "android"))]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: level,
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
/// Flush some bytes to android logger.
///
/// If there is a newline, flush up to it.
/// If there was no newline, flush all.
///
/// Not guaranteed to flush everything.
fn temporal_flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
if self.last_newline_index > 0 {
let copy_from_index = self.last_newline_index;
let remaining_chunk_len = total_len - copy_from_index;
self.output_specified_len(copy_from_index);
self.copy_bytes_to_start(copy_from_index, remaining_chunk_len);
self.len = remaining_chunk_len;
} else {
self.output_specified_len(total_len);
self.len = 0;
}
self.last_newline_index = 0;
}
/// Flush everything remaining to android logger.
fn flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
self.output_specified_len(total_len);
self.len = 0;
self.last_newline_index = 0;
}
/// Output buffer up until the \0 which will be placed at `len` position.
fn output_specified_len(&mut self, len: usize) {
let mut last_byte: u8 = b'\0';
mem::swap(&mut last_byte, unsafe {
self.buffer.get_unchecked_mut(len)
});
let msg: &CStr = unsafe { CStr::from_ptr(mem::transmute(self.buffer.as_ptr())) };
android_log(self.priority, self.tag, msg);
*unsafe { self.buffer.get_unchecked_mut(len) } = last_byte;
}
/// Copy `len` bytes from `index` position to starting position.
fn copy_bytes_to_start(&mut self, index: usize, len: usize) {
let src = unsafe { self.buffer.as_ptr().offset(index as isize) };
let dst = self.buffer.as_mut_ptr();
unsafe { ptr::copy(src, dst, len) };
}
}
impl<'a> fmt::Write for PlatformLogWriter<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut incoming_bytes = s.as_bytes();
while !incoming_bytes.is_empty() {
let len = self.len;
// write everything possible to buffer and mark last \n
let new_len = len + incoming_bytes.len();
let last_newline = self.buffer[len..LOGGING_MSG_MAX_LEN]
.iter_mut()
.zip(incoming_bytes)
.enumerate()
.fold(None, |acc, (i, (output, input))| {
*output = *input;
if *input == b'\n' {
Some(i)
} else {
acc
}
});
// update last \n index
if let Some(newline) = last_newline {
self.last_newline_index = len + newline;
}
// calculate how many bytes were written
let written_len = if new_len <= LOGGING_MSG_MAX_LEN {
// if the len was not exceeded
self.len = new_len;
new_len - len // written len
} else {
// if new length was exceeded
self.len = LOGGING_MSG_MAX_LEN;
self.temporal_flush();
LOGGING_MSG_MAX_LEN - len // written len
};
incoming_bytes = &incoming_bytes[written_len..];
}
Ok(())
}
}
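// Added sketch (a hypothetical test, assuming the items above are in scope):
// the Write impl guarantees each android_log call sees at most
// LOGGING_MSG_MAX_LEN bytes, preferring to split at the last '\n' buffered.
#[cfg(test)]
mod chunking_tests {
    use super::*;
    use std::ffi::CString;
    use std::fmt::Write;
    #[test]
    fn long_messages_are_split() {
        let tag = CString::new("tag").unwrap();
        let mut writer = PlatformLogWriter::new(Level::Info, &tag);
        // Two and a half buffers worth of data forces intermediate flushes.
        let long = "x".repeat(LOGGING_MSG_MAX_LEN * 5 / 2);
        writer.write_str(&long).unwrap();
        writer.flush();
        assert_eq!(writer.len, 0); // everything was emitted
    }
}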
/// Send a log record to Android logging backend.
///
/// This action does not require initialization. However, without initialization it
/// will use the default filter, which allows all logs.
pub fn log(record: &Record) {
ANDROID_LOGGER.log(record)
}
/// Initializes the global logger with an android logger.
///
/// This can be called many times, but will only initialize logging once,
/// and will not replace any other previously initialized logger.
///
/// It is OK to call this at activity creation; it will be
/// called repeatedly on every lifecycle restart (e.g. screen rotation).
pub fn init_once(filter: Filter) {
if let Err(err) = log::set_logger(&*ANDROID_LOGGER) {
debug!("android_logger: log::set_logger failed: {}", err);
} else {
if let Some(level) = filter.log_level {
log::set_max_level(level.to_level_filter());
}
*ANDROID_LOGGER
.filter
.write()
.expect("failed to acquire android_log filter lock for write") = filter;
}
}
| android_log | identifier_name |
lib.rs | // Copyright 2016 The android_logger Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A logger which writes to the Android log output.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! /// Android code may not have an obvious "main"; this is just an example.
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! );
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//! }
//! ```
//!
//! ## Example with module path filter
//!
//! It is possible to limit log messages to output from a specific crate:
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! .with_allowed_module_path("hello::crate")
//! );
//!
//! //..
//! }
//! ```
#[cfg(target_os = "android")]
extern crate android_log_sys as log_ffi;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::sync::RwLock;
#[cfg(target_os = "android")]
use log_ffi::LogPriority;
use log::{Level, Log, Metadata, Record};
use std::ffi::CStr;
use std::mem;
use std::fmt;
use std::ptr;
/// Output log to android system.
#[cfg(target_os = "android")]
fn android_log(prio: log_ffi::LogPriority, tag: &CStr, msg: &CStr) {
unsafe {
log_ffi::__android_log_write(
prio as log_ffi::c_int,
tag.as_ptr() as *const log_ffi::c_char,
msg.as_ptr() as *const log_ffi::c_char,
)
};
}
/// Dummy output placeholder for tests.
#[cfg(not(target_os = "android"))]
fn android_log(_priority: Level, _tag: &CStr, _msg: &CStr) {}
/// Underlying android logger backend
pub struct AndroidLogger {
filter: RwLock<Filter>,
}
lazy_static! {
static ref ANDROID_LOGGER: AndroidLogger = AndroidLogger::default();
}
const LOGGING_TAG_MAX_LEN: usize = 23;
const LOGGING_MSG_MAX_LEN: usize = 4000;
impl Default for AndroidLogger {
/// Create a new logger with default filter
fn default() -> AndroidLogger {
AndroidLogger {
filter: RwLock::new(Filter::default()),
}
}
}
impl Log for AndroidLogger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
if let Some(module_path) = record.module_path() {
let filter = self.filter
.read()
.expect("failed to acquire android_log filter lock for read");
if !filter.is_module_path_allowed(module_path) {
return;
}
}
// tag must not exceed LOGGING_TAG_MAX_LEN
let mut tag_bytes: [u8; LOGGING_TAG_MAX_LEN + 1] = unsafe { mem::uninitialized() };
// truncate the tag here to fit into LOGGING_TAG_MAX_LEN
self.fill_tag_bytes(&mut tag_bytes, record);
// use stack array as C string
let tag: &CStr = unsafe { CStr::from_ptr(mem::transmute(tag_bytes.as_ptr())) };
// message must not exceed LOGGING_MSG_MAX_LEN
// therefore split log message into multiple log calls
let mut writer = PlatformLogWriter::new(record.level(), tag);
// use PlatformLogWriter to output chunks if they exceed max size
let _ = fmt::write(&mut writer, *record.args());
// output the remaining message (this would usually be the most common case)
writer.flush();
}
fn flush(&self) {}
}
impl AndroidLogger {
fn fill_tag_bytes(&self, array: &mut [u8], record: &Record) {
let tag_bytes_iter = record.module_path().unwrap_or_default().bytes();
if tag_bytes_iter.len() > LOGGING_TAG_MAX_LEN {
for (input, output) in tag_bytes_iter
.take(LOGGING_TAG_MAX_LEN - 2)
.chain(b"..\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
} else {
for (input, output) in tag_bytes_iter
.chain(b"\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
}
}
}
/// Filter for android logger.
pub struct Filter {
log_level: Option<Level>,
allow_module_paths: Vec<String>,
}
impl Default for Filter {
fn default() -> Self {
Filter {
log_level: None,
allow_module_paths: Vec::new(),
}
}
}
impl Filter {
/// Change the minimum log level.
///
/// All messages at or above the set severity are logged. For example, if
/// `Warn` is set, `Error` is logged too, but `Info` isn't.
pub fn with_min_level(mut self, level: Level) -> Self {
self.log_level = Some(level);
self
}
/// Set allowed module path.
///
/// Allow log entry only if module path matches specified path exactly.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default().with_allowed_module_path("crate");
///
/// assert!(filter.is_module_path_allowed("crate"));
/// assert!(!filter.is_module_path_allowed("other_crate"));
/// assert!(!filter.is_module_path_allowed("crate::subcrate"));
/// ```
///
/// ## Multiple rules example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_path("A")
/// .with_allowed_module_path("B");
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_path<S: Into<String>>(mut self, path: S) -> Self {
self.allow_module_paths.push(path.into());
self
}
/// Set multiple allowed module paths.
///
/// Same as `with_allowed_module_path`, but accepts a list of paths.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_paths(["A", "B"].iter().map(|i| i.to_string()));
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_paths<I: IntoIterator<Item = String>>(mut self, paths: I) -> Self {
self.allow_module_paths.extend(paths.into_iter());
self
}
/// Check if module path is allowed by filter rules.
pub fn is_module_path_allowed(&self, path: &str) -> bool {
if self.allow_module_paths.is_empty() {
return true;
}
self.allow_module_paths
.iter()
.any(|allowed_path| allowed_path == path)
}
}
#[cfg(test)]
mod tests {
use super::Filter;
#[test]
fn with_allowed_module_path() {
assert!(Filter::default().is_module_path_allowed("random"));
}
}
struct PlatformLogWriter<'a> {
#[cfg(target_os = "android")] priority: LogPriority,
#[cfg(not(target_os = "android"))] priority: Level,
len: usize,
last_newline_index: usize,
tag: &'a CStr,
buffer: [u8; LOGGING_MSG_MAX_LEN + 1],
}
impl<'a> PlatformLogWriter<'a> {
#[cfg(target_os = "android")]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: match level {
Level::Warn => LogPriority::WARN,
Level::Info => LogPriority::INFO,
Level::Debug => LogPriority::DEBUG,
Level::Error => LogPriority::ERROR,
Level::Trace => LogPriority::VERBOSE,
},
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
#[cfg(not(target_os = "android"))]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: level,
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
/// Flush some bytes to android logger.
///
/// If there is a newline, flush up to it.
/// If there was no newline, flush all.
///
/// Not guaranteed to flush everything.
fn temporal_flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
if self.last_newline_index > 0 {
let copy_from_index = self.last_newline_index;
let remaining_chunk_len = total_len - copy_from_index;
self.output_specified_len(copy_from_index);
self.copy_bytes_to_start(copy_from_index, remaining_chunk_len);
self.len = remaining_chunk_len;
} else {
self.output_specified_len(total_len);
self.len = 0;
}
self.last_newline_index = 0;
}
/// Flush everything remaining to android logger.
fn flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
self.output_specified_len(total_len);
self.len = 0;
self.last_newline_index = 0;
}
/// Output buffer up until the \0 which will be placed at `len` position.
fn output_specified_len(&mut self, len: usize) {
let mut last_byte: u8 = b'\0';
mem::swap(&mut last_byte, unsafe {
self.buffer.get_unchecked_mut(len)
});
let msg: &CStr = unsafe { CStr::from_ptr(mem::transmute(self.buffer.as_ptr())) };
android_log(self.priority, self.tag, msg);
| }
/// Copy `len` bytes from `index` position to starting position.
fn copy_bytes_to_start(&mut self, index: usize, len: usize) {
let src = unsafe { self.buffer.as_ptr().offset(index as isize) };
let dst = self.buffer.as_mut_ptr();
unsafe { ptr::copy(src, dst, len) };
}
}
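// Added note (not original code): output_specified_len above temporarily
// swaps a NUL byte into position `len` so the buffer can be viewed as a C
// string without copying, then restores the original byte. The same trick in
// isolation:
fn with_temp_nul_example(buf: &mut [u8], len: usize, f: impl FnOnce(&[u8])) {
    let saved = buf[len];
    buf[len] = b'\0';
    f(&buf[..=len]); // the slice now ends with the NUL terminator
    buf[len] = saved;
}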
impl<'a> fmt::Write for PlatformLogWriter<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut incoming_bytes = s.as_bytes();
while !incoming_bytes.is_empty() {
let len = self.len;
// write everything possible to buffer and mark last \n
let new_len = len + incoming_bytes.len();
let last_newline = self.buffer[len..LOGGING_MSG_MAX_LEN]
.iter_mut()
.zip(incoming_bytes)
.enumerate()
.fold(None, |acc, (i, (output, input))| {
*output = *input;
if *input == b'\n' {
Some(i)
} else {
acc
}
});
// update last \n index
if let Some(newline) = last_newline {
self.last_newline_index = len + newline;
}
// calculate how many bytes were written
let written_len = if new_len <= LOGGING_MSG_MAX_LEN {
// if the len was not exceeded
self.len = new_len;
new_len - len // written len
} else {
// if new length was exceeded
self.len = LOGGING_MSG_MAX_LEN;
self.temporal_flush();
LOGGING_MSG_MAX_LEN - len // written len
};
incoming_bytes = &incoming_bytes[written_len..];
}
Ok(())
}
}
/// Send a log record to Android logging backend.
///
/// This action does not require initialization. However, without initialization it
/// will use the default filter, which allows all logs.
pub fn log(record: &Record) {
ANDROID_LOGGER.log(record)
}
/// Initializes the global logger with an android logger.
///
/// This can be called many times, but will only initialize logging once,
/// and will not replace any other previously initialized logger.
///
/// It is OK to call this at activity creation; it will be
/// called repeatedly on every lifecycle restart (e.g. screen rotation).
pub fn init_once(filter: Filter) {
if let Err(err) = log::set_logger(&*ANDROID_LOGGER) {
debug!("android_logger: log::set_logger failed: {}", err);
} else {
if let Some(level) = filter.log_level {
log::set_max_level(level.to_level_filter());
}
*ANDROID_LOGGER
.filter
.write()
.expect("failed to acquire android_log filter lock for write") = filter;
}
} | *unsafe { self.buffer.get_unchecked_mut(len) } = last_byte; | random_line_split |
lib.rs | // Copyright 2016 The android_logger Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A logger which writes to the Android log output.
//!
//! ## Example
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! /// Android code may not have an obvious "main"; this is just an example.
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! );
//!
//! debug!("this is a debug {}", "message");
//! error!("this is printed by default");
//! }
//! ```
//!
//! ## Example with module path filter
//!
//! It is possible to limit log messages to output from a specific crate:
//!
//! ```
//! #[macro_use] extern crate log;
//! extern crate android_logger;
//!
//! use log::Level;
//! use android_logger::Filter;
//!
//! fn main() {
//! android_logger::init_once(
//! Filter::default()
//! .with_min_level(Level::Trace)
//! .with_allowed_module_path("hello::crate")
//! );
//!
//! //..
//! }
//! ```
#[cfg(target_os = "android")]
extern crate android_log_sys as log_ffi;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
use std::sync::RwLock;
#[cfg(target_os = "android")]
use log_ffi::LogPriority;
use log::{Level, Log, Metadata, Record};
use std::ffi::CStr;
use std::mem;
use std::fmt;
use std::ptr;
/// Output log to android system.
#[cfg(target_os = "android")]
fn android_log(prio: log_ffi::LogPriority, tag: &CStr, msg: &CStr) {
unsafe {
log_ffi::__android_log_write(
prio as log_ffi::c_int,
tag.as_ptr() as *const log_ffi::c_char,
msg.as_ptr() as *const log_ffi::c_char,
)
};
}
/// Dummy output placeholder for tests.
#[cfg(not(target_os = "android"))]
fn android_log(_priority: Level, _tag: &CStr, _msg: &CStr) {}
/// Underlying android logger backend
pub struct AndroidLogger {
filter: RwLock<Filter>,
}
lazy_static! {
static ref ANDROID_LOGGER: AndroidLogger = AndroidLogger::default();
}
const LOGGING_TAG_MAX_LEN: usize = 23;
const LOGGING_MSG_MAX_LEN: usize = 4000;
impl Default for AndroidLogger {
/// Create a new logger with default filter
fn default() -> AndroidLogger {
AndroidLogger {
filter: RwLock::new(Filter::default()),
}
}
}
impl Log for AndroidLogger {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
if let Some(module_path) = record.module_path() {
let filter = self.filter
.read()
.expect("failed to acquire android_log filter lock for read");
if !filter.is_module_path_allowed(module_path) {
return;
}
}
// tag must not exceed LOGGING_TAG_MAX_LEN
let mut tag_bytes: [u8; LOGGING_TAG_MAX_LEN + 1] = unsafe { mem::uninitialized() };
// truncate the tag here to fit into LOGGING_TAG_MAX_LEN
self.fill_tag_bytes(&mut tag_bytes, record);
// use stack array as C string
let tag: &CStr = unsafe { CStr::from_ptr(mem::transmute(tag_bytes.as_ptr())) };
// message must not exceed LOGGING_MSG_MAX_LEN
// therefore split log message into multiple log calls
let mut writer = PlatformLogWriter::new(record.level(), tag);
// use PlatformLogWriter to output chunks if they exceed max size
let _ = fmt::write(&mut writer, *record.args());
// output the remaining message (this would usually be the most common case)
writer.flush();
}
fn flush(&self) {}
}
impl AndroidLogger {
fn fill_tag_bytes(&self, array: &mut [u8], record: &Record) {
let tag_bytes_iter = record.module_path().unwrap_or_default().bytes();
if tag_bytes_iter.len() > LOGGING_TAG_MAX_LEN {
for (input, output) in tag_bytes_iter
.take(LOGGING_TAG_MAX_LEN - 2)
.chain(b"..\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
} else {
for (input, output) in tag_bytes_iter
.chain(b"\0".iter().cloned())
.zip(array.iter_mut())
{
*output = input;
}
}
}
}
/// Filter for android logger.
pub struct Filter {
log_level: Option<Level>,
allow_module_paths: Vec<String>,
}
impl Default for Filter {
fn default() -> Self {
Filter {
log_level: None,
allow_module_paths: Vec::new(),
}
}
}
impl Filter {
/// Change the minimum log level.
///
/// All messages at or above the set severity are logged. For example, if
/// `Warn` is set, `Error` is logged too, but `Info` isn't.
pub fn with_min_level(mut self, level: Level) -> Self {
self.log_level = Some(level);
self
}
/// Set allowed module path.
///
/// Allow log entry only if module path matches specified path exactly.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default().with_allowed_module_path("crate");
///
/// assert!(filter.is_module_path_allowed("crate"));
/// assert!(!filter.is_module_path_allowed("other_crate"));
/// assert!(!filter.is_module_path_allowed("crate::subcrate"));
/// ```
///
/// ## Multiple rules example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_path("A")
/// .with_allowed_module_path("B");
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_path<S: Into<String>>(mut self, path: S) -> Self {
self.allow_module_paths.push(path.into());
self
}
/// Set multiple allowed module paths.
///
/// Same as `with_allowed_module_path`, but accepts a list of paths.
///
/// ## Example:
///
/// ```
/// use android_logger::Filter;
///
/// let filter = Filter::default()
/// .with_allowed_module_paths(["A", "B"].iter().map(|i| i.to_string()));
///
/// assert!(filter.is_module_path_allowed("A"));
/// assert!(filter.is_module_path_allowed("B"));
/// assert!(!filter.is_module_path_allowed("C"));
/// assert!(!filter.is_module_path_allowed("A::B"));
/// ```
pub fn with_allowed_module_paths<I: IntoIterator<Item = String>>(mut self, paths: I) -> Self {
self.allow_module_paths.extend(paths.into_iter());
self
}
/// Check if module path is allowed by filter rules.
pub fn is_module_path_allowed(&self, path: &str) -> bool {
if self.allow_module_paths.is_empty() {
return true;
}
self.allow_module_paths
.iter()
.any(|allowed_path| allowed_path == path)
}
}
#[cfg(test)]
mod tests {
use super::Filter;
#[test]
fn with_allowed_module_path() {
assert!(Filter::default().is_module_path_allowed("random"));
}
}
struct PlatformLogWriter<'a> {
#[cfg(target_os = "android")] priority: LogPriority,
#[cfg(not(target_os = "android"))] priority: Level,
len: usize,
last_newline_index: usize,
tag: &'a CStr,
buffer: [u8; LOGGING_MSG_MAX_LEN + 1],
}
impl<'a> PlatformLogWriter<'a> {
#[cfg(target_os = "android")]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: match level {
Level::Warn => LogPriority::WARN,
Level::Info => LogPriority::INFO,
Level::Debug => LogPriority::DEBUG,
Level::Error => LogPriority::ERROR,
Level::Trace => LogPriority::VERBOSE,
},
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
#[cfg(not(target_os = "android"))]
pub fn new(level: Level, tag: &CStr) -> PlatformLogWriter {
PlatformLogWriter {
priority: level,
len: 0,
last_newline_index: 0,
tag: tag,
buffer: unsafe { mem::uninitialized() },
}
}
/// Flush some bytes to android logger.
///
/// If there is a newline, flush up to it.
/// If there was no newline, flush all.
///
/// Not guaranteed to flush everything.
fn temporal_flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
if self.last_newline_index > 0 {
let copy_from_index = self.last_newline_index;
let remaining_chunk_len = total_len - copy_from_index;
self.output_specified_len(copy_from_index);
self.copy_bytes_to_start(copy_from_index, remaining_chunk_len);
self.len = remaining_chunk_len;
} else {
self.output_specified_len(total_len);
self.len = 0;
}
self.last_newline_index = 0;
}
/// Flush everything remaining to android logger.
fn flush(&mut self) {
let total_len = self.len;
if total_len == 0 {
return;
}
self.output_specified_len(total_len);
self.len = 0;
self.last_newline_index = 0;
}
/// Output buffer up until the \0 which will be placed at `len` position.
fn output_specified_len(&mut self, len: usize) {
let mut last_byte: u8 = b'\0';
mem::swap(&mut last_byte, unsafe {
self.buffer.get_unchecked_mut(len)
});
let msg: &CStr = unsafe { CStr::from_ptr(mem::transmute(self.buffer.as_ptr())) };
android_log(self.priority, self.tag, msg);
*unsafe { self.buffer.get_unchecked_mut(len) } = last_byte;
}
/// Copy `len` bytes from `index` position to starting position.
fn copy_bytes_to_start(&mut self, index: usize, len: usize) {
let src = unsafe { self.buffer.as_ptr().offset(index as isize) };
let dst = self.buffer.as_mut_ptr();
unsafe { ptr::copy(src, dst, len) };
}
}
impl<'a> fmt::Write for PlatformLogWriter<'a> {
fn write_str(&mut self, s: &str) -> fmt::Result {
let mut incoming_bytes = s.as_bytes();
while !incoming_bytes.is_empty() {
let len = self.len;
// write everything possible to buffer and mark last \n
let new_len = len + incoming_bytes.len();
let last_newline = self.buffer[len..LOGGING_MSG_MAX_LEN]
.iter_mut()
.zip(incoming_bytes)
.enumerate()
.fold(None, |acc, (i, (output, input))| {
*output = *input;
if *input == b'\n' {
Some(i)
} else |
});
// update last \n index
if let Some(newline) = last_newline {
self.last_newline_index = len + newline;
}
// calculate how many bytes were written
let written_len = if new_len <= LOGGING_MSG_MAX_LEN {
// if the len was not exceeded
self.len = new_len;
new_len - len // written len
} else {
// if new length was exceeded
self.len = LOGGING_MSG_MAX_LEN;
self.temporal_flush();
LOGGING_MSG_MAX_LEN - len // written len
};
incoming_bytes = &incoming_bytes[written_len..];
}
Ok(())
}
}
/// Send a log record to Android logging backend.
///
/// This action does not require initialization. However, without initialization it
/// will use the default filter, which allows all logs.
pub fn log(record: &Record) {
ANDROID_LOGGER.log(record)
}
/// Initializes the global logger with an android logger.
///
/// This can be called many times, but will only initialize logging once,
/// and will not replace any other previously initialized logger.
///
/// It is OK to call this at activity creation; it will be
/// called repeatedly on every lifecycle restart (e.g. screen rotation).
pub fn init_once(filter: Filter) {
if let Err(err) = log::set_logger(&*ANDROID_LOGGER) {
debug!("android_logger: log::set_logger failed: {}", err);
} else {
if let Some(level) = filter.log_level {
log::set_max_level(level.to_level_filter());
}
*ANDROID_LOGGER
.filter
.write()
.expect("failed to acquire android_log filter lock for write") = filter;
}
}
| {
acc
} | conditional_block |
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// Functions that allow loading images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary data yielded by the iterator. This function is concurrent:
/// it spawns one thread per image to process them in parallel. The returned vector contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut dyn Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
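// Added usage sketch (file names are illustrative; error handling elided):
fn load_icons_example() -> Vec<Arc<Image>> {
    let inputs = vec![
        (std::fs::read("a.png").unwrap(), ImageFormat::Png),
        (std::fs::read("b.jpg").unwrap(), ImageFormat::Jpg),
    ];
    image_loader::load_all(&mut inputs.into_iter())
}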
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span,
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: inner\
}}));\
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because a null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get the attribute value of the element, if any. Even if the attribute is present but
/// empty, None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we reborrow the immutable `self` reference as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val!= class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
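// Added usage sketch: the trait is object safe, so helpers can work over any
// element. `toggle_hidden_example` is hypothetical, not part of the crate.
fn toggle_hidden_example(elem: &mut dyn Element) {
    if elem.has_class("hidden") {
        elem.remove_class("hidden");
    } else {
        elem.add_class("hidden");
    }
}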
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to a dynamic element which is known to be of a given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data for an image or canvas element.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
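// Added sketch: `Wrap::new` is an unchecked downcast, so callers should check
// the tag first. `as_anchor_example` is a hypothetical helper showing the
// intended contract.
fn as_anchor_example(element: Box<dyn Element>) -> Option<Wrap<A>> {
    match element.tag_name() {
        // Safety: we just verified the element really is an <a> tag.
        TagName::A => Some(unsafe { Wrap::new(element) }),
        _ => None,
    }
}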
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
impl TagName {
/// Create an implementation of the tag by its tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H5 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new(
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn try_from_node(node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// Node must contain the ID of the element. It is also required to contain an opening tag
/// which corresponds to the element tag. If either condition is not met, this function
/// returns None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to a string that can be supplied to the 'src' attribute of an <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
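// Added example (the file path is illustrative): producing a value for an
// <img> element's 'src' attribute.
fn icon_src_example() -> std::io::Result<String> {
    let bytes = std::fs::read("icon.png")?;
    Ok(Image::from_binary(bytes, ImageFormat::Png).to_img_string())
}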
impl A {
pub fn href(&self) -> String |
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.name.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
}
| {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
} | identifier_body |
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// Functions that allow loading images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary data yielded by the iterator. This function is concurrent:
/// it spawns one thread per image to process them in parallel. The returned vector contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut dyn Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span, |
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: inner\
}}));\
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because a null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get the attribute value of the element, if any. Even if the attribute is present but
/// empty, None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we reborrow the immutable `self` reference as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val!= class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to a dynamic element which is known to be of a given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data for an image or canvas element.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
impl TagName {
/// Create an implementation of the tag by its tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H5 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new(
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn try_from_node(node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// Node must contain the ID of the element. It is also required to contain an opening tag
/// which corresponds to the element tag. If either condition is not met, this function
/// returns None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to a string that can be supplied to the 'src' attribute of an <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
impl A {
pub fn href(&self) -> String {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
}
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.name.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
} | random_line_split |
|
tags.rs | use crate::{ResponseValue, ViewWrap};
use std::fmt::Debug;
use htmldom_read::{Node};
use crate::events::OnClick;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::fmt::Formatter;
use std::sync::Arc;
/// Functions that allow loading images concurrently.
pub mod image_loader {
use std::sync::Arc;
use crate::tags::Image;
use crate::tags::ImageFormat;
use std::collections::LinkedList;
/// Load all images from binary data yielded by the iterator. This function is concurrent:
/// it spawns one thread per image to process them in parallel. The returned vector contains
/// handles to all images in the order they appeared in the iterator.
pub fn load_all(iter: &mut dyn Iterator<Item = (Vec<u8>, ImageFormat)>) -> Vec<Arc<Image>> {
use std::sync::mpsc;
use std::thread;
// Start loading images async.
let recvs = {
let mut list = LinkedList::new();
for (arr, format) in iter {
let (tx, rx) = mpsc::channel();
list.push_back(rx);
thread::spawn(move || {
let img = Image::from_binary(arr, format);
tx.send(img).unwrap();
});
}
list
};
// Collect results.
let mut vec = Vec::with_capacity(recvs.len());
for rx in recvs {
let image = rx.recv().unwrap();
let arc = Arc::new(image);
vec.push(arc);
}
vec
}
/// Load one image into Arc.
pub fn load(bin: Vec<u8>, format: ImageFormat) -> Arc<Image> {
let img = Image::from_binary(bin, format);
Arc::new(img)
}
}
#[derive(Clone, Debug)]
pub enum TagName {
A,
Canvas,
H4,
H5,
Img,
Li,
P,
Span,
Unknown(String)
}
/// Supported canvas image formats.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImageFormat {
Png,
Jpg,
}
/// Element in the HTML DOM that can be accessed by Rust interface.
pub trait Element: Debug {
/// Tag name of the element.
fn tag_name(&self) -> TagName;
/// HTML content of this element if it still exists.
fn dom_html(&mut self) -> Option<String> {
let req = self.view_mut().new_request();
let js = format!("\
var inner = document.getElementById('{}').outerHTML;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: inner\
}}));\
", self.id(), req.id());
let rx = req.run(js);
let response = rx.recv();
if let Err(_) = response {
return None; // likely because a null element was accessed.
}
let response = response.unwrap();
if let ResponseValue::Str(s) = response {
if s.is_empty() {
None
} else {
Some(s)
}
} else {
// Inner HTML request cannot return any other response type.
unreachable!();
}
}
/// Get the attribute value of the element, if any. Even if the attribute is present but
/// empty, None is returned.
fn attribute(&self, name: &str) -> Option<String> {
// Unsafe because we reborrow the immutable `self` reference as mutable.
let request = unsafe {
let this = &mut *(self as *const Self as *mut Self);
this.view_mut().new_request()
};
let id = request.id();
let js = format!("\
var attr = document.getElementById('{}').getAttribute('{}');\
attr = attr == null ? '' : attr;\
window.external.invoke(JSON.stringify({{\
incmd: 'attribute',\
request: {},\
value: attr\
}}));\
", self.id(), name, id);
let receiver = request.run(js);
let attr = receiver.recv().unwrap();
if let ResponseValue::Str(s) = attr {
if s == "" {
None
} else {
Some(s)
}
} else {
unreachable!()
}
}
/// Set attribute with given name to given value.
fn set_attribute(&mut self, name: &str, value: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').setAttribute('{}', '{}');",
id, name, crate::js_prefix_quotes(value)
)
);
}
/// Append given text to innerHTML field.
fn append_inner_html(&mut self, html: &str) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').innerHTML += '{}';",
id, crate::js_prefix_quotes(html)
)
);
}
/// Clears the outerHTML of the element to remove it from HTML completely.
fn remove_from_html(&mut self) {
let id = self.id().to_owned();
self.view_mut().eval(
format!(
"document.getElementById('{}').outerHTML = '';",
id
)
);
}
/// Element ID.
fn id(&self) -> &String;
/// Change element ID.
fn set_id(&mut self, new_id: &str) {
self.set_attribute("id", new_id)
}
fn view(&self) -> &ViewWrap;
fn view_mut(&mut self) -> &mut ViewWrap {
let p = self.view() as *const ViewWrap as *mut ViewWrap;
unsafe { &mut *p }
}
/// Check whether this element still exists.
/// Actions on non-existing elements have no effect.
fn exists(&mut self) -> bool {
self.dom_html().is_some()
}
fn add_class(&mut self, class: &str) {
let attr = self.attribute("class");
let mut attr = if let Some(s) = attr {
s
} else {
String::with_capacity(class.len())
};
attr.push(' ');
attr.push_str(class);
self.set_attribute("class", &attr);
}
fn remove_class(&mut self, class: &str) {
let attr = self.attribute("class");
if attr.is_none() {
self.set_attribute("class", class);
return;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
let mut new_str = String::with_capacity(attr.len());
for val in split {
if val!= class {
new_str.push_str(val);
}
}
self.set_attribute("class", &new_str);
}
fn has_class(&self, class: &str) -> bool {
let attr = self.attribute("class");
if attr.is_none() {
return false;
}
let attr = attr.unwrap();
let split = attr.split_whitespace();
for s in split {
if s == class {
return true;
}
}
false
}
}
/// Text content can be set to some text value and read this content back.
pub trait TextContent: Element {
/// Get text contained by this element.
fn text(&self) -> String {
if let Some(s) = self.attribute("textContent") {
s
} else {
String::new()
}
}
fn set_text<T: AsRef<str>>(&mut self, text: T) {
self.set_attribute("textContent", text.as_ref())
}
}
pub trait ImageContent: Element {
/// Set image data to this element.
fn set_image(&mut self, img: Arc<Image>);
/// Get image data of this element.
fn image(&self) -> Option<&Arc<Image>>;
/// Remove any supplied image data.
fn remove_image(&mut self) -> Option<Arc<Image>>;
}
macro_rules! elm_impl {
($name: ident) => {
impl Element for $name {
fn view(&self) -> &ViewWrap {
&self.view
}
fn id(&self) -> &String {
&self.id
}
fn tag_name(&self) -> TagName {
TagName::$name
}
}
}
}
/// Wrap that gives access to a dynamic element which is known to be of a given type.
#[derive(Debug)]
pub struct Wrap<T: Element> {
element: Box<dyn Element>,
_p: PhantomData<T>,
}
/// Image data for an image or canvas element.
#[derive(Clone)]
pub struct Image {
base64: String,
format: ImageFormat,
}
#[derive(Debug)]
pub struct A {
view: ViewWrap,
id: String,
onclick: OnClick<A>,
}
#[derive(Debug)]
pub struct Canvas {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H4 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct H5 {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Img {
view: ViewWrap,
id: String,
data: Option<Arc<Image>>,
}
#[derive(Clone, Debug)]
pub struct Li {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct P {
view: ViewWrap,
id: String,
}
#[derive(Clone, Debug)]
pub struct Span {
view: ViewWrap,
id: String,
}
elm_impl!(A);
elm_impl!(Canvas);
elm_impl!(H4);
elm_impl!(H5);
elm_impl!(Img);
elm_impl!(Li);
elm_impl!(P);
elm_impl!(Span);
#[derive(Clone, Debug)]
pub struct Unknown {
view: ViewWrap,
id: String,
name: String,
}
impl<T> Wrap<T> where T: Element {
/// Wrap given element.
///
/// # Safety
/// Programmer must be sure this element has expected type.
pub unsafe fn new(element: Box<dyn Element>) -> Self {
Wrap {
element,
_p: Default::default(),
}
}
}
impl<T> Deref for Wrap<T> where T: Element {
type Target = Box<T>;
fn deref(&self) -> &Box<T> {
let b = &self.element;
let ptr = b as *const Box<dyn Element> as *const Box<T>;
unsafe { &*ptr }
}
}
impl<T> DerefMut for Wrap<T> where T: Element {
fn deref_mut(&mut self) -> &mut Box<T> {
let b = &mut self.element;
let ptr = b as *mut Box<dyn Element> as *mut Box<T>;
unsafe { &mut *ptr }
}
}
impl Debug for Image {
fn fmt(&self, fmt: &mut Formatter) -> std::fmt::Result {
write!(fmt, "Image {{ base64: [char; ")?;
write!(fmt, "{}", self.base64.len())?;
write!(fmt, "], format: ")?;
write!(fmt, "{:?}", self.format)?;
write!(fmt, " }}")?;
Ok(())
}
}
impl From<&str> for TagName {
fn from(s: &str) -> Self {
use self::TagName::*;
match s.to_lowercase().as_str() {
"a" => A,
"canvas" => Canvas,
"h4" => H4,
"h5" => H5,
"img" => Img,
"li" => Li,
"p" => P,
"span" => Span,
_ => Unknown(String::from(s)),
}
}
}
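// Editor's note: matching is case-insensitive, so TagName::from("A") and
// TagName::from("a") both yield TagName::A, while an unrecognized tag such as
// TagName::from("div") yields TagName::Unknown("div".to_string()).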
impl TagName {
/// Create an implementation of the tag from its tag name.
pub fn new_impl(&self, view: ViewWrap, id: String) -> Box<dyn Element> {
match self {
TagName::A => {
let mut b = Box::new(A {
view,
id,
onclick: unsafe { OnClick::null() },
});
let onclick = unsafe { OnClick::new(&mut *b) };
b.onclick = onclick;
b
},
TagName::Canvas => {
Box::new(Canvas {
view,
id,
})
},
TagName::H4 => Box::new(
H4 {
view,
id,
}
),
TagName::H5 => Box::new(
H5 {
view,
id,
}
),
TagName::Img => Box::new(
Img {
view,
id,
data: None,
}
),
TagName::Li => Box::new(
Li {
view,
id,
}
),
TagName::P => Box::new(P { view, id }),
TagName::Span => Box::new(Span { view, id }),
TagName::Unknown(name) => Box::new(Unknown {
view,
id,
name: name.clone(),
}),
}
}
/// Try creating TagName from this node.
pub fn | (node: &Node) -> Option<Self> {
let tag_name = node.tag_name();
if let Some(tag_name) = tag_name {
let tag_name = TagName::from(tag_name);
Some(tag_name)
} else {
None
}
}
/// Try creating implementation of the Element from this node.
///
/// # Failures
/// The node must contain the ID of the element and an opening tag that
/// corresponds to the element's tag. If either condition is not met, this
/// function returns None.
pub fn try_impl_from_node(node: &Node, view: ViewWrap) -> Option<Box<dyn Element>> {
let tag_name = Self::try_from_node(node);
if let Some(tag_name) = tag_name {
let id = node.attribute_by_name("id");
if let Some(id) = id {
Some(tag_name.new_impl(view, id.values_to_string()))
} else {
None
}
} else {
None
}
}
}
impl ImageFormat {
pub fn to_string(&self) -> String {
use ImageFormat::*;
match self {
Jpg => "jpg",
Png => "png",
}.to_string()
}
}
impl Image {
/// Encode given array of bytes in Base64 encoding.
pub fn base64(bin: Vec<u8>) -> String {
base64::encode(&bin)
}
/// Generate image struct from given array.
pub fn from_binary(bin: Vec<u8>, format: ImageFormat) -> Image {
Image {
base64: Self::base64(bin),
format,
}
}
/// Convert this image to a string that can be supplied to the 'src' attribute of an <img> tag.
pub fn to_img_string(&self) -> String {
format!("data:image/{};base64,{}", self.format.to_string(), self.base64)
}
}
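// Editor's note: a minimal sketch of the Image API above, assuming some JPEG
// bytes `bytes: Vec<u8>` are available (hypothetical):
//
//     let img = Image::from_binary(bytes, ImageFormat::Jpg);
//     let src = img.to_img_string(); // "data:image/jpg;base64,..."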
impl A {
pub fn href(&self) -> String {
if let Some(s) = self.attribute("href") {
s
} else {
String::new()
}
}
pub fn set_href<T: AsRef<str>>(&mut self, href: T) {
self.set_attribute("href", href.as_ref())
}
pub fn onclick(&self) -> &OnClick<A> {
&self.onclick
}
pub fn onclick_mut(&mut self) -> &mut OnClick<A> {
&mut self.onclick
}
}
impl ImageContent for Img {
fn set_image(&mut self, img: Arc<Image>) {
self.data = Some(img);
self.set_attribute("src", &self.data.as_ref().unwrap().to_img_string());
}
fn image(&self) -> Option<&Arc<Image>> {
self.data.as_ref()
}
fn remove_image(&mut self) -> Option<Arc<Image>> {
let mut img: Option<Arc<Image>> = None;
std::mem::swap(&mut img, &mut self.data);
img
}
}
impl TextContent for A {}
impl TextContent for H4 {}
impl TextContent for H5 {}
impl TextContent for Li {}
impl TextContent for P {}
impl TextContent for Span {}
impl Element for Unknown {
fn tag_name(&self) -> TagName {
TagName::Unknown(self.name.clone())
}
fn id(&self) -> &String {
&self.id
}
fn view(&self) -> &ViewWrap {
&self.view
}
}
| try_from_node | identifier_name |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Use with care outside of returning the result: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
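// Editor's note (worked example): if column_buffer_index == [(0, 0), (1, 0), (0, 1)],
// i.e. columns 0 and 2 live in buffer 0 and column 1 lives in buffer 1,
// then buffer_column_index == [[0, 2], [1]].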
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {}!= nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse par_destinations because the partitions were popped in reverse order
par_destinations.into_iter().rev().collect()
}
fn schema(&self) -> &[PandasTypeSystem] {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
}
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
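// Editor's note: loc() walks the partition in row-major order; e.g. with
// ncols() == 3, seq 0, 1, 2, 3, ... maps to (0,0), (0,1), (0,2), (1,0), ...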
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// Call Python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column. |
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
} | // blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]] | random_line_split |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Use with care outside of returning the result: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {}!= nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
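// Editor's note: counts gives the number of rows per partition; e.g. with
// nrows == 10, counts == [4, 3, 3] splits the rows into 4 + 3 + 3.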
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse par_destinations because the partitions were popped in reverse order
par_destinations.into_iter().rev().collect()
}
fn | (&self) -> &[PandasTypeSystem] {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
}
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// Call Python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column.
// blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]]
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
}
| schema | identifier_name |
destination.rs | use super::pandas_columns::{
BooleanBlock, BytesBlock, DateTimeBlock, Float64Block, HasPandasColumn, Int64Block,
PandasColumn, PandasColumnObject, StringBlock,
};
use super::types::{PandasDType, PandasTypeSystem};
use anyhow::anyhow;
use connectorx::{
ConnectorAgentError, Consume, DataOrder, Destination, DestinationPartition, Result, TypeAssoc,
TypeSystem,
};
use fehler::{throw, throws};
use itertools::Itertools;
use log::debug;
use pyo3::{
types::{PyDict, PyList},
FromPyObject, PyAny, Python,
};
use std::collections::HashMap;
use std::mem::transmute;
pub struct PandasDestination<'py> {
py: Python<'py>,
nrows: Option<usize>,
schema: Option<Vec<PandasTypeSystem>>,
buffers: Option<&'py PyList>,
buffer_column_index: Option<Vec<Vec<usize>>>,
dataframe: Option<&'py PyAny>, // Use with care outside of returning the result: this refers to the same data as buffers
}
impl<'a> PandasDestination<'a> {
pub fn new(py: Python<'a>) -> Self {
PandasDestination {
py,
nrows: None,
schema: None,
buffers: None,
buffer_column_index: None,
dataframe: None,
}
}
pub fn result(self) -> Option<&'a PyAny> {
self.dataframe
}
}
impl<'a> Destination for PandasDestination<'a> {
const DATA_ORDERS: &'static [DataOrder] = &[DataOrder::RowMajor];
type TypeSystem = PandasTypeSystem;
type Partition<'b> = PandasPartitionDestination<'b>;
#[throws(ConnectorAgentError)]
fn allocate<S: AsRef<str>>(
&mut self,
nrows: usize,
names: &[S],
schema: &[PandasTypeSystem],
data_order: DataOrder,
) {
if !matches!(data_order, DataOrder::RowMajor) {
throw!(ConnectorAgentError::UnsupportedDataOrder(data_order))
}
if matches!(self.nrows, Some(_)) {
throw!(ConnectorAgentError::DuplicatedAllocation);
}
let (df, buffers, index) = create_dataframe(self.py, names, schema, nrows)?;
debug!("DataFrame created");
// get index for each column: (index of block, index of column within the block)
let mut column_buffer_index: Vec<(usize, usize)> = Vec::with_capacity(index.len());
index.iter().try_for_each(|tuple| -> Result<()> {
column_buffer_index.push(tuple.extract().map_err(|e| {
anyhow!("cannot extract index tuple for `column_buffer_index` {}", e)
})?);
Ok(())
})?;
let nbuffers = buffers.len();
// buffer_column_index[i][j] = the column id of the j-th row (pandas buffer stores columns row-wise) in the i-th buffer.
let mut buffer_column_index = vec![vec![]; nbuffers];
let mut column_buffer_index_cid: Vec<_> = column_buffer_index.iter().enumerate().collect();
column_buffer_index_cid.sort_by_key(|(_, blk)| *blk);
for (cid, &(blkno, _)) in column_buffer_index_cid {
buffer_column_index[blkno].push(cid);
}
self.nrows = Some(nrows);
self.schema = Some(schema.to_vec());
self.buffers = Some(buffers);
self.buffer_column_index = Some(buffer_column_index);
self.dataframe = Some(df);
}
#[throws(ConnectorAgentError)]
fn partition(&mut self, counts: &[usize]) -> Vec<Self::Partition<'_>> {
assert_eq!(
counts.iter().sum::<usize>(),
self.nrows
.ok_or_else(|| ConnectorAgentError::DestinationNotAllocated)?,
"counts: {}!= nrows: {:?}",
counts.iter().sum::<usize>(),
self.nrows
);
let buffers = self.buffers.ok_or_else(|| anyhow!("got None buffers"))?;
let schema = self
.schema
.as_ref()
.ok_or_else(|| anyhow!("got None schema"))?;
let buffer_column_index = self
.buffer_column_index
.as_ref()
.ok_or_else(|| anyhow!("got None buffer_column_index"))?;
let mut partitioned_columns: Vec<Vec<Box<dyn PandasColumnObject>>> =
(0..schema.len()).map(|_| vec![]).collect();
for (buf, cids) in buffers.iter().zip_eq(buffer_column_index) {
for &cid in cids {
match schema[cid] {
PandasTypeSystem::F64(_) => {
let fblock = Float64Block::extract(buf).map_err(|e| anyhow!(e))?;
let fcols = fblock.split()?;
for (&cid, fcol) in cids.iter().zip_eq(fcols) {
partitioned_columns[cid] = fcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::I64(_) => {
let ublock = Int64Block::extract(buf).map_err(|e| anyhow!(e))?;
let ucols = ublock.split()?;
for (&cid, ucol) in cids.iter().zip_eq(ucols) {
partitioned_columns[cid] = ucol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bool(_) => {
let bblock = BooleanBlock::extract(buf).map_err(|e| anyhow!(e))?;
let bcols = bblock.split()?;
for (&cid, bcol) in cids.iter().zip_eq(bcols) {
partitioned_columns[cid] = bcol
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::String(_)
| PandasTypeSystem::BoxStr(_)
| PandasTypeSystem::Str(_)
| PandasTypeSystem::Char(_) => {
let block = StringBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::Bytes(_) => {
let block = BytesBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
PandasTypeSystem::DateTime(_) => {
let block = DateTimeBlock::extract(buf).map_err(|e| anyhow!(e))?;
let cols = block.split()?;
for (&cid, col) in cids.iter().zip_eq(cols) {
partitioned_columns[cid] = col
.partition(&counts)
.into_iter()
.map(|c| Box::new(c) as _)
.collect()
}
}
}
}
}
let mut par_destinations = vec![];
for &c in counts.into_iter().rev() {
let mut columns = Vec::with_capacity(partitioned_columns.len());
for (i, partitions) in partitioned_columns.iter_mut().enumerate() {
columns.push(
partitions
.pop()
.ok_or_else(|| anyhow!("empty partition for {}th column", i))?,
);
}
par_destinations.push(PandasPartitionDestination::new(c, columns, schema));
}
// We need to reverse par_destinations because the partitions were popped in reverse order
par_destinations.into_iter().rev().collect()
}
fn schema(&self) -> &[PandasTypeSystem] |
}
pub struct PandasPartitionDestination<'a> {
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
seq: usize,
}
impl<'a> PandasPartitionDestination<'a> {
fn new(
nrows: usize,
columns: Vec<Box<dyn PandasColumnObject + 'a>>,
schema: &'a [PandasTypeSystem],
) -> Self {
Self {
nrows,
columns,
schema,
seq: 0,
}
}
fn loc(&mut self) -> (usize, usize) {
let (row, col) = (self.seq / self.ncols(), self.seq % self.ncols());
self.seq += 1;
(row, col)
}
}
impl<'a> DestinationPartition<'a> for PandasPartitionDestination<'a> {
type TypeSystem = PandasTypeSystem;
fn nrows(&self) -> usize {
self.nrows
}
fn ncols(&self) -> usize {
self.schema.len()
}
fn finalize(&mut self) -> Result<()> {
for col in &mut self.columns {
col.finalize()?;
}
Ok(())
}
}
impl<'a, T> Consume<T> for PandasPartitionDestination<'a>
where
T: HasPandasColumn + TypeAssoc<PandasTypeSystem> + std::fmt::Debug,
{
fn consume(&mut self, value: T) -> Result<()> {
let (_, col) = self.loc();
self.schema[col].check::<T>()?;
// How do we check type id for borrowed types?
// assert!(self.columns[col].typecheck(TypeId::of::<T>()));
let (column, _): (&mut T::PandasColumn<'a>, *const ()) =
unsafe { transmute(&*self.columns[col]) };
column.write(value)
}
}
/// Call Python code to construct the dataframe and expose its buffers
#[throws(ConnectorAgentError)]
fn create_dataframe<'a, S: AsRef<str>>(
py: Python<'a>,
names: &[S],
schema: &[PandasTypeSystem],
nrows: usize,
) -> (&'a PyAny, &'a PyList, &'a PyList) {
let names: Vec<_> = names.into_iter().map(|s| s.as_ref()).collect();
debug!("names: {:?}", names);
debug!("schema: {:?}", schema);
let mut schema_dict: HashMap<PandasTypeSystem, Vec<usize>> = HashMap::new();
schema.iter().enumerate().for_each(|(idx, &dt)| {
let indices = schema_dict.entry(dt).or_insert(vec![]);
indices.push(idx);
});
debug!("schema_dict: {:?}", schema_dict);
let mut blocks_code = vec![];
schema_dict
.iter()
.for_each(|(&dt, indices)| {
if dt.is_extension() {
// each extension block only contains one column
for idx in indices {
blocks_code.push(format!(
"pd.core.internals.ExtensionBlock(pd.array(np.empty([{}], dtype='{}'), dtype='{}'), placement={}, ndim=2)",
nrows,
dt.npdtype(),
dt.dtype(),
idx,
));
}
} else {
blocks_code.push(format!(
"pd.core.internals.{}(np.empty([{}, {}], dtype='{}'), placement={:?}, ndim=2)",
dt.block_name(),
indices.len(),
nrows,
dt.npdtype(),
indices,
));
}
});
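// Editor's note (illustrative only; the exact strings depend on PandasDType's
// block_name()/npdtype()): a non-extension f64 entry with indices == [0, 2]
// and nrows == 3 would emit something like
//     pd.core.internals.FloatBlock(np.empty([2, 3], dtype='float64'), placement=[0, 2], ndim=2)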
// https://github.com/pandas-dev/pandas/blob/master/pandas/core/internals/managers.py
// Suppose we want to find the array corresponding to our i'th column.
// blknos[i] identifies the block from self.blocks that contains this column.
// blklocs[i] identifies the column of interest within
// self.blocks[self.blknos[i]]
let code = format!(
r#"import pandas as pd
import numpy as np
blocks = [{}]
block_manager = pd.core.internals.BlockManager(
blocks, [pd.Index(['{}']), pd.RangeIndex(start=0, stop={}, step=1)])
df = pd.DataFrame(block_manager)
blocks = [b.values for b in df._mgr.blocks]
index = [(i, j) for i, j in zip(df._mgr.blknos, df._mgr.blklocs)]"#,
blocks_code.join(","),
format!("{}", names.join("\',\'")),
nrows,
);
debug!("create dataframe code: {}", code);
// run python code
let locals = PyDict::new(py);
py.run(code.as_str(), None, Some(locals))
.map_err(|e| anyhow!(e))?;
// get # of blocks in dataframe
let buffers: &PyList = locals
.get_item("blocks")
.ok_or_else(|| anyhow!("cannot get `blocks` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `blocks` to PyList {}", e))?;
let index = locals
.get_item("index")
.ok_or_else(|| anyhow!("cannot get `index` from locals"))?
.downcast::<PyList>()
.map_err(|e| anyhow!("cannot downcast `index` to PyList {}", e))?;
let df = locals
.get_item("df")
.ok_or_else(|| anyhow!("cannot get `df` from locals"))?;
(df, buffers, index)
}
| {
static EMPTY_SCHEMA: Vec<PandasTypeSystem> = vec![];
self.schema.as_ref().unwrap_or(EMPTY_SCHEMA.as_ref())
} | identifier_body |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a command-line regular expression producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
struct DFA {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number (1 relative) for the start state
start: usize,
/// Set of accept states (1 relative)
accept: Vec<usize>, // multiple accept states are supported (e.g. for the '|' operator)
/// Matrix of transitions, rows are states, columns characters in the alphabet
transitions: Vec<Vec<usize>>,
}
//State based representation of the DFA version of the RegEx
struct StateGraph {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number for the start state
start_state: usize,
/// Vector of state objects
states: Vec<Box<State>>
}
//Definition of a single state
struct State {
//Is this an accept state
accept_state: bool,
//Set of transitions
transitions: Vec<usize>
}
struct Transitions {
chars: char,
state: usize
}
fn main() {
//Get and validate the RegEx on the command line
let regex = get_regex(std::env::args());
let dfa = DFA::new_from_regex(®ex);
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
//eprintln!("{:?}", state_graph);
state_graph.write_graphviz();
// Process through the input until end of file (cntl-z) is encountered
state_graph.process();
}
// *********************************************************************
/// Return the RegEx passed as the first parameter
fn get_regex(args: std::env::Args) -> String {
// Get the arguments as a vector
let args: Vec<String> = args.collect();
// Make sure only one argument was passed
if args.len() != 2 {
writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
.unwrap();
std::process::exit(1);
}
args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
//Create and return a DFA on the heap
//Generate the DFA from the given regex
fn new_from_regex(regex: &str) -> Box<DFA> {
//Setup the regex as the language / alphabet of the dfa
//Remove any duplicate word characters
let mut l = regex.replace("|", "");
l = l.replace("+", "");
l = l.replace("*", "");
//Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
let mut language: Vec<char> = l.chars().collect();
language.sort();
language.dedup();
language.push('Σ');
let final_state = l.len()+1;
//Create a near blank dfa object, with 1 being start state, accept state being the final state
// which is calculated as the regex length + 1
let mut dfa = Box::new(DFA{alphabet: language,
start: 1,
accept: [final_state].to_vec(),
transitions: vec![] });
//Set current and next state to traverse through the graph as we create the transition matrix.
let mut current_state = 1;
let mut next_state = 2;
//Create the Transitions Struct to save any transitions characters. These are characters that would
// need to be cycled back to. First character and second state will always start this off.
let mut transitions: Vec<Transitions> = Vec::new();
let t = Transitions{chars: regex.chars().next().unwrap(),
state: 2};
transitions.push(t);
//Create a previous_char character for | and * operators
let mut previous_char = regex.chars().next().unwrap();
//Traverse through the regex string, reading characters and deciding what to do depending on the character.
for c in regex.chars() {
let mut states: Vec<usize> = Vec::new();
//Checks if previous char was a | operator.
//If so, save the current character as a transition or cycle character
//Also fixes any previous transition state
if previous_char == '|' {
for (n, a) in dfa.alphabet.iter().enumerate() {
if *a == c {
dfa.transitions[0][n] = next_state;
}
}
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Same as above, just with the * operator.
if previous_char == '*' {
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Operator '|': Implemented - single and multiple | operators are working
//Multiple types of symbols are untested and could produce varying results
//Checks if character is | operator. If so, save the final state as an accept state, reset
//current state back to 1, and set previous_char as |
if c == '|' {
let final_bar_state = dfa.transitions.len()+1;
let mut final_bar_state_count: Vec<usize> = Vec::new();
dfa.accept.push(final_bar_state);
for _a in dfa.alphabet.iter() {
final_bar_state_count.push(final_bar_state);
}
dfa.transitions.push(final_bar_state_count);
current_state = 1;
previous_char = '|';
}
//Operator '+': Implemented - single works, multiple is funky, almost working
//Removes the previous transition matrix to remake it with updated states
//Fix to the multiple + operators I believe is using a for loop to go through the entire transitions vec
// but I have run out of time to get that working.
else if c == '+' {
dfa.transitions.remove(dfa.transitions.len()-1);
next_state -= 1;
current_state -= 1;
for a in dfa.alphabet.iter() {
if a == &previous_char {
states.push(next_state);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
next_state += 1;
current_state += 1;
}
//Operator '*': Implemented - Single and multiple * operators are working. Something funky happens as more characters
// are added to the regex, especially after a *. No time to check it. Very close to getting this part fixed, most of it works
//Similar to + operator, remove previous transition to replace it with new one.
// Step back 2 states for next and current to allow for proper transition. Push necessary states.
// Potential fix is similar to + operator with iterating over transitions instead of just checking index 0.
//At the end, add 2 to current state to get back, and set previous_char as *
else if c == '*' {
dfa.transitions.remove(dfa.transitions.len()-1);
let mut pushed_forward = false;
next_state -= 2;
current_state -= 2;
for a in dfa.alphabet.iter() {
if a == &previous_char {
next_state += 1;
states.push(next_state);
} else if *a == 'Σ' {
states.push(1);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else if !pushed_forward {
next_state += 1;
states.push(next_state);
pushed_forward = true;
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
current_state += 2;
previous_char = '*';
}
//All word character symbols: Implemented
//Allows for any character that is in the language to be added in, checks if there is a transition/cycle
//to be made, set the state as that before pushing. If it is not a transition, push to state 1
//if sigma symbol, push to state 1
else if c != 'Σ'
{
for a in dfa.alphabet.iter() {
let mut was_transition = false;
if c == *a {
states.push(next_state);
}
else {
for i in 0..transitions.len() {
if *a == transitions[i].chars {
states.push(transitions[i].state);
was_transition = true;
}
}
if was_transition == false {
if previous_char == '*' && *a != 'Σ' {
states.push(1);
previous_char = c;
} else {
states.push(1);
}
}
}
}
if previous_char != '|' {
dfa.transitions.push(states);
}
next_state += 1;
current_state += 1;
previous_char = c;
}
}
//Go back through and fix any transitions that weren't marked properly
// (i.e. | transitions to state 2 from state 4 if applicable)
for i in 0..dfa.transitions.len() {
for n in 0..dfa.transitions[i].len() {
if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
for c in 0..transitions.len() {
if dfa.alphabet[n] == transitions[c].chars {
dfa.transitions[i][n] = transitions[c].state;
}
}
}
}
}
//Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
let mut final_state_count: Vec<usize> = Vec::new();
for _alphabet in dfa.alphabet.iter() {
final_state_count.push(final_state);
}
dfa.transitions.push(final_state_count);
dfa
}
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
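// Editor's note: test_sentence prints one transition per input character,
// e.g. processing "ab" prints lines of the form
//     δ(q1, a) → (q2)
//     δ(q2, b) → (q3)
// before the caller reports Accept/Reject.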
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
fn process(& |
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that it creates a graphviz file
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
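//Editor-added sketch: exercise test_sentence end to end. The exact
//accept/reject verdict depends on the DFA construction above, so this only
//asserts that processing succeeds for characters in the alphabet.
#[test]
fn test2() {
let dfa = DFA::new_from_regex("a*b");
let state_graph = StateGraph::new_from_dfa(&dfa);
assert!(state_graph.test_sentence("ab").is_ok());
}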
} | self) { | identifier_name |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a command-line regular expression producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
struct DFA {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number (1 relative) for the start state
start: usize,
/// Set of accept states (1 relative)
accept: Vec<usize>, // multiple accept states are supported (e.g. for the '|' operator)
/// Matrix of transitions, rows are states, columns characters in the alphabet
transitions: Vec<Vec<usize>>,
}
//State based representation of the DFA version of the RegEx
struct StateGraph {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number for the start state
start_state: usize,
/// Vector of state objects
states: Vec<Box<State>>
}
//Definition of a single state
struct State {
//Is this an accept state
accept_state: bool,
//Set of transitions
transitions: Vec<usize>
}
struct Transitions {
chars: char,
state: usize
}
fn main() {
//Get and validate the RegEx on the command line
let regex = get_regex(std::env::args());
let dfa = DFA::new_from_regex(®ex);
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
//eprintln!("{:?}", state_graph);
state_graph.write_graphviz();
// Process through the input until end of file (cntl-z) is encountered
state_graph.process();
}
// *********************************************************************
/// Return the RegEx passed as the first parameter
fn get_regex(args: std::env::Args) -> String {
// Get the arguments as a vector
let args: Vec<String> = args.collect();
// Make sure only one argument was passed
if args.len() != 2 {
writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
.unwrap();
std::process::exit(1);
}
args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
//Create and return a DFA on the heap
//Generate the DFA from the given regex
fn new_from_regex(regex: &str) -> Box<DFA> {
//Setup the regex as the language / alphabet of the dfa
//Remove any duplicate word characters
let mut l = regex.replace("|", "");
l = l.replace("+", "");
l = l.replace("*", "");
//Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
let mut language: Vec<char> = l.chars().collect();
language.sort();
language.dedup();
language.push('Σ');
let final_state = l.len()+1;
//Create a near blank dfa object, with 1 being start state, accept state being the final state
// which is calculated as the regex length + 1
let mut dfa = Box::new(DFA{alphabet: language,
start: 1,
accept: [final_state].to_vec(),
transitions: vec![] });
//Set current and next state to traverse through the graph as we create the transition matrix.
let mut current_state = 1;
let mut next_state = 2;
//Create the Transitions Struct to save any transitions characters. These are characters that would
// need to be cycled back to. First character and second state will always start this off.
let mut transitions: Vec<Transitions> = Vec::new();
let t = Transitions{chars: regex.chars().next().unwrap(),
state: 2};
transitions.push(t);
//Create a previous_char character for | and * operators
let mut previous_char = regex.chars().next().unwrap();
//Traverse through the regex string, reading characters and deciding what to do depending on the character.
for c in regex.chars() {
let mut states: Vec<usize> = Vec::new();
//Checks if previous char was a | operator.
//If so, save the current character as a transition or cycle character
//Also fixes any previous transition state
if previous_char == '|' {
for (n, a) in dfa.alphabet.iter().enumerate() {
if *a == c {
dfa.transitions[0][n] = next_state;
}
}
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Same as above, just with the * operator.
if previous_char == '*' {
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Operator '|': Implemented - single and multiple | operators are working
//Multiple types of symbols are untested and could produce varying results
//Checks if character is | operator. If so, save the final state as an accept state, reset
//current state back to 1, and set previous_char as |
if c == '|' {
let final_bar_state = dfa.transitions.len()+1;
let mut final_bar_state_count: Vec<usize> = Vec::new();
dfa.accept.push(final_bar_state);
for _a in dfa.alphabet.iter() {
final_bar_state_count.push(final_bar_state);
}
dfa.transitions.push(final_bar_state_count);
current_state = 1;
previous_char = '|';
}
//Operator '+': Implemented - single works, multiple is funky, almost working
//Removes the previous transition matrix to remake it with updated states
//Fix to the multiple + operators I believe is using a for loop to go through the entire transitions vec
// but I have run out of time to get that working.
else if c == '+' {
dfa.transitions.remove(dfa.transitions.len()-1);
next_state -= 1;
current_state -= 1;
for a in dfa.alphabet.iter() {
if a == &previous_char {
states.push(next_state);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
next_state += 1;
current_state += 1;
}
//Operator '*': Implemented - Single and multiple * operators are working. Something funky happens as more characters
// are added to the regex, especially after a *. No time to check it. Very close to getting this part fixed, most of it works
//Similar to + operator, remove previous transition to replace it with new one.
// Step back 2 states for next and current to allow for proper transition. Push necessary states. | //At the end, add 2 to current state to get back, and set previous_char as *
else if c == '*' {
dfa.transitions.remove(dfa.transitions.len()-1);
let mut pushed_forward = false;
next_state -= 2;
current_state -= 2;
for a in dfa.alphabet.iter() {
if a == &previous_char {
next_state += 1;
states.push(next_state);
} else if *a == 'Σ' {
states.push(1);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else if !pushed_forward {
next_state += 1;
states.push(next_state);
pushed_forward = true;
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
current_state += 2;
previous_char = '*';
}
//All word character symbols: Implemented
//Allows for any character that is in the language to be added in, checks if there is a transition/cycle
//to be made, set the state as that before pushing. If it is not a transition, push to state 1
//if sigma symbol, push to state 1
else if c != 'Σ'
{
for a in dfa.alphabet.iter() {
let mut was_transition = false;
if c == *a {
states.push(next_state);
}
else {
for i in 0..transitions.len() {
if *a == transitions[i].chars {
states.push(transitions[i].state);
was_transition = true;
}
}
if was_transition == false {
if previous_char == '*' && *a != 'Σ' {
states.push(1);
previous_char = c;
} else {
states.push(1);
}
}
}
}
if previous_char != '|' {
dfa.transitions.push(states);
}
next_state += 1;
current_state += 1;
previous_char = c;
}
}
//Go back through and fix any transitions that weren't marked properly
// (i.e. | transitions to state 2 from state 4 if applicable)
for i in 0..dfa.transitions.len() {
for n in 0..dfa.transitions[i].len() {
if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
for c in 0..transitions.len() {
if dfa.alphabet[n] == transitions[c].chars {
dfa.transitions[i][n] = transitions[c].state;
}
}
}
}
}
//Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
let mut final_state_count: Vec<usize> = Vec::new();
for _alphabet in dfa.alphabet.iter() {
final_state_count.push(final_state);
}
dfa.transitions.push(final_state_count);
dfa
}
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
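// Editor's note: for the regex "a*b" (alphabet {a, b, Σ}, accept state q3)
// this emits a Graphviz digraph along the lines of
//     digraph {
//         rankdir=LR;
//         node [shape=point]; start;
//         node [shape=doublecircle]; q3;
//         node [shape=circle];
//         start -> q1
//         q1 -> q2 [label="a"]
//         ...
//     }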
fn process(&self) {
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that it creates a graphviz file
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the dfa structure based on in RegEx entered from the command line
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
} | // Potential fix is similar to + operator with iterating over transitions instead of just checking index 0. | random_line_split |
main.rs | //! CSIS-616 - Program #3
//!
//! Some parts were originally made by: Ralph W. Crosby PhD.
//! Edited and added to by: Paige Peck
//!
//!
//! Process a command-line regular expression producing
//! - A textual representation of the internal state graph
//! - A Graphviz `.dot` file representing the graph
//!
//! # Usage
//!
//! ```
//! cargo run regex
//! ```
//! where: `regex` is a series of symbols that will generate a DFA and decide if input
//! is accepted or rejected by the regex
//!
//! # Output
//!
//! To `stderr`: Debug display of the internal graph structure
//!
//! To `stdout`: Graphviz definitions of the graph structure
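//!
//! A fragment of the generated Graphviz output might look like the following
//! (illustrative only; the exact states and edges depend on the regex):
//!
//! ```text
//! digraph {
//!     rankdir=LR;
//!     start -> q1
//!     q1 -> q2 [label="a"]
//! }
//! ```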
use std::io;
use std::io::prelude::*;
use std::io::Write;
// *********************************************************************
/// # Deterministic Finite Automata Structure
struct DFA {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number (1 relative) for the start state
start: usize,
/// Set of accept states (1 relative)
accept: Vec<usize>, //already a Vec so that multiple accept states are supported
/// Matrix of transitions, rows are states, columns characters in the alphabet
transitions: Vec<Vec<usize>>,
}
//State based representation of the DFA version of the RegEx
struct StateGraph {
/// The set of characters comprising the alphabet
alphabet: Vec<char>,
/// State number for the start state
start_state: usize,
/// Vector of state objects
states: Vec<Box<State>>
}
//Definition of a single state
struct State {
//Is this an accept state
accept_state: bool,
//Set of transitions
transitions: Vec<usize>
}
struct Transitions {
chars: char,
state: usize
}
fn main() |
// *********************************************************************
/// Return the RegEx passed as the first parameter
fn get_regex(args: std::env::Args) -> String {
// Get the arguments as a vector
let args: Vec<String> = args.collect();
// Make sure only one argument was passed
if args.len() != 2 {
writeln!(std::io::stderr(), "Usage: cargo run 'regex'")
.unwrap();
std::process::exit(1);
}
args[1].to_string()
}
// *********************************************************************
/// Implement the methods of the DFA structure
impl DFA {
//Create and return a DFA on the heap
//Generate the DFA from the given regex
fn new_from_regex(regex: &str) -> Box<DFA> {
//Setup the regex as the language / alphabet of the dfa
//Remove any duplicate word characters
let mut l = regex.replace("|", "");
l = l.replace("+", "");
l = l.replace("*", "");
//Creates a language Vec<char> without the operators in it and pushing the sigma symbol for alphabet purposes
let mut language: Vec<char> = l.chars().collect();
language.sort();
language.dedup();
language.push('Σ');
let final_state = l.len()+1;
//Create a near-blank dfa object, with 1 as the start state and the accept state being the final state,
// which is calculated as the regex length + 1
let mut dfa = Box::new(DFA{alphabet: language,
start: 1,
accept: [final_state].to_vec(),
transitions: vec![] });
//Set current and next state to traverse through the graph as we create the transition matrix.
let mut current_state = 1;
let mut next_state = 2;
//Create the Transitions Struct to save any transitions characters. These are characters that would
// need to be cycled back to. First character and second state will always start this off.
let mut transitions: Vec<Transitions> = Vec::new();
let t = Transitions{chars: regex.chars().next().unwrap(),
state: 2};
transitions.push(t);
//Create a previous_char character for | and * operators
let mut previous_char = regex.chars().next().unwrap();
//Traverse through the regex string, reading characters and deciding what to do depending on the character.
for c in regex.chars() {
let mut states: Vec<usize> = Vec::new();
//Checks if previous char was a | operator.
//If so, save the current character as a transition or cycle character
//Also fixes any previous transition state
if previous_char == '|' {
for (n, a) in dfa.alphabet.iter().enumerate() {
if *a == c {
dfa.transitions[0][n] = next_state;
}
}
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Same as above, just with the * operator.
if previous_char == '*' {
let j = Transitions{chars: c, state: next_state};
transitions.push(j);
}
//Operator '|': Implemented - single and multiple | operators are working
//Multiple types of symbols are untested and could produce varying results
//Checks if character is | operator. If so, save the final state as an accept state, reset
//current state back to 1, and set previous_char as |
if c == '|' {
let final_bar_state = dfa.transitions.len()+1;
let mut final_bar_state_count: Vec<usize> = Vec::new();
dfa.accept.push(final_bar_state);
for _a in dfa.alphabet.iter() {
final_bar_state_count.push(final_bar_state);
}
dfa.transitions.push(final_bar_state_count);
current_state = 1;
previous_char = '|';
}
//Operator '+': Implemented - single works, multiple is funky, almost working
//Removes the previous transition matrix to remake it with updated states
//A fix for multiple + operators would be a for loop that goes through the entire transitions vec,
// but I ran out of time to get that working. (See the commented sketch below.)
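// A sketch of that idea, kept as comments so behavior is unchanged (untested):
//
// for t in transitions.iter() {
//     if *a == t.chars {
//         states.push(t.state);
//     }
// }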
else if c == '+' {
dfa.transitions.remove(dfa.transitions.len()-1);
next_state -= 1;
current_state -= 1;
for a in dfa.alphabet.iter() {
if a == &previous_char {
states.push(next_state);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
next_state += 1;
current_state += 1;
}
//Operator '*': Implemented - single and multiple * operators are working. Something odd happens as more characters
// are added to the regex, especially after a *; there was no time to check it. Very close to fixed, most of it works.
//Similar to + operator, remove previous transition to replace it with new one.
// Step back 2 states for next and current to allow for proper transition. Push necessary states.
// A potential fix is similar to the + operator: iterate over all transitions instead of just checking index 0.
//At the end, add 2 to current state to get back, and set previous_char as *
else if c == '*' {
dfa.transitions.remove(dfa.transitions.len()-1);
let mut pushed_forward = false;
next_state -= 2;
current_state -= 2;
for a in dfa.alphabet.iter() {
if a == &previous_char {
next_state += 1;
states.push(next_state);
} else if *a == 'Σ' {
states.push(1);
} else {
if *a == transitions[0].chars {
states.push(transitions[0].state);
} else if !pushed_forward {
next_state += 1;
states.push(next_state);
pushed_forward = true;
} else {
states.push(1);
}
}
}
dfa.transitions.push(states);
current_state += 2;
previous_char = '*';
}
//All word character symbols: Implemented
//Allows for any character that is in the language to be added in, checks if there is a transition/cycle
//to be made, set the state as that before pushing. If it is not a transition, push to state 1
//if sigma symbol, push to state 1
else if c != 'Σ'
{
for a in dfa.alphabet.iter() {
let mut was_transition = false;
if c == *a {
states.push(next_state);
}
else {
for i in 0..transitions.len() {
if *a == transitions[i].chars {
states.push(transitions[i].state);
was_transition = true;
}
}
if !was_transition {
if previous_char == '*' && *a != 'Σ' {
states.push(1);
previous_char = c;
} else {
states.push(1);
}
}
}
}
if previous_char != '|' {
dfa.transitions.push(states);
}
next_state += 1;
current_state += 1;
previous_char = c;
}
}
//Go back through and fix any transitions that weren't marked properly
// (i.e. | transitions to state 2 from state 4 if applicable)
for i in 0..dfa.transitions.len() {
for n in 0..dfa.transitions[i].len() {
if n < dfa.transitions[i].len() - 1 && dfa.transitions[i][n] == 1 {
for c in 0..transitions.len() {
if dfa.alphabet[n] == transitions[c].chars {
dfa.transitions[i][n] = transitions[c].state;
}
}
}
}
}
//Set final state as a cycle for transition matrix. If 3 states, push [3,3,3]
let mut final_state_count: Vec<usize> = Vec::new();
for _alphabet in dfa.alphabet.iter() {
final_state_count.push(final_state);
}
dfa.transitions.push(final_state_count);
dfa
}
}
// *********************************************************************
// Implement the methods of the DFA structure
impl StateGraph {
/// Create a state graph from a DFA structure
fn new_from_dfa(dfa: &DFA) -> Box<StateGraph> {
// Create an empty graph object
let mut graph = Box::new(StateGraph{alphabet: dfa.alphabet.clone(),
start_state: dfa.start - 1,
states: vec!() });
// Look through the transition table building state objects
for row in dfa.transitions.iter() {
let mut v = Box::new(State{accept_state: false, transitions: vec!()});
for col in row {
v.transitions.push(col-1);
}
graph.states.push(v);
}
// Set the accept states
for astate in dfa.accept.iter() {
graph.states[*astate - 1].accept_state = true;
}
graph
}
/// Execute the graph on a sentence
/// Return Err if a character not in the alphabet is encountered
/// Return Ok and a bool indicating accept (true) or reject (false)
fn test_sentence(&self, sentence: &str) -> Result<bool, String> {
let mut state = self.start_state;
//Full alphabet to test against for sigma character
let full_alphabet: Vec<char> = "abcdefghijklmnopqrstuvwxyz0123456789 ".chars().collect();
for ch in sentence.chars() {
//Check if character is a word character. Accept it if it is and change it to the 'Σ' symbol for matching purposes
let mut c = ch;
if !self.alphabet.contains(&c) && full_alphabet.contains(&c) {
c = 'Σ';
}
let state_no = match self.alphabet.iter().position(|v| *v == ch || *v == c) {
Some(t) => t,
None => return Err(format!("Character <{}> does not have a transition", ch))
};
print!("δ(q{}, {}) → ", state+1, ch);
state = self.states[state].transitions[state_no];
println!("(q{})", state+1);
}
Ok(self.states[state].accept_state)
}
fn write_graphviz(&self) {
println!("digraph {{");
println!("\trankdir=LR;");
println!("\tnode [shape=point]; start;");
for (n, state) in self.states.iter().enumerate() {
if state.accept_state {
println!("\tnode [shape=doublecircle]; q{};", n+1);
}
}
println!("\tnode [shape=circle];");
println!("\tstart -> q{}", self.start_state+1);
for (n, state) in self.states.iter().enumerate() {
for (i, ch) in self.alphabet.iter().enumerate() {
println!("\tq{} -> q{} [label=\"{}\"]", n+1, state.transitions[i] + 1, ch);
}
}
println!("}}");
}
fn process(&self) {
let stdin = io::stdin();
for line in stdin.lock().lines() {
// Get the line out of the Result, should never error
let sentence = &line.unwrap();
println!("Processing sentence <{}>", sentence);
match self.test_sentence(sentence) {
Ok(b) => println!("{}",
if b {"Accept"} else {"Reject"}),
Err(s) => println!("Error processing sentence: {}", s)
}
}
}
}
#[cfg(test)]
mod test {
use super::*;
//This test is used to make sure that Graphviz output is generated
#[test]
fn test1() {
let dfa = DFA::new_from_regex("a*b");
//Create the state graph from the DFA generated by the RegEx above
let state_graph = StateGraph::new_from_dfa(&dfa);
state_graph.write_graphviz();
}
} | {
//Get and validate the RegEx on the command line
let regex = get_regex(std::env::args());
let dfa = DFA::new_from_regex(&regex);
//Create the state graph from the DFA built from the command-line RegEx
let state_graph = StateGraph::new_from_dfa(&dfa);
//eprintln!("{:?}", state_graph);
state_graph.write_graphviz();
// Process through the input until end of file (cntl-z) is encountered
state_graph.process();
} | identifier_body |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
// `Clocks` is referenced by `master` below; it is assumed to be exported by the rcc module.
use crate::peripherals::extended::{ gpio::Gpio, rcc::{ Clocks, Rcc } };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
/// Requested bus frequency is outside the supported range
InvalidBusSpeed,
/// Address 2 is only available in 7-bit addressing mode
Address2NotAllowed,
Other,
}
#[repr(C)]
pub struct I2c {
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
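// Worked example of `write_bits` with illustrative values: for o = 4, size = 3
// and data = 0b101, mask = 0b111, so an old register value of 0b1111_0000
// becomes (0b1111_0000 & !(0b111 << 4)) | (0b101 << 4) = 0b1101_0000.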
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
// Configure the pins before storing them in the peripheral struct.
// (Assumes the pin builder methods take `&mut self` and that `HIGH` is a
// pin-speed constant in scope.)
// TODO : Change to each board
let (mut sda, mut scl) = pins;
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
let mut new = I2c {
// SAFETY: `address` is one of the I2C register block base addresses above.
block: unsafe { &mut *(address as *mut _) },
pins: (sda, scl),
};
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable the peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the clocks came from the RCC, the APB1 clock value is known to be valid
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb1.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb1.hz() / (speed.hz() * 2) {
0..=3 => 4,
n => n,
};
// Set clock to standard mode with appropriate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
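// Minimal usage sketch (hypothetical pin constructors and device address;
// error handling omitted):
//
// let pins = (gpio.pin(9), gpio.pin(8));
// let mut i2c = I2c::master(I2C1, pins, &rcc, clocks, Frequency::KHz(100))?;
// i2c.write(0x3C, &[0x00, 0xAF])?;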
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but not marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
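// Program NACK and STOP *before* reading the final byte, per the usual
// STM32 master-receiver flow, so the peripheral does not clock in another byte.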
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
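// Typical register-read pattern with `write_read` (the device address 0x68
// and register 0x75 are illustrative):
//
// let mut buf = [0u8; 2];
// i2c.write_read(0x68, &[0x75], &mut buf)?;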
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus is free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled, the next byte will be received in the shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2..=50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last transfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn | (&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 {
(self.block[6].read() >> 8) & 0b1111_1111
}
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8..=15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | address_mode | identifier_name |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
// `Clocks` is referenced by `master` below; it is assumed to be exported by the rcc module.
use crate::peripherals::extended::{ gpio::Gpio, rcc::{ Clocks, Rcc } };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
/// Requested bus frequency is outside the supported range
InvalidBusSpeed,
/// Address 2 is only available in 7-bit addressing mode
Address2NotAllowed,
Other,
}
#[repr(C)]
pub struct I2c {
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
// Configure the pins before storing them in the peripheral struct.
// (Assumes the pin builder methods take `&mut self` and that `HIGH` is a
// pin-speed constant in scope.)
// TODO : Change to each board
let (mut sda, mut scl) = pins;
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
let mut new = I2c {
// SAFETY: `address` is one of the I2C register block base addresses above.
block: unsafe { &mut *(address as *mut _) },
pins: (sda, scl),
};
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable the peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the clocks came from the RCC, the APB1 clock value is known to be valid
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb1.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb1.hz() / (speed.hz() * 2) {
0..=3 => 4,
n => n,
};
// Set clock to standard mode with appropriate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but not marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
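// Low-level transfer sketch (assumes the START and address phases were already
// performed; the command byte 0xA0 is illustrative):
//
// i2c.send_byte(0xA0)?; // blocks on TxE, then on transfer complete
// let reply = i2c.recv_byte()?; // blocks on RxNE, then reads the data register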
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus is free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled, the next byte will be received in the shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2..=50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last transfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn address_mode(&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 |
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8..=15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
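// Example configuration chain (sketch; the filter and duty-cycle values are
// illustrative):
//
// i2c.analog_filter_state(true)
//     .digital_noise_filter(Some(0b0011))
//     .set_duty_cycle(DutyCycle::D2);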
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | {
(self.block[6].read() >> 8) & 0b1111_1111
} | identifier_body |
i2c.rs | //! I2C Peripheral
use crate::common::{ Register, Frequency, I2CInterrupt, I2CFlags, I2CBitMode, MasterMode, DutyCycle, DualAddress };
use crate::common::enums::RCCPeripheral;
use crate::common::structs::pins::Pin;
use embedded_hal::blocking::i2c::{ Read, Write, WriteRead };
// `Clocks` is referenced by `master` below; it is assumed to be exported by the rcc module.
use crate::peripherals::extended::{ gpio::Gpio, rcc::{ Clocks, Rcc } };
pub const I2C1: u32 = 0x4000_5400;
pub const I2C2: u32 = 0x4000_5800;
pub const I2C3: u32 = 0x4000_5C00;
pub const SIZE: usize = 10;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum I2CError {
/// NACK received
NACK,
/// Bus error
Bus,
/// Arbitration loss
Arbitration,
/// Overrun - Slave mode only
Overrun,
/// PEC - SMBUS mode only
PEC,
/// Timeout - SMBUS mode only
Timeout,
/// Alert - SMBUS mode only
Alert,
/// Requested bus frequency is outside the supported range
InvalidBusSpeed,
/// Address 2 is only available in 7-bit addressing mode
Address2NotAllowed,
Other,
}
#[repr(C)]
pub struct I2c {
block: &'static [Register<u32>; SIZE],
pins: (Pin, Pin),
}
impl I2c {
/// Sets bit at block and offset given
pub fn set(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] |= 1 << o;
self
}
/// Clears bit at block and offset given
pub fn clear(&mut self, b: usize, o: usize) -> &mut Self {
self.block[b] &= !(1 << o);
self
}
/// Checks if bit is set
pub fn is_set(&self, r: usize, b: usize) -> bool {
(self.block[r].read() >> b) & 1 == 1
}
pub fn write_bits(&mut self, b: usize, o: usize, data: u32, size: usize) -> &mut Self {
let mask = (1u32 << size) - 1;
let old = self.block[b].read();
self.block[b].write( old & !(mask << o) | ((data & mask) << o) );
self
}
}
impl I2c {
/// Set up as master
pub fn master<'a>(address: u32, pins: (Pin, Pin), rcc: &'a Rcc, clocks: Clocks, speed: Frequency) -> Result<Self, I2CError> {
let i2cid = match address {
I2C1 => RCCPeripheral::I2C1,
I2C2 => RCCPeripheral::I2C2,
I2C3 => RCCPeripheral::I2C3,
_ => return Err(I2CError::Other),
};
// Configure the pins before storing them in the peripheral struct.
// (Assumes the pin builder methods take `&mut self` and that `HIGH` is a
// pin-speed constant in scope.)
// TODO : Change to each board
let (mut sda, mut scl) = pins;
sda.altfn(4)
.speed(HIGH);
scl.altfn(4)
.speed(HIGH);
let mut new = I2c {
// SAFETY: `address` is one of the I2C register block base addresses above.
block: unsafe { &mut *(address as *mut _) },
pins: (sda, scl),
};
rcc.peripheral_state(true, i2cid);
rcc.reset_peripheral(i2cid);
// TODO set up RCC clocks
// Disable the peripheral
// by clearing PE bit in CR1
new.clear(0, 0);
// Calculate settings for I2C speed modes
// If the clocks came from the RCC, the APB1 clock value is known to be valid
// Configure bus frequency into I2C peripheral
new.write_bits(1, 0, clocks.apb1.mhz(), 6);
let trise = if speed <= Frequency::KHz(100) {
clocks.apb1.mhz() + 1
} else {
((clocks.apb1.mhz() * 300) / 1000) + 1
};
// Configure correct rise times
new.write_bits(8, 0, trise, 6);
// I2C clock control calculation
// If in slow mode
if speed <= Frequency::KHz(100) {
let ccr = match clocks.apb1.hz() / (speed.hz() * 2) {
0..=3 => 4,
n => n,
};
// Set clock to standard mode with appropriate parameters for selected speed
new.clear(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// Fast mode
// Defaults for now to 2:1 duty cycle
if true {
let ccr = match clocks.apb1.hz() / (speed.hz() * 3) {
0 => 1,
n => n,
};
new.set(7, 15)
.clear(7, 14)
.write_bits(7, 0, ccr, 12);
} else {
// 16:9 duty cycle
let ccr = match clocks.apb1.hz() / (speed.hz() * 25) {
0 => 1,
n => n,
};
new.set(7, 15)
.set(7, 14)
.write_bits(7, 0, ccr, 12);
}
}
new.set(0, 0);
Ok( new )
}
/// Stop the peripheral and release the pins
pub fn free(mut self) -> (Pin, Pin) {
self.clear(0, 0);
self.pins
}
}
/*
impl I2c {
/// Scans for devices and returns all the addresses it found connected
pub fn scan(&mut self) -> Vec<u8> {
let mut addresses = Vec::new();
let mut void = &[0];
for i in 0..128 {
match self.read(i, void) {
Ok(()) => addresses.push(i),
_ => (),
}
}
addresses
}
}
*/
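// Note: `scan` above presumably stays commented out because `Vec` requires an
// allocator. A fixed-size sketch of the same idea (illustrative only):
//
// let mut found = [false; 128];
// for addr in 0..128u8 {
//     found[addr as usize] = i2c.read(addr, &mut [0]).is_ok();
// }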
impl Read for I2c {
type Error = I2CError;
/// Read bytes into buffer
/// This function is based on MASTER mode
/// WARNING!
/// `unsafe` function (but not marked as such). This function may leave the sender hanging
/// if the sender sends more bytes than what the buffer can hold.
/// This is due to no STOP signal being sent back.
fn read(&mut self, addr: u8, buffer: &mut [u8]) -> Result<(), I2CError> {
let last = buffer.len() - 1;
// Send start condition and ACK bit
self.start()
.ack();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data(((addr as u32) << 1) + 1);
// wait until address was sent |
// Clear condition by reading SR2
let _ = self.block[6].read();
// Store bytes
for i in 0..last {
buffer[i] = self.recv_byte()?;
}
self.nack()
.stop();
// Read last byte
buffer[last] = self.recv_byte()?;
Ok(())
}
}
impl Write for I2c {
type Error = I2CError;
/// Send a buffer of bytes
fn write(&mut self, addr: u8, bytes: &[u8]) -> Result<(), I2CError> {
// Send START condition
self.start();
// Wait until START condition is generated
while !self.is_set(5, 0) {}
// Wait until all devices are listening to us (bus is free)
while !self.is_set(6, 0) && !self.is_set(6, 1) {}
// Set up current address to talk to
self.write_data((addr as u32) << 1);
// wait until address was sent
while !self.is_set(5, 1) {}
// Clear condition by reading SR2
// let _ = ptr::read_volatile(self as u32 + 0x18);
let _ = self.block[6].read();
// Send the bytes
for b in bytes {
self.send_byte(*b)?;
}
Ok(())
}
}
impl WriteRead for I2c {
type Error = I2CError;
/// Writes some bytes then reads some bytes
fn write_read(&mut self, addr: u8, bytes: &[u8], buffer: &mut [u8]) -> Result<(), I2CError> {
self.write(addr, bytes)?;
self.read(addr, buffer)
}
}
impl I2c {
/// Sends a byte
pub fn send_byte(&mut self, byte: u8) -> Result<&mut Self, I2CError> {
// Wait until TX buffer is empty
while !self.is_raised(I2CFlags::TxEmpty) {}
self.write_data(byte as u32);
while {
if self.is_raised(I2CFlags::ACKFailure) {
return Err(I2CError::NACK);
}
!self.is_raised(I2CFlags::TransferComplete)
} {}
Ok( self )
}
/// Receive a byte
pub fn recv_byte(&self) -> Result<u8, I2CError> {
while !self.is_raised(I2CFlags::RxNotEmpty) {}
Ok( self.read_data() )
}
}
impl I2c {
/// Enable the sending of ACK signal after byte transfer
pub fn ack(&mut self) -> &mut Self {
self.set(0, 10)
}
/// Disable the sending of ACK signal (effectively sending a NACK) after byte transfer
pub fn nack(&mut self) -> &mut Self {
self.clear(0, 10)
}
/// Stop generation
/// 0: No stop generation
/// 1: Slave Mode - Release the SCL and SDA lines after current byte transfer
/// Master Mode - Stop generation after the current byte transfer or current Start condition is sent
pub fn stop(&mut self) -> &mut Self {
self.set(0, 9)
}
/// Start generation
/// 0: No start generation
/// 1: Slave Mode - Start generation when bus is free
/// Master Mode - Repeated start generation
pub fn start(&mut self) -> &mut Self {
self.set(0, 8)
}
/// Enable/Disable peripheral
pub fn state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(0, 0),
_ => self.clear(0, 0),
}
}
/// If enabled, the next byte will be received in the shift register
pub fn receive_in_shift(&mut self) -> &mut Self {
self.set(0, 11)
}
/// Starts Packet Error Checking (PEC) for the next transfer
pub fn start_pec(&mut self) -> &mut Self {
self.set(0, 12)
}
/// Resets the peripheral
pub fn reset(&mut self) -> &mut Self {
// TODO : check lines are free
self.stop()
.set(0, 15)
}
/// Sets the frequency of the transfer
pub fn set_frequency(&mut self, f: Frequency) -> Result<&mut Self, I2CError> {
match f.mhz() {
2..=50 => Ok( self.write_bits(1, 0, f.mhz() as u32, 6) ),
_ => Err(I2CError::InvalidBusSpeed),
}
}
/// Indicate this is the last transfer
pub fn last_transfer(&mut self) -> &mut Self {
self.set(1, 12)
}
/// Enable/Disable interrupt
pub fn int_state(&mut self, s: bool, int: I2CInterrupt) -> &mut Self {
let offsets = int.offsets();
match s {
true => self.set(offsets.0, offsets.1),
_ => self.clear(offsets.0, offsets.1),
}
}
/// Sets the addressing mode between 7-bit and 10-bit
pub fn address_mode(&mut self, a: I2CBitMode) -> &mut Self {
match a {
I2CBitMode::Bit7 => self.clear(2, 15),
_ => self.set(2, 15),
}
}
/// Writes the interface address 1
/// To be set **after** the interface bit size is set (7-bit or 10-bit)
pub fn set_address_1(&mut self, addr: u32) -> &mut Self {
match self.is_set(2, 15) {
true => self.write_bits(2, 0, addr, 10),
_ => self.write_bits(2, 1, addr, 7),
}
}
/// Writes the interface address 2
/// Returns an error if not in 7 bit mode
pub fn set_address_2(&mut self, addr: u32) -> Result<&mut Self, I2CError> {
match self.is_set(2, 15) {
true => Err( I2CError::Address2NotAllowed),
_ => Ok( self.write_bits(3, 1, addr, 7) ),
}
}
/// Enable/Disable dual addressing mode
pub fn dual_address_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.set(3, 0),
_ => self.clear(3, 0),
}
}
/// Read received byte
pub fn read_data(&self) -> u8 {
self.block[4].read() as u8
}
/// Write data to be transmitted
pub fn write_data(&mut self, data: u32) -> &mut Self {
self.write_bits(4, 0, data, 8)
}
/// Returns true if the flag is raised
pub fn is_raised(&self, f: I2CFlags) -> bool {
let offsets = f.offsets();
self.is_set( offsets.0, offsets.1 )
}
/// Returns true if the device is master
pub fn is_master(&self) -> bool {
self.is_set(6, 0)
}
/// Returns true if the bus is busy
pub fn is_bus_busy(&self) -> bool {
self.is_set(6, 1)
}
/// Returns true if the TRA bit is set
pub fn is_tra_set(&self) -> bool {
self.is_set(6, 2)
}
/// Returns which Dual Address has matched
pub fn which_addr(&self) -> DualAddress {
match self.is_set(6, 7) {
true => DualAddress::Addr2,
_ => DualAddress::Addr1,
}
}
/// Returns the PEC register
pub fn pec(&self) -> u32 {
(self.block[6].read() >> 8) & 0b1111_1111
}
/// Clear the given flag
/// If the flag is cleared by hardware, it does nothing
pub fn clear_flag(&mut self, f: I2CFlags) -> &mut Self {
match f.offsets() {
(5, o) => match o {
8..=15 => self.clear(5, o),
_ => self
},
_ => self
}
}
/// Set CCR
/// Refer to the STM32F4 user manual
pub fn set_ccr(&mut self, data: u32) -> &mut Self {
self.write_bits(7, 0, data, 12)
}
/// Set Master Mode
/// Refer to the STM32F4 user manual
pub fn set_master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
MasterMode::FM => self.set(7, 15),
}
}
/// Set duty cycle
/// Refer to the STM32F4 user manual
pub fn set_duty_cycle(&mut self, d: DutyCycle) -> &mut Self {
match d {
DutyCycle::D2 => self.clear(7, 14),
DutyCycle::D169 => self.set(7, 14),
}
}
/// Set maximum rise time
pub fn max_rise_time(&mut self, data: u32) -> &mut Self {
self.write_bits(8, 0, data, 6)
}
/// Enable/Disable Analog Filter
pub fn analog_filter_state(&mut self, s: bool) -> &mut Self {
match s {
true => self.clear(9, 4),
_ => self.set(9, 4),
}
}
/// Sets the Digital Noise Filter
pub fn digital_noise_filter(&mut self, d: Option<u32>) -> &mut Self {
match d {
None => self.write_bits(9, 0, 0, 4),
Some(a) => self.write_bits(9, 0, a, 4),
}
}
}
impl I2c {
/// Set master mode (Standard or Fast)
pub fn master_mode(&mut self, mode: MasterMode) -> &mut Self {
match mode {
MasterMode::SM => self.clear(7, 15),
_ => self.set(7, 15),
}
}
/// Write Interface Address
pub fn set_address(&mut self, address: u32) -> &mut Self {
let cons = if self.is_set(2, 15) { (0, 10) } else { (1, 7) };
self.write_bits(2, cons.0, address, cons.1)
}
/// Set secondary address
pub fn set_secondary_address(&mut self, address: u32) -> &mut Self {
self.write_bits(3, 1, address, 7)
}
} | while !self.is_set(5, 1) {} | random_line_split
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
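// For example, `format_update!(chapters, 3 => 5)` renders as "3 => 5" with the
// old count in blue, the arrow in cyan, and the new count in bold blue.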
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {}...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
// If we are here, something will be printed to stderr. Be it by the specific cases
// just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
// Avoid this message from being repeated twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
.filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
// Only download the stories that:
// (1) Whose IDs were given by the user if any.
// (2) The user responded to its prompt with Y.
.filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped. | StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
} | // So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present. | random_line_split |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {}...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
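// For instance, `info_update!(story, chapters, 3 => 5)` logs something like
// "<story> has an update on chapters (3 => 5)", where the story part comes
// from `format_story!`.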
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status |
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
.filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
let title_changed = story.title != updated_story.title;
let author_changed = story.author != updated_story.author;
let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
            // If we are here, something will be printed to stderr, whether by the
            // specific cases just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
                // Prevent this message from being printed twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
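        // Illustrative gating of the arms above: at
        // `SensibilityLevel::IncludeWords`, a words-only change hits the
        // guarded arm and is queued for download, while a timestamp-only
        // change falls through to the `[ignored]` arm and is skipped.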
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
            .filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
    if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
    let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
        // Only download the stories that:
        // (1) match the IDs given by the user, if any, and
        // (2) the user did not choose to skip at the status prompt.
        .filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
}
| {
continue;
} | conditional_block |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {}...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
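// Illustrative only: `info_update!(story, chapters, 10 => 12)` logs a line
// like `<story> has an update on chapters (10 => 12)`, where the story text
// comes from `format_story!` (defined elsewhere in this crate).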
#[derive(Debug)]
enum StoryDownload {
Update(Id, Story),
Forced(Id),
}
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> | }
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
                if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
        .filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
        let title_changed = story.title != updated_story.title;
        let author_changed = story.author != updated_story.author;
        let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
            // If we are here, something will be printed to stderr, whether by the
            // specific cases just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
                // Prevent this message from being printed twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
            .filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
    if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
    let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
        // Only download the stories that:
        // (1) match the IDs given by the user, if any, and
        // (2) the user did not choose to skip at the status prompt.
        .filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
}
| {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
if !printed {
printed = true;
}
}; | identifier_body |
download.rs | use std::collections::{HashMap, HashSet};
use console::style;
use dialoguer::Confirm;
use fimfic_tracker::{
Config, Id, Result, SensibilityLevel, Story, StoryData, StoryStatus, StoryUpdate, TrackerError,
};
use crate::args::{Download, Prompt};
use crate::readable::ReadableDate;
use crate::Requester;
macro_rules! format_update {
(author, $before:expr => $after:expr) => {
format_update!([green] &$before, &$after)
};
(chapters, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(words, $before:expr => $after:expr) => {
format_update!([blue] $before, $after)
};
(timestamp, $before:expr => $after:expr) => {
format_update!([yellow] ReadableDate($before), ReadableDate($after))
};
(status, $before:expr => $after:expr) => {
format_update!([yellow] $before, $after)
};
([$color:ident] $before:expr, $after:expr) => {
format_args!(
"{} {} {}",
style($before).$color(),
style("=>").cyan(),
style($after).$color().bold()
)
};
}
macro_rules! info_story_checking {
($story:expr) => {
info!("Checking for {}...", format_story!($story));
};
}
macro_rules! info_update {
([ignored] $story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, ". Ignoring")
};
($story:expr, $on:ident, $before:expr => $after:expr) => {
info_update!($story, $on, $before, $after, "")
};
($story:expr, $on:ident, $before:expr, $after:expr, $extra:expr) => {
info!(
"{} has an update on {} ({}){}",
format_story!($story),
stringify!($on),
format_update!($on, $before => $after),
$extra
);
};
}
#[derive(Debug)]
enum | {
Update(Id, Story),
Forced(Id),
}
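// Illustrative distinction (not in the original source): an `Update` carries
// the freshly fetched `Story` so it can be written back after the download,
// while `Forced` only carries the ID and reuses the tracked entry.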
pub fn download(
config: &Config,
requester: &Requester,
story_data: &mut StoryData,
Download {
force,
prompt,
ref ids,
}: Download,
) -> Result<()> {
let selected_ids: Vec<Id> = if ids.is_empty() {
story_data.keys().cloned().collect()
} else {
story_data
.keys()
.filter(|id| ids.contains(id))
.cloned()
.collect()
};
let mut ignored_ids: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
let mut printed = false;
macro_rules! set_printed {
() => {
            if !printed {
printed = true;
}
};
}
for (id, story) in story_data.iter().filter_map(|(id, story)| {
if selected_ids.contains(id) {
Some((*id, story))
} else {
None
}
}) {
if let StoryStatus::Incomplete = story.status {
continue;
}
set_printed!();
let status_notice = format!(
"{} has been marked as {} by the author",
format_story!(story),
format_status!(story)
);
match prompt {
Prompt::AssumeYes => {
info!("{}. Checking for an update on it anyways.", status_notice);
}
Prompt::AssumeNo => {
info!("{}. Skipping checking for an update on it.", status_notice);
ignored_ids.insert(id);
}
Prompt::Ask => {
let confirm = Confirm::new()
.with_prompt(format!(
"{}. Do you want to still check for an update on it?",
status_notice
))
.interact()
.map_err(|err| {
TrackerError::io(err)
.context("failed to launch overwrite confirmation prompt")
})?;
                if !confirm {
ignored_ids.insert(id);
}
}
}
}
if printed {
separate!();
printed = false;
}
let mut updated_stories: HashMap<Id, Story> = HashMap::with_capacity(selected_ids.len());
let mut ids_to_download: HashSet<Id> = HashSet::with_capacity(selected_ids.len());
for (id, story) in story_data
.iter()
        .filter(|(id, _)| selected_ids.contains(id) && !ignored_ids.contains(id))
.map(|(id, story)| (*id, story))
{
info_story_checking!(story);
let updated_story: Story = requester.get_story_response(id)?.into();
        let title_changed = story.title != updated_story.title;
        let author_changed = story.author != updated_story.author;
        let status_changed = story.status != updated_story.status;
let story_update = story.compare_to(&updated_story)?;
if story_update.is_some() || title_changed || author_changed || status_changed {
            // If we are here, something will be printed to stderr, whether by the
            // specific cases just below or by the resulting StoryUpdate comparison.
set_printed!();
if title_changed || author_changed || status_changed {
clear_last_lines!();
if title_changed {
info!(
"{} has changed its title to {}",
format_story!(story),
style(&updated_story.title).green().bold()
);
}
if author_changed {
info!(
"{} has changed its author ({})",
format_story!(story),
format_update!(author, story.author => updated_story.author)
);
}
if status_changed {
info!(
"{} has changed its status ({})",
format_story!(story),
format_update!(status, story.status => updated_story.status),
);
}
                // Prevent this message from being printed twice in verbose output.
if verbose_disabled!() {
info_story_checking!(story);
}
}
updated_stories.insert(id, updated_story);
}
clear_last_lines!();
match story_update {
Some(StoryUpdate::Chapters { before, after }) => {
info_update!(story, chapters, before => after);
}
Some(StoryUpdate::Words { before, after })
if config.sensibility_level >= SensibilityLevel::IncludeWords =>
{
info_update!(story, words, before => after);
}
Some(StoryUpdate::DateTime { before, after })
if config.sensibility_level == SensibilityLevel::Anything =>
{
info_update!(story, timestamp, before => after);
}
Some(StoryUpdate::Words { before, after }) => {
info_update!([ignored] story, words, before => after);
continue;
}
Some(StoryUpdate::DateTime { before, after }) => {
info_update!([ignored] story, timestamp, before => after);
continue;
}
None => continue,
};
ids_to_download.insert(id);
}
// Update stories with ignored updates.
// This way if the downloads fail, these should be saved by the "emergency save".
//
// After this block, `updated_stories` should only contain stories whose IDs are in
// `ids_to_download`.
{
let mut updated_ids = story_data
.keys()
            .filter(|id| !ids_to_download.contains(id))
.filter_map(|id| updated_stories.remove_entry(id))
.collect::<Vec<(Id, Story)>>();
debug!("Ignored updates: {:?}", &updated_ids);
for (id, story) in updated_ids.drain(..) {
story_data.insert(id, story);
}
}
if printed {
separate!();
}
    if !force && ids_to_download.is_empty() {
info!("There is nothing to download");
} else if force {
progress_or_info!(
"{}",
style(format!(
"Force downloading {}",
if ids.is_empty() && ignored_ids.is_empty() {
"every story on the tracking list"
} else {
"selected stories"
}
))
.bold(),
);
separate!();
}
    let use_separator = config.exec.is_some() && !config.quiet;
let delay = std::time::Duration::from_secs(config.download_delay);
let mut stories_to_download: Vec<StoryDownload> = story_data
.keys()
        // Only download the stories that:
        // (1) match the IDs given by the user, if any, and
        // (2) the user did not choose to skip at the status prompt.
        .filter(|id| selected_ids.contains(id) && !ignored_ids.contains(id))
// Download all stories if the user forced it, otherwise only those who passed the update
// sensibility test.
.filter(|id| force || ids_to_download.contains(id))
.map(|id| match updated_stories.remove(id) {
Some(story) => StoryDownload::Update(*id, story),
None => StoryDownload::Forced(*id),
})
.collect();
debug!("Stories to download: {:?}", &stories_to_download);
for (is_first, story_download) in stories_to_download
.drain(..)
.enumerate()
.map(|(index, story_download)| (index == 0, story_download))
{
download_delay!(!is_first, use_separator, delay);
match &story_download {
StoryDownload::Update(_, story) => requester.download(story)?,
// While this should be safe to unwrap, in the unlikely event that it panics the
// "emergency save" would be skipped.
// So I throw in a `match` to "safely" unwrap it and throw a warning if it is not
// present.
StoryDownload::Forced(id) => match story_data.get(id) {
Some(story) => requester.download(story)?,
None => warn!("{} is not present in the tracker file.", id),
},
};
// Insert the update once it downloads.
if let StoryDownload::Update(id, story) = story_download {
story_data.insert(id, story);
}
}
Ok(())
}
| StoryDownload | identifier_name |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
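// Illustrative expansion (assuming `err` holds an ffi status code):
// `if_not!(SND_OK => "failed with error {}", err)` expands to
// `if err == ffi::SND_OK as _ { Ok(()) } else {
//     Err(OgcError::Audio(format!("failed with error {}", err))) }`.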
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
    /// Voice slot to use for this sound. Valid values are `0..16`, i.e. 0 through 15.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn | () -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
    /// Returns the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
    /// Sets a PCM voice to play. This function stops any voice previously
    /// playing in the slot. Use `Asnd::status_voice()` to test status. The
    /// voices are played in 16-bit stereo, regardless of source format.
    /// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
    /// Adds a PCM voice to play from a second buffer. The sound buffer must be
    /// 32-byte aligned and have the same sample format as the first buffer. This must
    /// only be called after `Asnd::set_voice()`, which must return `Ok(())`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
}
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
    /// Returns DSP process time, in nanoseconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
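    // A hedged sketch (names illustrative, not part of this API) of building a
    // buffer that satisfies `validate_buffer`'s 32-byte alignment and length
    // requirements:
    //
    //     #[repr(align(32))]
    //     struct AlignedBuf([u8; 4096]);
    //
    //     let mut buf = AlignedBuf([0u8; 4096]);
    //     Asnd::set_voice(VoiceOptions::new(), &mut buf.0)?;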
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
}
}
| get_sample_counter | identifier_name |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
    /// Voice slot to use for this sound. Valid values are `0..16`, i.e. 0 through 15.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
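// Note (reading off the mapping above, not from upstream docs): the
// big-endian variants share discriminants with the default 16-bit formats
// (`Mono16BitBe` and `Mono16Bit` both map to 1; `Stereo16BitBe` and
// `Stereo16Bit` both map to 3).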
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn get_sample_counter() -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
    /// Returns the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
    /// Sets a PCM voice to play. This function stops any voice previously
    /// playing in the slot. Use `Asnd::status_voice()` to test status. The
    /// voices are played in 16-bit stereo, regardless of source format.
    /// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
    /// Adds a PCM voice to play from a second buffer. The sound buffer must be
    /// 32-byte aligned and have the same sample format as the first buffer. This must
    /// only be called after `Asnd::set_voice()`, which must return `Ok(())`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 |
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
    /// Returns DSP process time, in nanoseconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
}
}
| {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
} | identifier_body |
asnd.rs | //! The ``asnd`` module of ``ogc-rs``.
//!
//! This module implements a safe wrapper around the audio functions found in ``asndlib.h``.
use crate::{ffi, OgcError, Result};
use alloc::format;
use core::time::Duration;
macro_rules! if_not {
($valid:ident => $error_output:expr, $var:ident $(,)*) => {
if $var == ffi::$valid as _ {
Ok(())
} else {
Err(OgcError::Audio(format!($error_output, $var)))
}
};
}
/// Voice Options Callback Type
pub type VoiceOptionsCallback = Option<unsafe extern "C" fn(i32)>;
/// Options to be passed when creating a new voice.
///
/// # Examples
///
/// Create `VoiceOptions` with voice slot 2 and format Mono16Bit:
///
/// ```rust
/// let options = VoiceOptions::new().voice(2).format(VoiceFormat::Mono16Bit);
/// ```
pub struct VoiceOptions {
voice: u32,
format: VoiceFormat,
pitch: u32,
delay: u32,
volume_left: u8,
volume_right: u8,
callback: VoiceOptionsCallback,
}
impl Default for VoiceOptions {
fn default() -> Self {
VoiceOptions::new()
}
}
impl VoiceOptions {
/// Create this struct with sensible default values.
pub fn new() -> Self {
Self {
voice: 0,
format: VoiceFormat::Stereo16Bit,
pitch: 48000,
delay: 0,
volume_left: 255,
volume_right: 255,
callback: None,
}
}
    /// Voice slot to use for this sound. Valid values are `0..16`, i.e. 0 through 15.
#[must_use]
pub fn voice(mut self, voice: u32) -> Self {
assert!(voice < 16, "Voice index {} is >= 16", voice);
self.voice = voice;
self
}
/// Format to use for this sound.
#[must_use]
pub fn format(mut self, format: VoiceFormat) -> Self {
self.format = format;
self
}
/// Frequency to use, in Hz.
#[must_use]
pub fn pitch(mut self, pitch: u32) -> Self {
self.pitch = pitch;
self
}
/// Delay to wait before playing, in milliseconds.
#[must_use]
pub fn delay(mut self, delay: u32) -> Self {
self.delay = delay;
self
}
/// Voice volume of the left channel.
#[must_use]
pub fn volume_left(mut self, volume_left: u8) -> Self {
self.volume_left = volume_left;
self
}
/// Voice volume of the right channel.
#[must_use]
pub fn volume_right(mut self, volume_right: u8) -> Self {
self.volume_right = volume_right;
self
}
/// Optional callback function to use.
#[must_use]
pub fn callback(mut self, callback: Option<unsafe extern "C" fn(i32)>) -> Self {
self.callback = callback;
self
}
}
/// Source voice format.
pub enum VoiceFormat {
Mono8Bit,
Mono16Bit,
Mono16BitBe,
Stereo8Bit,
Stereo16Bit,
Stereo16BitBe,
Mono8BitU,
Mono16BitLE,
Stereo8BitU,
Stereo16BitLe,
}
impl VoiceFormat {
fn as_i32(&self) -> i32 {
match self {
VoiceFormat::Mono8Bit => 0,
VoiceFormat::Mono16Bit => 1,
VoiceFormat::Mono16BitBe => 1,
VoiceFormat::Stereo8Bit => 2,
VoiceFormat::Stereo16Bit => 3,
VoiceFormat::Stereo16BitBe => 3,
VoiceFormat::Mono8BitU => 4,
VoiceFormat::Mono16BitLE => 5,
VoiceFormat::Stereo8BitU => 6,
VoiceFormat::Stereo16BitLe => 7,
}
}
}
/// Represents the asnd service.
/// This service can only be created once!
/// If you use `Asnd::init()`, you cannot do `Audio::init()`.
/// Only one of them can be used at a time.
pub struct Asnd;
/// Implementation of the asnd service.
impl Asnd {
/// Initializes the asnd lib and fixes the hardware sample rate to 48000hz.
pub fn init() -> Self {
unsafe {
ffi::ASND_Init();
}
Self
}
/// De-initializes the asnd lib. This is also called when `Asnd` gets dropped.
pub fn end() {
unsafe {
ffi::ASND_End();
}
}
/// Pauses if true and resumes if false.
pub fn pause(should_pause: bool) {
unsafe {
ffi::ASND_Pause(should_pause as i32);
}
}
/// Returns true if paused, false if not paused.
pub fn is_paused() -> bool {
unsafe { ffi::ASND_Is_Paused() > 0 }
}
/// Returns the global time in milliseconds. Time is updated from the IRQ.
pub fn get_time() -> u32 {
unsafe { ffi::ASND_GetTime() }
}
/// Returns the global sample counter. Can be used to implement timers with high precision.
pub fn get_sample_counter() -> u32 {
unsafe { ffi::ASND_GetSampleCounter() }
}
/// Returns the samples sent from the IRQ in one tick.
pub fn get_samples_per_tick() -> u32 {
unsafe { ffi::ASND_GetSamplesPerTick() }
}
/// Sets the global time, in milliseconds.
pub fn set_time(time: u32) {
unsafe {
ffi::ASND_SetTime(time);
}
}
/// Sets a global callback for general purposes. It is called by the IRQ.
pub fn set_callback<F>(callback: Option<unsafe extern "C" fn()>) {
unsafe {
ffi::ASND_SetCallback(callback);
}
}
    /// Returns the current audio rate. Default is 48000hz.
pub fn get_audio_rate() -> i32 {
unsafe { ffi::ASND_GetAudioRate() }
}
    /// Sets a PCM voice to play. This function stops any voice previously
    /// playing in the slot. Use `Asnd::status_voice()` to test status. The
    /// voices are played in 16-bit stereo, regardless of source format.
    /// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
options.callback,
)
};
if_not!(SND_OK => "Asnd::set_voice() failed with error {}!", err)
}
/// Sets a PCM voice to play infinitely. See `Asnd::set_voice()` as it is largely identical.
/// The buffer MUST be aligned and padded to 32 bytes.
pub fn set_infinite_voice(options: VoiceOptions, sound_buffer: &mut [u8]) -> Result<()> {
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_SetInfiniteVoice(
options.voice as i32,
options.format.as_i32(),
options.pitch as i32,
options.delay as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
options.volume_left as i32,
options.volume_right as i32,
)
};
if_not!(SND_OK => "Asnd::set_infinite_voice() failed with error {}", err)
}
    /// Adds a PCM voice to play from a second buffer. The sound buffer must be
    /// 32-byte aligned and have the same sample format as the first buffer. This must
    /// only be called after `Asnd::set_voice()`, which must return `Ok(())`.
/// The buffer MUST be aligned and padded to 32 bytes.
fn add_voice(voice: u32, sound_buffer: &mut [u8]) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
Self::validate_buffer(sound_buffer);
let err = unsafe {
ffi::ASND_AddVoice(
voice as i32,
sound_buffer.as_mut_ptr() as *mut _,
sound_buffer.len() as i32,
)
};
if_not!(SND_OK => "Asnd::add_voice() failed with error {}", err)
}
/// Stops the selected voice. If the voice is used in song mode, you need to
/// assign the samples with `Asnd::set_song_sample_voice()`.
pub fn stop_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StopVoice(voice as i32) };
if_not!(SND_OK => "Asnd::stop_voice() failed with error {}", err)
}
/// Pauses the selected voice. Can also be used to resume voice.
pub fn pause_voice(voice: u32, pause: bool) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_PauseVoice(voice as i32, pause as i32) };
if_not!(SND_OK => "Asnd::pause_voice() failed with error {}", err)
}
/// Returns the state of the selected voice.
pub fn status_voice(voice: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_StatusVoice(voice as i32) };
if_not!(SND_WORKING => "Asnd::status_voice() failed with error {}", err)
}
/// Returns the first unused voice. Fails if no voices are available.
pub fn get_first_unused_voice() -> Result<u32> {
let err = unsafe { ffi::ASND_GetFirstUnusedVoice() };
match err {
x if x < 16 => Ok(x as u32),
_ => Err(OgcError::Audio(format!(
"Asnd::get_first_unused_voice() failed with error {}",
err
))),
}
}
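    // Illustrative chaining (assumes `Asnd::init()` was called and `buf` is a
    // 32-byte aligned, 32-byte padded sound buffer):
    //
    //     let slot = Asnd::get_first_unused_voice()?;
    //     Asnd::set_voice(VoiceOptions::new().voice(slot), &mut buf)?;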
/// Changes the voice-pitch in real time. This function can be used to
/// create audio effects such as Doppler effect simulation.
pub fn change_pitch_voice(voice: u32, pitch: u32) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe { ffi::ASND_ChangePitchVoice(voice as i32, pitch as i32) };
if_not!(SND_OK => "Asnd::change_pitch_voice() failed with error {}", err)
}
/// Changes the voice volume in real time. This function can be used to create
/// audio effects like distance attenuation.
pub fn change_volume_voice(voice: u32, volume_left: u8, volume_right: u8) -> Result<()> {
assert!(voice < 16, "Voice index {} is >= 16", voice);
let err = unsafe {
ffi::ASND_ChangeVolumeVoice(voice as i32, volume_left as i32, volume_right as i32)
};
if_not!(SND_OK => "Asnd::change_volume_voice() failed with error {}", err)
}
/// Returns the voice tick counter. This value represents the number of ticks
/// since this voice started to play, sans delay time. If the lib is initialized with
/// `INIT_RATE=48000`, a return value of 24000 is equal to 0.5 seconds.
pub fn get_tick_counter_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTickCounterVoice(voice as i32) }
}
/// Returns the voice playback time. This value represents the time in milliseconds
/// since this voice started playing.
pub fn get_timer_voice(voice: u32) -> u32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_GetTimerVoice(voice as i32) }
}
/// Tests if a pointer is in use by a voice as a buffer.
/// This must be the same pointer sent to `Asnd::add_voice()` or `Asnd::set_voice()`.
/// Returns 0 if the pointer is unused.
/// Returns 1 if the pointer is used as a buffer.
/// Returns `ogc_sys::SND_INVALID` if invalid.
pub fn test_pointer<T>(voice: u32, pointer: *mut T) -> i32 {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestPointer(voice as i32, pointer as *mut _) }
}
/// Tests to determine if the voice is ready to receive a new buffer sample
/// with `Asnd::add_voice()`. Returns true if voice is ready.
pub fn test_voice_buffer_ready(voice: u32) -> bool {
assert!(voice < 16, "Voice index {} is >= 16", voice);
unsafe { ffi::ASND_TestVoiceBufferReady(voice as i32) > 0 }
}
/// Returns the DSP usage, in percent `(0..=100)`.
pub fn get_dsp_percent_use() -> u32 {
unsafe { ffi::ASND_GetDSP_PercentUse() }
}
    /// Returns DSP process time, in nanoseconds.
pub fn get_dsp_process_time() -> Duration {
unsafe { Duration::from_nanos(ffi::ASND_GetDSP_ProcessTime().into()) }
}
fn validate_buffer(sound_buffer: &mut [u8]) {
assert_eq!(
0,
sound_buffer.as_ptr().align_offset(32),
"Data is not aligned correctly."
);
assert_eq!(
0,
sound_buffer.len() % 32,
"Data length is not a multiple of 32."
);
}
}
impl Drop for Asnd {
fn drop(&mut self) {
Self::end();
} | } | random_line_split |
|
path.rs | //! File path utilities.
//!
//! Some of the functions are similar to [`std::path::Path`] ones, but here they
//! work directly upon [`&str`](str) instead of [`&OsStr`](std::ffi::OsStr).
use crate::co;
use crate::decl::*;
use crate::guard::*;
use crate::prelude::*;
/// Returns an iterator over the files and folders within a directory.
/// Optionally, a wildcard can be specified to filter files by name.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// Listing all text files in a directory:
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// for file_path in w::path::dir_list("C:\\temp", Some("*.txt")) {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_list<'a>(
dir_path: &'a str,
filter: Option<&'a str>,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirListIter::new(dir_path.to_owned(), filter)
}
/// Returns an iterator over the files within a directory, and all its
/// subdirectories, recursively.
///
/// This is a high-level abstraction over [`HFINDFILE`](crate::HFINDFILE)
/// iteration functions.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// // Ordinary for loop
/// for file_path in w::path::dir_walk("C:\\Temp") {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// }
///
/// // Closure with try_for_each
/// w::path::dir_walk("C:\\Temp")
/// .try_for_each(|file_path| {
/// let file_path = file_path?;
/// println!("{}", file_path);
/// Ok(())
/// })?;
///
/// // Collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .collect::<w::SysResult<Vec<_>>>()?;
///
/// // Transforming and collecting into a Vec
/// let all = w::path::dir_walk("C:\\Temp")
/// .map(|file_path| {
/// let file_path = file_path?;
/// Ok(format!("PATH: {}", file_path))
/// })
/// .collect::<w::SysResult<Vec<_>>>()?;
/// # Ok::<_, winsafe::co::ERROR>(())
/// ```
#[must_use]
pub fn dir_walk<'a>(
dir_path: &'a str,
) -> impl Iterator<Item = SysResult<String>> + 'a
{
DirWalkIter::new(dir_path.to_owned())
}
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(debug_assertions)]
#[must_use]
pub fn exe_path() -> SysResult<String> {
let dbg = HINSTANCE::NULL.GetModuleFileName()?;
Ok(
get_path( // target
get_path( // debug
get_path(&dbg).unwrap(), // exe name
).unwrap(),
).unwrap()
.to_owned(),
)
}
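// Illustrative effect of the two extra `get_path` calls above: when the
// binary runs from `C:\my\project\target\debug\app.exe`, this returns
// `C:\my\project` rather than `C:\my\project\target\debug`.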
/// Returns the path of the current EXE file, without the EXE filename, and
/// without a trailing backslash.
///
/// In a debug build, the `target\debug` folders will be suppressed.
#[cfg(not(debug_assertions))]
#[must_use]
pub fn exe_path() -> SysResult<String> {
Ok(
get_path(&HINSTANCE::NULL.GetModuleFileName()?)
.unwrap().to_owned(),
)
}
/// Returns true if the path exists.
#[must_use]
pub fn exists(full_path: &str) -> bool {
GetFileAttributes(full_path).is_ok()
}
/// Extracts the file name from a full path, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let f = w::path::get_file_name("C:\\Temp\\foo.txt"); // foo.txt
/// ```
#[must_use]
pub fn get_file_name(full_path: &str) -> Option<&str> {
match full_path.rfind('\\') {
None => Some(full_path), // if no backslash, the whole string is the file name
        // `rfind` returns a byte index, so compare against the byte length.
        Some(idx) => if idx == full_path.len() - 1 {
None // last char is '\\', no file name
} else {
Some(&full_path[idx + 1..])
},
}
}
/// Extracts the full path, but the last part.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::get_path("C:\\Temp\\xx\\a.txt"); // C:\Temp\xx
/// let q = w::path::get_path("C:\\Temp\\xx\\"); // C:\Temp\xx
/// let r = w::path::get_path("C:\\Temp\\xx"); // C:\Temp
/// ```
#[must_use]
pub fn get_path(full_path: &str) -> Option<&str> {
full_path.rfind('\\') // if no backslash, the whole string is the file name, so no path
.map(|idx| &full_path[0..idx])
}
/// Tells whether the full path ends in one of the given extensions,
/// case-insensitive.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// println!("{}",
/// w::path::has_extension("file.txt", &[".txt", ".bat"]));
/// ```
#[must_use]
pub fn has_extension(full_path: &str, extensions: &[impl AsRef<str>]) -> bool {
let full_path_u = full_path.to_uppercase();
    extensions.iter()
        .any(|ext| {
            let ext_u = ext.as_ref().to_uppercase();
            full_path_u.ends_with(&ext_u)
        })
}
/// Returns true if the path is a directory.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_directory(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::DIRECTORY)
}
/// Returns true if the path is hidden.
///
/// # Panics
///
/// Panics if the path does not exist.
#[must_use]
pub fn is_hidden(full_path: &str) -> bool {
let flags = GetFileAttributes(full_path).unwrap();
flags.has(co::FILE_ATTRIBUTE::HIDDEN)
}
/// Replaces the extension by the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_extension(
/// "C:\\Temp\\something.txt", ".sh"); // C:\Temp\something.sh
/// ```
#[must_use]
pub fn replace_extension(full_path: &str, new_extension: &str) -> String {
if let Some(last) = full_path.chars().last() {
        if last == '\\' { // full_path names a directory; just trim the trailing backslash
return rtrim_backslash(full_path).to_owned();
}
}
let new_has_dot = new_extension.chars().next() == Some('.');
match full_path.rfind('.') {
None => format!("{}{}{}", // file name without extension, just append it
full_path,
if new_has_dot { "" } else { "." },
new_extension,
),
Some(idx) => format!("{}{}{}",
&full_path[0..idx],
if new_has_dot { "" } else { "." },
new_extension,
),
}
}
/// Replaces the file name with the given one.
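///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_file_name(
///     "C:\\Temp\\foo.txt", "bar.log"); // C:\Temp\bar.log
/// ```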
#[must_use]
pub fn replace_file_name(full_path: &str, new_file: &str) -> String {
match get_path(full_path) {
None => new_file.to_owned(),
Some(path) => format!("{}\\{}", path, new_file),
}
}
/// Keeps the file name and replaces the path with the given one.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::replace_path( // C:\another\foo.txt
/// "C:\\Temp\\foo.txt",
/// "C:\\another",
/// );
/// ```
#[must_use]
pub fn replace_path(full_path: &str, new_path: &str) -> String {
let file_name = get_file_name(full_path);
format!("{}{}{}",
rtrim_backslash(new_path),
if file_name.is_some() { "\\" } else { "" },
file_name.unwrap_or(""))
}
/// Removes a trailing backslash, if any.
///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let p = w::path::rtrim_backslash("C:\\Temp\\"); // C:\Temp
/// ```
#[must_use]
pub fn rtrim_backslash(full_path: &str) -> &str {
match full_path.chars().last() {
None => full_path, // empty string
Some(last_ch) => if last_ch == '\\' {
let mut chars = full_path.chars();
chars.next_back(); // remove last char
chars.as_str()
} else {
full_path // no trailing backslash
},
}
}
/// Returns a `Vec` with each part of the full path.
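///
/// # Examples
///
/// ```no_run
/// use winsafe::{self as w, prelude::*};
///
/// let v = w::path::split_parts("C:\\Temp\\xx\\a.txt");
/// // ["C:", "Temp", "xx", "a.txt"]
/// ```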
#[must_use]
pub fn split_parts(full_path: &str) -> Vec<&str> {
let no_bs = rtrim_backslash(full_path);
no_bs.split('\\').collect()
}
//------------------------------------------------------------------------------
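/// Non-recursive iterator over the entries of a single directory, optionally
/// filtered, built on top of `HFINDFILE::FindFirstFile`/`FindNextFile`.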
pub(in crate::kernel) struct DirListIter<'a> {
dir_path: String,
filter: Option<&'a str>,
hfind: Option<FindCloseGuard>,
wfd: WIN32_FIND_DATA,
no_more: bool,
}
impl<'a> Iterator for DirListIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
let found = match &self.hfind {
None => { // first pass
let dir_final = match self.filter {
None => format!("{}\\*", self.dir_path),
Some(filter) => format!("{}\\{}", self.dir_path, filter),
};
let found = match HFINDFILE::FindFirstFile(&dir_final, &mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok((hfind, found)) => {
self.hfind = Some(hfind); // store our find handle
found
},
};
found
},
Some(hfind) => { // subsequent passes
match hfind.FindNextFile(&mut self.wfd) {
Err(e) => {
self.no_more = true; // prevent further iterations
return Some(Err(e));
},
Ok(found) => found,
}
},
};
if found {
let file_name = self.wfd.cFileName();
if file_name == "." || file_name == ".." { // skip these
self.next()
} else {
Some(Ok(format!("{}\\{}", self.dir_path, self.wfd.cFileName())))
}
} else {
None
}
}
}
impl<'a> DirListIter<'a> {
pub(in crate::kernel) fn new(
dir_path: String,
filter: Option<&'a str>,
) -> Self {
Self {
dir_path: rtrim_backslash(&dir_path).to_owned(),
filter,
hfind: None,
wfd: WIN32_FIND_DATA::default(),
no_more: false,
}
}
}
//------------------------------------------------------------------------------
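/// Recursive iterator over a directory tree: lists the current level with a
/// `DirListIter` and descends into each subdirectory through a nested
/// `DirWalkIter`.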
pub(in crate::kernel) struct DirWalkIter<'a> {
runner: DirListIter<'a>,
subdir_runner: Option<Box<DirWalkIter<'a>>>,
no_more: bool,
}
impl<'a> Iterator for DirWalkIter<'a> {
type Item = SysResult<String>;
fn next(&mut self) -> Option<Self::Item> {
if self.no_more {
return None;
}
match &mut self.subdir_runner {
None => {
let cur_file = self.runner.next();
match cur_file {
None => None,
Some(cur_file) => {
match cur_file {
Err(e) => {
self.no_more = true; // prevent further iterations
Some(Err(e))
},
Ok(cur_file) => {
if is_directory(&cur_file) {
self.subdir_runner = Some(Box::new(Self::new(cur_file))); // recursively
self.next()
} else {
Some(Ok(cur_file))
}
},
}
},
}
},
Some(subdir_runner) => {
let inner_file = subdir_runner.next();
match inner_file {
				None => { // subdir_runner finished its work
self.subdir_runner = None;
self.next()
},
Some(inner_file) => {
Some(inner_file)
},
}
			},
		}
	}
}
}
impl<'a> DirWalkIter<'a> {
	pub(in crate::kernel) fn new(dir_path: String) -> Self {
		Self {
			runner: DirListIter::new(dir_path, None),
			subdir_runner: None,
			no_more: false,
		}
	}
}