file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
fill.rs
|
use lyon::tessellation::FillOptions;
|
///
/// This trait allows the `Drawing` context to automatically provide an implementation of the
/// following builder methods for all primitives that provide some fill tessellation options.
pub trait SetFill: Sized {
/// Provide a mutable reference to the `FillOptions` field.
fn fill_options_mut(&mut self) -> &mut FillOptions;
/// Specify the whole set of fill tessellation options.
fn fill_opts(mut self, opts: FillOptions) -> Self {
*self.fill_options_mut() = opts;
self
}
/// Maximum allowed distance to the path when building an approximation.
fn fill_tolerance(mut self, tolerance: f32) -> Self {
self.fill_options_mut().tolerance = tolerance;
self
}
/// Specify the rule used to determine what is inside and what is outside of the shape.
///
/// Currently, only the `EvenOdd` rule is implemented.
fn fill_rule(mut self, rule: lyon::tessellation::FillRule) -> Self {
self.fill_options_mut().fill_rule = rule;
self
}
/// Whether to perform a vertical or horizontal traversal of the geometry.
///
/// Default value: `Vertical`.
fn fill_sweep_orientation(mut self, orientation: lyon::tessellation::Orientation) -> Self {
self.fill_options_mut().sweep_orientation = orientation;
self
}
/// A fast path to avoid some expensive operations if the path is known to not have any
/// self-intersections.
///
/// Do not set this to `false` if the path may have intersecting edges else the tessellator may
/// panic or produce incorrect results. In doubt, do not change the default value.
///
/// Default value: `true`.
fn handle_intersections(mut self, handle: bool) -> Self {
self.fill_options_mut().handle_intersections = handle;
self
}
}
impl SetFill for Option<FillOptions> {
fn fill_options_mut(&mut self) -> &mut FillOptions {
self.get_or_insert_with(Default::default)
}
}
|
/// Nodes that support fill tessellation.
|
random_line_split
|
fill.rs
|
use lyon::tessellation::FillOptions;
/// Nodes that support fill tessellation.
///
/// This trait allows the `Drawing` context to automatically provide an implementation of the
/// following builder methods for all primitives that provide some fill tessellation options.
pub trait SetFill: Sized {
/// Provide a mutable reference to the `FillOptions` field.
fn fill_options_mut(&mut self) -> &mut FillOptions;
/// Specify the whole set of fill tessellation options.
fn fill_opts(mut self, opts: FillOptions) -> Self {
*self.fill_options_mut() = opts;
self
}
/// Maximum allowed distance to the path when building an approximation.
fn fill_tolerance(mut self, tolerance: f32) -> Self {
self.fill_options_mut().tolerance = tolerance;
self
}
/// Specify the rule used to determine what is inside and what is outside of the shape.
///
/// Currently, only the `EvenOdd` rule is implemented.
fn fill_rule(mut self, rule: lyon::tessellation::FillRule) -> Self {
self.fill_options_mut().fill_rule = rule;
self
}
/// Whether to perform a vertical or horizontal traversal of the geometry.
///
/// Default value: `Vertical`.
fn
|
(mut self, orientation: lyon::tessellation::Orientation) -> Self {
self.fill_options_mut().sweep_orientation = orientation;
self
}
/// A fast path to avoid some expensive operations if the path is known to not have any
/// self-intersections.
///
/// Do not set this to `false` if the path may have intersecting edges else the tessellator may
/// panic or produce incorrect results. In doubt, do not change the default value.
///
/// Default value: `true`.
fn handle_intersections(mut self, handle: bool) -> Self {
self.fill_options_mut().handle_intersections = handle;
self
}
}
impl SetFill for Option<FillOptions> {
fn fill_options_mut(&mut self) -> &mut FillOptions {
self.get_or_insert_with(Default::default)
}
}
|
fill_sweep_orientation
|
identifier_name
|
ctl_flags.rs
|
// ctl_flags.rs
use super::consts::*;
// Represents control flags of a sysctl
bitflags! {
pub struct CtlFlags : libc::c_uint {
/// Allow reads of variable
const RD = CTLFLAG_RD;
/// Allow writes to the variable
const WR = CTLFLAG_WR;
const RW = Self::RD.bits | Self::WR.bits;
/// This sysctl is not active yet
const DORMANT = CTLFLAG_DORMANT;
/// All users can set this var
const ANYBODY = CTLFLAG_ANYBODY;
/// Permit set only if securelevel<=0
const SECURE = CTLFLAG_SECURE;
/// Prisoned roots can fiddle
const PRISON = CTLFLAG_PRISON;
/// Dynamic oid - can be freed
const DYN = CTLFLAG_DYN;
/// Skip this sysctl when listing
const SKIP = CTLFLAG_DORMANT;
/// Secure level
const SECURE_MASK = 0x00F00000;
/// Default value is loaded from getenv()
const TUN = CTLFLAG_TUN;
/// Readable tunable
const RDTUN = Self::RD.bits | Self::TUN.bits;
|
/// Readable and writeable tunable
const RWTUN = Self::RW.bits | Self::TUN.bits;
/// Handler is MP safe
const MPSAFE = CTLFLAG_MPSAFE;
/// Prisons with vnet can fiddle
const VNET = CTLFLAG_VNET;
/// Oid is being removed
const DYING = CTLFLAG_DYING;
/// Can be read in capability mode
const CAPRD = CTLFLAG_CAPRD;
/// Can be written in capability mode
const CAPWR = CTLFLAG_CAPWR;
/// Statistics; not a tuneable
const STATS = CTLFLAG_STATS;
/// Don't fetch tunable from getenv()
const NOFETCH = CTLFLAG_NOFETCH;
/// Can be read and written in capability mode
const CAPRW = Self::CAPRD.bits | Self::CAPWR.bits;
}
}
#[cfg(test)]
mod tests {
use crate::Sysctl;
#[test]
fn ctl_flags() {
// This sysctl should be read-only.
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
let ctl: crate::Ctl = crate::Ctl::new("kern.ostype").unwrap();
#[cfg(any(target_os = "android", target_os = "linux"))]
let ctl: crate::Ctl = crate::Ctl::new("kernel.ostype").unwrap();
let flags: crate::CtlFlags = ctl.flags().unwrap();
assert_eq!(flags.bits() & crate::CTLFLAG_RD, crate::CTLFLAG_RD);
assert_eq!(flags.bits() & crate::CTLFLAG_WR, 0);
}
}
|
random_line_split
|
|
ctl_flags.rs
|
// ctl_flags.rs
use super::consts::*;
// Represents control flags of a sysctl
bitflags! {
pub struct CtlFlags : libc::c_uint {
/// Allow reads of variable
const RD = CTLFLAG_RD;
/// Allow writes to the variable
const WR = CTLFLAG_WR;
const RW = Self::RD.bits | Self::WR.bits;
/// This sysctl is not active yet
const DORMANT = CTLFLAG_DORMANT;
/// All users can set this var
const ANYBODY = CTLFLAG_ANYBODY;
/// Permit set only if securelevel<=0
const SECURE = CTLFLAG_SECURE;
/// Prisoned roots can fiddle
const PRISON = CTLFLAG_PRISON;
/// Dynamic oid - can be freed
const DYN = CTLFLAG_DYN;
/// Skip this sysctl when listing
const SKIP = CTLFLAG_DORMANT;
/// Secure level
const SECURE_MASK = 0x00F00000;
/// Default value is loaded from getenv()
const TUN = CTLFLAG_TUN;
/// Readable tunable
const RDTUN = Self::RD.bits | Self::TUN.bits;
/// Readable and writeable tunable
const RWTUN = Self::RW.bits | Self::TUN.bits;
/// Handler is MP safe
const MPSAFE = CTLFLAG_MPSAFE;
/// Prisons with vnet can fiddle
const VNET = CTLFLAG_VNET;
/// Oid is being removed
const DYING = CTLFLAG_DYING;
/// Can be read in capability mode
const CAPRD = CTLFLAG_CAPRD;
/// Can be written in capability mode
const CAPWR = CTLFLAG_CAPWR;
/// Statistics; not a tuneable
const STATS = CTLFLAG_STATS;
/// Don't fetch tunable from getenv()
const NOFETCH = CTLFLAG_NOFETCH;
/// Can be read and written in capability mode
const CAPRW = Self::CAPRD.bits | Self::CAPWR.bits;
}
}
#[cfg(test)]
mod tests {
use crate::Sysctl;
#[test]
fn
|
() {
// This sysctl should be read-only.
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
let ctl: crate::Ctl = crate::Ctl::new("kern.ostype").unwrap();
#[cfg(any(target_os = "android", target_os = "linux"))]
let ctl: crate::Ctl = crate::Ctl::new("kernel.ostype").unwrap();
let flags: crate::CtlFlags = ctl.flags().unwrap();
assert_eq!(flags.bits() & crate::CTLFLAG_RD, crate::CTLFLAG_RD);
assert_eq!(flags.bits() & crate::CTLFLAG_WR, 0);
}
}
|
ctl_flags
|
identifier_name
|
ctl_flags.rs
|
// ctl_flags.rs
use super::consts::*;
// Represents control flags of a sysctl
bitflags! {
pub struct CtlFlags : libc::c_uint {
/// Allow reads of variable
const RD = CTLFLAG_RD;
/// Allow writes to the variable
const WR = CTLFLAG_WR;
const RW = Self::RD.bits | Self::WR.bits;
/// This sysctl is not active yet
const DORMANT = CTLFLAG_DORMANT;
/// All users can set this var
const ANYBODY = CTLFLAG_ANYBODY;
/// Permit set only if securelevel<=0
const SECURE = CTLFLAG_SECURE;
/// Prisoned roots can fiddle
const PRISON = CTLFLAG_PRISON;
/// Dynamic oid - can be freed
const DYN = CTLFLAG_DYN;
/// Skip this sysctl when listing
const SKIP = CTLFLAG_DORMANT;
/// Secure level
const SECURE_MASK = 0x00F00000;
/// Default value is loaded from getenv()
const TUN = CTLFLAG_TUN;
/// Readable tunable
const RDTUN = Self::RD.bits | Self::TUN.bits;
/// Readable and writeable tunable
const RWTUN = Self::RW.bits | Self::TUN.bits;
/// Handler is MP safe
const MPSAFE = CTLFLAG_MPSAFE;
/// Prisons with vnet can fiddle
const VNET = CTLFLAG_VNET;
/// Oid is being removed
const DYING = CTLFLAG_DYING;
/// Can be read in capability mode
const CAPRD = CTLFLAG_CAPRD;
/// Can be written in capability mode
const CAPWR = CTLFLAG_CAPWR;
/// Statistics; not a tuneable
const STATS = CTLFLAG_STATS;
/// Don't fetch tunable from getenv()
const NOFETCH = CTLFLAG_NOFETCH;
/// Can be read and written in capability mode
const CAPRW = Self::CAPRD.bits | Self::CAPWR.bits;
}
}
#[cfg(test)]
mod tests {
use crate::Sysctl;
#[test]
fn ctl_flags()
|
}
|
{
// This sysctl should be read-only.
#[cfg(any(target_os = "freebsd", target_os = "macos"))]
let ctl: crate::Ctl = crate::Ctl::new("kern.ostype").unwrap();
#[cfg(any(target_os = "android", target_os = "linux"))]
let ctl: crate::Ctl = crate::Ctl::new("kernel.ostype").unwrap();
let flags: crate::CtlFlags = ctl.flags().unwrap();
assert_eq!(flags.bits() & crate::CTLFLAG_RD, crate::CTLFLAG_RD);
assert_eq!(flags.bits() & crate::CTLFLAG_WR, 0);
}
|
identifier_body
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
// ========================================================================= //
pub fn run_cross_the_line(
window: &mut Window,
save_data: &mut SaveData,
) -> Mode {
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
|
};
run_puzzle(window, save_data, view)
}
// ========================================================================= //
|
&save_data.game_mut().cross_the_line,
)
|
random_line_split
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
// ========================================================================= //
pub fn run_cross_the_line(
window: &mut Window,
save_data: &mut SaveData,
) -> Mode
|
// ========================================================================= //
|
{
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
&save_data.game_mut().cross_the_line,
)
};
run_puzzle(window, save_data, view)
}
|
identifier_body
|
mod.rs
|
// +--------------------------------------------------------------------------+
// | Copyright 2016 Matthew D. Steele <[email protected]> |
// | |
// | This file is part of System Syzygy. |
// | |
// | System Syzygy is free software: you can redistribute it and/or modify it |
// | under the terms of the GNU General Public License as published by the |
// | Free Software Foundation, either version 3 of the License, or (at your |
// | option) any later version. |
// | |
// | System Syzygy is distributed in the hope that it will be useful, but |
// | WITHOUT ANY WARRANTY; without even the implied warranty of |
// | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
// | General Public License for details. |
// | |
// | You should have received a copy of the GNU General Public License along |
// | with System Syzygy. If not, see <http://www.gnu.org/licenses/>. |
// +--------------------------------------------------------------------------+
mod scenes;
mod view;
use self::view::View;
use crate::gui::Window;
use crate::modes::{run_puzzle, Mode};
use crate::save::SaveData;
// ========================================================================= //
pub fn
|
(
window: &mut Window,
save_data: &mut SaveData,
) -> Mode {
let view = {
let visible_rect = window.visible_rect();
View::new(
&mut window.resources(),
visible_rect,
&save_data.game_mut().cross_the_line,
)
};
run_puzzle(window, save_data, view)
}
// ========================================================================= //
|
run_cross_the_line
|
identifier_name
|
utils.rs
|
#[allow(dead_code)]
pub struct XorShift {
x : i32,
max : i32,
cur : usize,
count : usize,
}
impl XorShift {
#[allow(dead_code)]
pub fn new(seed : i32, max : i32, count: usize) -> XorShift {
XorShift{x: seed, max: max, count: count, cur: 0}
}
}
impl Iterator for XorShift {
type Item = i32;
fn next(&mut self) -> Option<i32> {
if self.cur >= self.count {
|
self.x ^= self.x >> 12; // a
self.x ^= self.x << 25; // b
self.x ^= self.x >> 27; // c
self.cur += 1;
return Some(self.x % self.max);
}
}
|
return None;
}
|
random_line_split
|
utils.rs
|
#[allow(dead_code)]
pub struct XorShift {
x : i32,
max : i32,
cur : usize,
count : usize,
}
impl XorShift {
#[allow(dead_code)]
pub fn new(seed : i32, max : i32, count: usize) -> XorShift {
XorShift{x: seed, max: max, count: count, cur: 0}
}
}
impl Iterator for XorShift {
type Item = i32;
fn next(&mut self) -> Option<i32> {
if self.cur >= self.count
|
self.x ^= self.x >> 12; // a
self.x ^= self.x << 25; // b
self.x ^= self.x >> 27; // c
self.cur += 1;
return Some(self.x % self.max);
}
}
|
{
return None;
}
|
conditional_block
|
utils.rs
|
#[allow(dead_code)]
pub struct
|
{
x : i32,
max : i32,
cur : usize,
count : usize,
}
impl XorShift {
#[allow(dead_code)]
pub fn new(seed : i32, max : i32, count: usize) -> XorShift {
XorShift{x: seed, max: max, count: count, cur: 0}
}
}
impl Iterator for XorShift {
type Item = i32;
fn next(&mut self) -> Option<i32> {
if self.cur >= self.count {
return None;
}
self.x ^= self.x >> 12; // a
self.x ^= self.x << 25; // b
self.x ^= self.x >> 27; // c
self.cur += 1;
return Some(self.x % self.max);
}
}
|
XorShift
|
identifier_name
|
networking.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use script_runtime::{CommonScriptMsg, ScriptChan, ScriptThreadEventCategory};
use task::{Task, TaskCanceller};
use task_source::TaskSource;
#[derive(JSTraceable)]
pub struct NetworkingTaskSource(pub Box<ScriptChan + Send +'static>);
impl Clone for NetworkingTaskSource {
fn clone(&self) -> NetworkingTaskSource {
NetworkingTaskSource(self.0.clone())
}
}
impl TaskSource for NetworkingTaskSource {
fn queue_with_canceller<T>(
&self,
msg: Box<T>,
canceller: &TaskCanceller,
) -> Result<(), ()>
where
T: Send + Task +'static,
{
self.0.send(CommonScriptMsg::Task(
ScriptThreadEventCategory::NetworkEvent,
canceller.wrap_task(msg),
))
}
}
|
impl NetworkingTaskSource {
/// This queues a task that will not be cancelled when its associated
/// global scope gets destroyed.
pub fn queue_unconditionally<T>(&self, msg: Box<T>) -> Result<(), ()>
where
T: Task + Send +'static,
{
self.0.send(CommonScriptMsg::Task(ScriptThreadEventCategory::NetworkEvent, msg))
}
}
|
random_line_split
|
|
networking.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use script_runtime::{CommonScriptMsg, ScriptChan, ScriptThreadEventCategory};
use task::{Task, TaskCanceller};
use task_source::TaskSource;
#[derive(JSTraceable)]
pub struct
|
(pub Box<ScriptChan + Send +'static>);
impl Clone for NetworkingTaskSource {
fn clone(&self) -> NetworkingTaskSource {
NetworkingTaskSource(self.0.clone())
}
}
impl TaskSource for NetworkingTaskSource {
fn queue_with_canceller<T>(
&self,
msg: Box<T>,
canceller: &TaskCanceller,
) -> Result<(), ()>
where
T: Send + Task +'static,
{
self.0.send(CommonScriptMsg::Task(
ScriptThreadEventCategory::NetworkEvent,
canceller.wrap_task(msg),
))
}
}
impl NetworkingTaskSource {
/// This queues a task that will not be cancelled when its associated
/// global scope gets destroyed.
pub fn queue_unconditionally<T>(&self, msg: Box<T>) -> Result<(), ()>
where
T: Task + Send +'static,
{
self.0.send(CommonScriptMsg::Task(ScriptThreadEventCategory::NetworkEvent, msg))
}
}
|
NetworkingTaskSource
|
identifier_name
|
transform.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
use values::computed::{Length, LengthOrPercentage, Number};
use values::generics::transform::TimingFunction as GenericTimingFunction;
use values::generics::transform::TransformOrigin as GenericTransformOrigin;
/// The computed value of a CSS `<transform-origin>`
pub type TransformOrigin = GenericTransformOrigin<LengthOrPercentage, LengthOrPercentage, Length>;
/// A computed timing function.
pub type TimingFunction = GenericTimingFunction<u32, Number>;
impl TransformOrigin {
/// Returns the initial computed value for `transform-origin`.
#[inline]
pub fn initial_value() -> Self {
Self::new(
LengthOrPercentage::Percentage(0.5),
LengthOrPercentage::Percentage(0.5),
Length::from_px(0)
)
}
}
impl Animatable for TransformOrigin {
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
Ok(Self::new(
self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?,
self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?,
self.depth.add_weighted(&other.depth, self_portion, other_portion)?,
))
}
#[inline]
fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
self.compute_squared_distance(other).map(f64::sqrt)
}
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
Ok(
self.horizontal.compute_squared_distance(&other.horizontal)? +
self.vertical.compute_squared_distance(&other.vertical)? +
self.depth.compute_squared_distance(&other.depth)?
)
}
}
|
//! Computed types for CSS values that are related to transformations.
use properties::animated_properties::Animatable;
|
random_line_split
|
transform.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed types for CSS values that are related to transformations.
use properties::animated_properties::Animatable;
use values::computed::{Length, LengthOrPercentage, Number};
use values::generics::transform::TimingFunction as GenericTimingFunction;
use values::generics::transform::TransformOrigin as GenericTransformOrigin;
/// The computed value of a CSS `<transform-origin>`
pub type TransformOrigin = GenericTransformOrigin<LengthOrPercentage, LengthOrPercentage, Length>;
/// A computed timing function.
pub type TimingFunction = GenericTimingFunction<u32, Number>;
impl TransformOrigin {
/// Returns the initial computed value for `transform-origin`.
#[inline]
pub fn initial_value() -> Self
|
}
impl Animatable for TransformOrigin {
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
Ok(Self::new(
self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?,
self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?,
self.depth.add_weighted(&other.depth, self_portion, other_portion)?,
))
}
#[inline]
fn compute_distance(&self, other: &Self) -> Result<f64, ()> {
self.compute_squared_distance(other).map(f64::sqrt)
}
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
Ok(
self.horizontal.compute_squared_distance(&other.horizontal)? +
self.vertical.compute_squared_distance(&other.vertical)? +
self.depth.compute_squared_distance(&other.depth)?
)
}
}
|
{
Self::new(
LengthOrPercentage::Percentage(0.5),
LengthOrPercentage::Percentage(0.5),
Length::from_px(0)
)
}
|
identifier_body
|
transform.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Computed types for CSS values that are related to transformations.
use properties::animated_properties::Animatable;
use values::computed::{Length, LengthOrPercentage, Number};
use values::generics::transform::TimingFunction as GenericTimingFunction;
use values::generics::transform::TransformOrigin as GenericTransformOrigin;
/// The computed value of a CSS `<transform-origin>`
pub type TransformOrigin = GenericTransformOrigin<LengthOrPercentage, LengthOrPercentage, Length>;
/// A computed timing function.
pub type TimingFunction = GenericTimingFunction<u32, Number>;
impl TransformOrigin {
/// Returns the initial computed value for `transform-origin`.
#[inline]
pub fn initial_value() -> Self {
Self::new(
LengthOrPercentage::Percentage(0.5),
LengthOrPercentage::Percentage(0.5),
Length::from_px(0)
)
}
}
impl Animatable for TransformOrigin {
#[inline]
fn add_weighted(&self, other: &Self, self_portion: f64, other_portion: f64) -> Result<Self, ()> {
Ok(Self::new(
self.horizontal.add_weighted(&other.horizontal, self_portion, other_portion)?,
self.vertical.add_weighted(&other.vertical, self_portion, other_portion)?,
self.depth.add_weighted(&other.depth, self_portion, other_portion)?,
))
}
#[inline]
fn
|
(&self, other: &Self) -> Result<f64, ()> {
self.compute_squared_distance(other).map(f64::sqrt)
}
#[inline]
fn compute_squared_distance(&self, other: &Self) -> Result<f64, ()> {
Ok(
self.horizontal.compute_squared_distance(&other.horizontal)? +
self.vertical.compute_squared_distance(&other.vertical)? +
self.depth.compute_squared_distance(&other.depth)?
)
}
}
|
compute_distance
|
identifier_name
|
payload.rs
|
// Copyright 2020 lowRISC contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
//! SPI flash protocol payload.
use crate::io::Read;
use crate::io::Write;
use crate::protocol::wire::FromWireError;
use crate::protocol::wire::FromWire;
use crate::protocol::wire::ToWireError;
use crate::protocol::wire::ToWire;
use crate::protocol::wire::WireEnum;
/// Data for CRC8 implementation.
struct Crc8 {
crc: u16,
}
/// The CRC8 implementation.
impl Crc8 {
/// Initialize CRC8 data to 0.
pub fn init() -> Self {
Self {
crc: 0,
}
}
/// Get the calculated CRC8 checksum.
pub fn get(&self) -> u8 {
(self.crc >> 8 & 0xff) as u8
}
/// Adds the specified data to the CRC8 checksum.
/// Taken from
/// https://chromium.googlesource.com/chromiumos/platform/vboot_reference/+/stabilize2/firmware/lib/crc8.c
/// Uses x^8+x^2+x+1 polynomial.
pub fn add(&mut self, data: &[u8]) -> &mut Self {
for byte in data {
self.crc ^= (*byte as u16) << 8;
for _ in 0..8 {
if self.crc & 0x8000!= 0 {
self.crc ^= 0x1070 << 3;
}
self.crc <<= 1;
}
}
self
}
}
/// Compute the checksum of the given header and payload buffer.
pub fn compute_checksum(header: &Header, payload: &[u8]) -> u8 {
Crc8::init()
.add(&[header.content.to_wire_value()])
.add(&header.content_len.to_be_bytes())
.add(&payload[..header.content_len as usize])
.get()
}
wire_enum! {
/// The content type.
pub enum ContentType: u8 {
/// Error
Error = 0x00,
/// Manticore
Manticore = 0x01,
/// Firmware
Firmware = 0x02,
}
}
/// A parsed header.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Header {
/// The content type following the header.
pub content: ContentType,
/// The length of the content following the header.
pub content_len: u16,
/// A checksum including the header (excluding this field)
// and the content following the header.
pub checksum: u8,
}
/// The length of a payload header on the wire, in bytes.
pub const HEADER_LEN: usize = 4;
impl<'a> FromWire<'a> for Header {
fn from_wire<R: Read<'a>>(mut r: R) -> Result<Self, FromWireError>
|
}
impl ToWire for Header {
fn to_wire<W: Write>(&self, mut w: W) -> Result<(), ToWireError> {
w.write_be(self.content.to_wire_value())?;
w.write_be(self.content_len)?;
w.write_be(self.checksum)?;
Ok(())
}
}
|
{
let content_u8 = r.read_be::<u8>()?;
let content = ContentType::from_wire_value(content_u8).ok_or(FromWireError::OutOfRange)?;
let content_len = r.read_be::<u16>()?;
let checksum = r.read_be::<u8>()?;
Ok(Self {
content,
content_len,
checksum,
})
}
|
identifier_body
|
payload.rs
|
// Copyright 2020 lowRISC contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
//! SPI flash protocol payload.
use crate::io::Read;
use crate::io::Write;
use crate::protocol::wire::FromWireError;
use crate::protocol::wire::FromWire;
use crate::protocol::wire::ToWireError;
use crate::protocol::wire::ToWire;
use crate::protocol::wire::WireEnum;
/// Data for CRC8 implementation.
struct Crc8 {
crc: u16,
}
/// The CRC8 implementation.
impl Crc8 {
/// Initialize CRC8 data to 0.
pub fn init() -> Self {
Self {
crc: 0,
}
}
/// Get the calculated CRC8 checksum.
pub fn get(&self) -> u8 {
(self.crc >> 8 & 0xff) as u8
}
/// Adds the specified data to the CRC8 checksum.
/// Taken from
/// https://chromium.googlesource.com/chromiumos/platform/vboot_reference/+/stabilize2/firmware/lib/crc8.c
/// Uses x^8+x^2+x+1 polynomial.
pub fn add(&mut self, data: &[u8]) -> &mut Self {
for byte in data {
self.crc ^= (*byte as u16) << 8;
for _ in 0..8 {
if self.crc & 0x8000!= 0
|
self.crc <<= 1;
}
}
self
}
}
/// Compute the checksum of the given header and payload buffer.
pub fn compute_checksum(header: &Header, payload: &[u8]) -> u8 {
Crc8::init()
.add(&[header.content.to_wire_value()])
.add(&header.content_len.to_be_bytes())
.add(&payload[..header.content_len as usize])
.get()
}
wire_enum! {
/// The content type.
pub enum ContentType: u8 {
/// Error
Error = 0x00,
/// Manticore
Manticore = 0x01,
/// Firmware
Firmware = 0x02,
}
}
/// A parsed header.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Header {
/// The content type following the header.
pub content: ContentType,
/// The length of the content following the header.
pub content_len: u16,
/// A checksum including the header (excluding this field)
// and the content following the header.
pub checksum: u8,
}
/// The length of a payload header on the wire, in bytes.
pub const HEADER_LEN: usize = 4;
impl<'a> FromWire<'a> for Header {
fn from_wire<R: Read<'a>>(mut r: R) -> Result<Self, FromWireError> {
let content_u8 = r.read_be::<u8>()?;
let content = ContentType::from_wire_value(content_u8).ok_or(FromWireError::OutOfRange)?;
let content_len = r.read_be::<u16>()?;
let checksum = r.read_be::<u8>()?;
Ok(Self {
content,
content_len,
checksum,
})
}
}
impl ToWire for Header {
fn to_wire<W: Write>(&self, mut w: W) -> Result<(), ToWireError> {
w.write_be(self.content.to_wire_value())?;
w.write_be(self.content_len)?;
w.write_be(self.checksum)?;
Ok(())
}
}
|
{
self.crc ^= 0x1070 << 3;
}
|
conditional_block
|
payload.rs
|
// Copyright 2020 lowRISC contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
//! SPI flash protocol payload.
use crate::io::Read;
use crate::io::Write;
use crate::protocol::wire::FromWireError;
use crate::protocol::wire::FromWire;
use crate::protocol::wire::ToWireError;
use crate::protocol::wire::ToWire;
use crate::protocol::wire::WireEnum;
/// Data for CRC8 implementation.
struct Crc8 {
crc: u16,
}
/// The CRC8 implementation.
impl Crc8 {
/// Initialize CRC8 data to 0.
pub fn init() -> Self {
Self {
crc: 0,
}
}
/// Get the calculated CRC8 checksum.
pub fn get(&self) -> u8 {
(self.crc >> 8 & 0xff) as u8
}
/// Adds the specified data to the CRC8 checksum.
/// Taken from
/// https://chromium.googlesource.com/chromiumos/platform/vboot_reference/+/stabilize2/firmware/lib/crc8.c
/// Uses x^8+x^2+x+1 polynomial.
pub fn add(&mut self, data: &[u8]) -> &mut Self {
for byte in data {
self.crc ^= (*byte as u16) << 8;
for _ in 0..8 {
if self.crc & 0x8000!= 0 {
self.crc ^= 0x1070 << 3;
}
self.crc <<= 1;
}
}
self
}
}
/// Compute the checksum of the given header and payload buffer.
pub fn compute_checksum(header: &Header, payload: &[u8]) -> u8 {
Crc8::init()
.add(&[header.content.to_wire_value()])
.add(&header.content_len.to_be_bytes())
.add(&payload[..header.content_len as usize])
.get()
}
wire_enum! {
/// The content type.
pub enum ContentType: u8 {
/// Error
Error = 0x00,
/// Manticore
Manticore = 0x01,
/// Firmware
Firmware = 0x02,
}
}
/// A parsed header.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Header {
/// The content type following the header.
pub content: ContentType,
/// The length of the content following the header.
pub content_len: u16,
/// A checksum including the header (excluding this field)
// and the content following the header.
pub checksum: u8,
}
/// The length of a payload header on the wire, in bytes.
pub const HEADER_LEN: usize = 4;
impl<'a> FromWire<'a> for Header {
fn from_wire<R: Read<'a>>(mut r: R) -> Result<Self, FromWireError> {
let content_u8 = r.read_be::<u8>()?;
let content = ContentType::from_wire_value(content_u8).ok_or(FromWireError::OutOfRange)?;
let content_len = r.read_be::<u16>()?;
let checksum = r.read_be::<u8>()?;
Ok(Self {
content,
content_len,
checksum,
})
}
}
impl ToWire for Header {
fn to_wire<W: Write>(&self, mut w: W) -> Result<(), ToWireError> {
|
Ok(())
}
}
|
w.write_be(self.content.to_wire_value())?;
w.write_be(self.content_len)?;
w.write_be(self.checksum)?;
|
random_line_split
|
payload.rs
|
// Copyright 2020 lowRISC contributors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// SPDX-License-Identifier: Apache-2.0
//! SPI flash protocol payload.
use crate::io::Read;
use crate::io::Write;
use crate::protocol::wire::FromWireError;
use crate::protocol::wire::FromWire;
use crate::protocol::wire::ToWireError;
use crate::protocol::wire::ToWire;
use crate::protocol::wire::WireEnum;
/// Data for CRC8 implementation.
struct Crc8 {
crc: u16,
}
/// The CRC8 implementation.
impl Crc8 {
/// Initialize CRC8 data to 0.
pub fn init() -> Self {
Self {
crc: 0,
}
}
/// Get the calculated CRC8 checksum.
pub fn
|
(&self) -> u8 {
(self.crc >> 8 & 0xff) as u8
}
/// Adds the specified data to the CRC8 checksum.
/// Taken from
/// https://chromium.googlesource.com/chromiumos/platform/vboot_reference/+/stabilize2/firmware/lib/crc8.c
/// Uses x^8+x^2+x+1 polynomial.
pub fn add(&mut self, data: &[u8]) -> &mut Self {
for byte in data {
self.crc ^= (*byte as u16) << 8;
for _ in 0..8 {
if self.crc & 0x8000!= 0 {
self.crc ^= 0x1070 << 3;
}
self.crc <<= 1;
}
}
self
}
}
/// Compute the checksum of the given header and payload buffer.
pub fn compute_checksum(header: &Header, payload: &[u8]) -> u8 {
Crc8::init()
.add(&[header.content.to_wire_value()])
.add(&header.content_len.to_be_bytes())
.add(&payload[..header.content_len as usize])
.get()
}
wire_enum! {
/// The content type.
pub enum ContentType: u8 {
/// Error
Error = 0x00,
/// Manticore
Manticore = 0x01,
/// Firmware
Firmware = 0x02,
}
}
/// A parsed header.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub struct Header {
/// The content type following the header.
pub content: ContentType,
/// The length of the content following the header.
pub content_len: u16,
/// A checksum including the header (excluding this field)
// and the content following the header.
pub checksum: u8,
}
/// The length of a payload header on the wire, in bytes.
pub const HEADER_LEN: usize = 4;
impl<'a> FromWire<'a> for Header {
fn from_wire<R: Read<'a>>(mut r: R) -> Result<Self, FromWireError> {
let content_u8 = r.read_be::<u8>()?;
let content = ContentType::from_wire_value(content_u8).ok_or(FromWireError::OutOfRange)?;
let content_len = r.read_be::<u16>()?;
let checksum = r.read_be::<u8>()?;
Ok(Self {
content,
content_len,
checksum,
})
}
}
impl ToWire for Header {
fn to_wire<W: Write>(&self, mut w: W) -> Result<(), ToWireError> {
w.write_be(self.content.to_wire_value())?;
w.write_be(self.content_len)?;
w.write_be(self.checksum)?;
Ok(())
}
}
|
get
|
identifier_name
|
remux.rs
|
#![allow(unused)]
//! Losslessly convert file formats without decoding. I.e. MP4 into something
//! else, without altering the contained video, audio, etc. streams.
//!
//! Tested with this file:
//! * [sintel_trailer-1080p.mp4](https://download.blender.org/durian/trailer/sintel_trailer-1080p.mp4)
//! * sha1: `6679861234298c4d406ed66e8bd7d2e0583bbe91`
//!
//! Example code based on the
//! [original C example](https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/remuxing.c).
use ffmpeg_dev::sys::{
self, AVCodecID_AV_CODEC_ID_H264 as AV_CODEC_ID_H264,
AVMediaType_AVMEDIA_TYPE_ATTACHMENT as AVMEDIA_TYPE_ATTACHMENT,
AVMediaType_AVMEDIA_TYPE_AUDIO as AVMEDIA_TYPE_AUDIO,
AVMediaType_AVMEDIA_TYPE_DATA as AVMEDIA_TYPE_DATA,
AVMediaType_AVMEDIA_TYPE_NB as AVMEDIA_TYPE_NB,
AVMediaType_AVMEDIA_TYPE_SUBTITLE as AVMEDIA_TYPE_SUBTITLE,
AVMediaType_AVMEDIA_TYPE_UNKNOWN as AVMEDIA_TYPE_UNKNOWN,
AVMediaType_AVMEDIA_TYPE_VIDEO as AVMEDIA_TYPE_VIDEO,
AVRounding_AV_ROUND_NEAR_INF as AV_ROUND_NEAR_INF,
AVRounding_AV_ROUND_PASS_MINMAX as AV_ROUND_PASS_MINMAX, AVFMT_NOFILE, AVIO_FLAG_WRITE,
|
};
use libc::{c_float, c_void, size_t};
use std::convert::AsRef;
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int};
use std::path::{Path, PathBuf};
fn c_str(s: &str) -> CString {
CString::new(s).expect("str to c str")
}
unsafe fn remux(input_path: &str, output_path: &str) {
// I/O
assert!(PathBuf::from(input_path).exists());
let input_path_cstr = CString::new(input_path).expect("to c str");
let output_path_cstr = CString::new(output_path).expect("to c str");
// SETUP AV CONTEXT
let mut ifmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut ofmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut pkt: sys::AVPacket = std::mem::zeroed();
// OPEN SOURCE
assert_eq!(
sys::avformat_open_input(
&mut ifmt_ctx,
input_path_cstr.as_ptr(),
std::ptr::null_mut(),
std::ptr::null_mut(),
),
0
);
assert!(sys::avformat_find_stream_info(ifmt_ctx, std::ptr::null_mut()) >= 0);
sys::av_dump_format(ifmt_ctx, 0, input_path_cstr.as_ptr(), 0);
// OUTPUT CONTEXT
assert!(
sys::avformat_alloc_output_context2(
&mut ofmt_ctx,
std::ptr::null_mut(),
std::ptr::null_mut(),
output_path_cstr.as_ptr(),
) >= 0
);
// OUTPUT META
let mut ofmt: *mut sys::AVOutputFormat = (*ofmt_ctx).oformat;
// STREAM TRACKER
let stream_mapping_size: u32 = (*ifmt_ctx).nb_streams;
let mut stream_mapping: Vec<i32> = vec![0; stream_mapping_size as usize];
// SOURCE TO DEST STREAMS
let input_streams = {
let len = stream_mapping_size as usize;
std::slice::from_raw_parts((*ifmt_ctx).streams, len)
.iter()
.map(|x| (*x).as_ref().expect("not null"))
.collect::<Vec<&sys::AVStream>>()
};
for (index, in_stream) in input_streams.iter().enumerate() {
assert!(!in_stream.codecpar.is_null());
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
let skip = { (*in_stream.codecpar).codec_type!= AVMEDIA_TYPE_VIDEO };
if skip {
stream_mapping[index] = -1;
} else {
out_stream = sys::avformat_new_stream(ofmt_ctx, std::ptr::null());
assert!(!out_stream.is_null());
let status = sys::avcodec_parameters_copy((*out_stream).codecpar, in_stream.codecpar);
assert!(status >= 0);
(*(*out_stream).codecpar).codec_tag = 0;
}
}
sys::av_dump_format(ofmt_ctx, 0, output_path_cstr.as_ptr(), 1);
// OPEN OUTPUT STREAM
if ((*ofmt).flags & (AVFMT_NOFILE as i32)) == 0 {
let status = sys::avio_open(
&mut (*ofmt_ctx).pb,
output_path_cstr.as_ptr(),
AVIO_FLAG_WRITE as i32,
);
assert!(status >= 0);
}
// WRITE OUTPUT
let mut opts: *mut ffmpeg_dev::sys::AVDictionary = std::ptr::null_mut();
ffmpeg_dev::sys::av_dict_set(
&mut opts,
c_str("movflags").as_ptr(),
c_str("frag_keyframe+empty_moov+default_base_moof").as_ptr(),
0,
);
assert!(sys::avformat_write_header(ofmt_ctx, &mut opts) >= 0);
let mut status = 0;
loop {
if sys::av_read_frame(ifmt_ctx, &mut pkt)!= 0 {
break;
}
// SOURCE
let in_stream: *mut sys::AVStream =
(*(*ifmt_ctx).streams).offset(pkt.stream_index as isize);
assert!(!in_stream.is_null());
// DEST
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
//???
let skip = {
pkt.stream_index >= stream_mapping.len() as i32
|| stream_mapping[pkt.stream_index as usize] < 0
};
if skip {
sys::av_packet_unref(&mut pkt);
continue;
}
pkt.stream_index = stream_mapping[pkt.stream_index as usize];
out_stream = (*(*ofmt_ctx).streams).offset(pkt.stream_index as isize);
// COPY PACKET
pkt.pts = sys::av_rescale_q_rnd(
pkt.pts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.dts = sys::av_rescale_q_rnd(
pkt.dts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.duration = sys::av_rescale_q(
pkt.duration,
(*in_stream).time_base,
(*out_stream).time_base,
);
pkt.pos = -1;
// WRITE
// RESCALE OUTPUT PACKET TIMESTAMP VALUES FROM CODEC TO STREAM TIMEBASE
// av_packet_rescale_ts(pkt, *time_base, st->time_base);
// pkt->stream_index = st->index;
// WRITE THE COMPRESSED FRAME TO THE MEDIA FILE
assert!(sys::av_interleaved_write_frame(ofmt_ctx, &mut pkt) >= 0);
// assert!(av_write_frame(ofmt_ctx, &mut pkt) >= 0);
sys::av_packet_unref(&mut pkt);
}
sys::av_write_trailer(ofmt_ctx);
(*ifmt_ctx);
// CLOSE OUTPUT
if!ofmt_ctx.is_null() && ((*ofmt).flags & (AVFMT_NOFILE as i32) <= 0) {
sys::avio_closep(&mut (*ofmt_ctx).pb);
}
sys::avformat_free_context(ofmt_ctx);
assert!(status!= sys::EOF);
}
///////////////////////////////////////////////////////////////////////////////
// MAIN
///////////////////////////////////////////////////////////////////////////////
pub fn main() {
// HELPER FUNCTION
pub fn path_exists(path: &str) -> bool {
std::fs::metadata(path).is_ok()
}
// CLI Example:
// ```
// cargo run --example remux -- \
// assets/samples/sintel_trailer.1080p.mp4 assets/output/test.mp4
// ```
fn run() -> Result<(), String> {
let args = std::env::args().collect::<Vec<_>>();
let input_path = args
.get(1)
.ok_or(String::from("missing input argument"))?
.as_ref();
let output_path = args
.get(2)
.ok_or(String::from("missing output argument"))?
.as_ref();
if!path_exists(input_path) {
return Err(String::from("missing input file"));
}
unsafe {
remux(input_path, output_path);
};
Ok(())
}
match run() {
Ok(()) => (),
Err(msg) => {
eprintln!("[failed] {:?}", msg);
}
}
}
|
random_line_split
|
|
remux.rs
|
#![allow(unused)]
//! Losslessly convert file formats without decoding. I.e. MP4 into something
//! else, without altering the contained video, audio, etc. streams.
//!
//! Tested with this file:
//! * [sintel_trailer-1080p.mp4](https://download.blender.org/durian/trailer/sintel_trailer-1080p.mp4)
//! * sha1: `6679861234298c4d406ed66e8bd7d2e0583bbe91`
//!
//! Example code based on the
//! [original C example](https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/remuxing.c).
use ffmpeg_dev::sys::{
self, AVCodecID_AV_CODEC_ID_H264 as AV_CODEC_ID_H264,
AVMediaType_AVMEDIA_TYPE_ATTACHMENT as AVMEDIA_TYPE_ATTACHMENT,
AVMediaType_AVMEDIA_TYPE_AUDIO as AVMEDIA_TYPE_AUDIO,
AVMediaType_AVMEDIA_TYPE_DATA as AVMEDIA_TYPE_DATA,
AVMediaType_AVMEDIA_TYPE_NB as AVMEDIA_TYPE_NB,
AVMediaType_AVMEDIA_TYPE_SUBTITLE as AVMEDIA_TYPE_SUBTITLE,
AVMediaType_AVMEDIA_TYPE_UNKNOWN as AVMEDIA_TYPE_UNKNOWN,
AVMediaType_AVMEDIA_TYPE_VIDEO as AVMEDIA_TYPE_VIDEO,
AVRounding_AV_ROUND_NEAR_INF as AV_ROUND_NEAR_INF,
AVRounding_AV_ROUND_PASS_MINMAX as AV_ROUND_PASS_MINMAX, AVFMT_NOFILE, AVIO_FLAG_WRITE,
};
use libc::{c_float, c_void, size_t};
use std::convert::AsRef;
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int};
use std::path::{Path, PathBuf};
fn c_str(s: &str) -> CString {
CString::new(s).expect("str to c str")
}
unsafe fn remux(input_path: &str, output_path: &str) {
// I/O
assert!(PathBuf::from(input_path).exists());
let input_path_cstr = CString::new(input_path).expect("to c str");
let output_path_cstr = CString::new(output_path).expect("to c str");
// SETUP AV CONTEXT
let mut ifmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut ofmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut pkt: sys::AVPacket = std::mem::zeroed();
// OPEN SOURCE
assert_eq!(
sys::avformat_open_input(
&mut ifmt_ctx,
input_path_cstr.as_ptr(),
std::ptr::null_mut(),
std::ptr::null_mut(),
),
0
);
assert!(sys::avformat_find_stream_info(ifmt_ctx, std::ptr::null_mut()) >= 0);
sys::av_dump_format(ifmt_ctx, 0, input_path_cstr.as_ptr(), 0);
// OUTPUT CONTEXT
assert!(
sys::avformat_alloc_output_context2(
&mut ofmt_ctx,
std::ptr::null_mut(),
std::ptr::null_mut(),
output_path_cstr.as_ptr(),
) >= 0
);
// OUTPUT META
let mut ofmt: *mut sys::AVOutputFormat = (*ofmt_ctx).oformat;
// STREAM TRACKER
let stream_mapping_size: u32 = (*ifmt_ctx).nb_streams;
let mut stream_mapping: Vec<i32> = vec![0; stream_mapping_size as usize];
// SOURCE TO DEST STREAMS
let input_streams = {
let len = stream_mapping_size as usize;
std::slice::from_raw_parts((*ifmt_ctx).streams, len)
.iter()
.map(|x| (*x).as_ref().expect("not null"))
.collect::<Vec<&sys::AVStream>>()
};
for (index, in_stream) in input_streams.iter().enumerate() {
assert!(!in_stream.codecpar.is_null());
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
let skip = { (*in_stream.codecpar).codec_type!= AVMEDIA_TYPE_VIDEO };
if skip {
stream_mapping[index] = -1;
} else {
out_stream = sys::avformat_new_stream(ofmt_ctx, std::ptr::null());
assert!(!out_stream.is_null());
let status = sys::avcodec_parameters_copy((*out_stream).codecpar, in_stream.codecpar);
assert!(status >= 0);
(*(*out_stream).codecpar).codec_tag = 0;
}
}
sys::av_dump_format(ofmt_ctx, 0, output_path_cstr.as_ptr(), 1);
// OPEN OUTPUT STREAM
if ((*ofmt).flags & (AVFMT_NOFILE as i32)) == 0 {
let status = sys::avio_open(
&mut (*ofmt_ctx).pb,
output_path_cstr.as_ptr(),
AVIO_FLAG_WRITE as i32,
);
assert!(status >= 0);
}
// WRITE OUTPUT
let mut opts: *mut ffmpeg_dev::sys::AVDictionary = std::ptr::null_mut();
ffmpeg_dev::sys::av_dict_set(
&mut opts,
c_str("movflags").as_ptr(),
c_str("frag_keyframe+empty_moov+default_base_moof").as_ptr(),
0,
);
assert!(sys::avformat_write_header(ofmt_ctx, &mut opts) >= 0);
let mut status = 0;
loop {
if sys::av_read_frame(ifmt_ctx, &mut pkt)!= 0 {
break;
}
// SOURCE
let in_stream: *mut sys::AVStream =
(*(*ifmt_ctx).streams).offset(pkt.stream_index as isize);
assert!(!in_stream.is_null());
// DEST
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
//???
let skip = {
pkt.stream_index >= stream_mapping.len() as i32
|| stream_mapping[pkt.stream_index as usize] < 0
};
if skip {
sys::av_packet_unref(&mut pkt);
continue;
}
pkt.stream_index = stream_mapping[pkt.stream_index as usize];
out_stream = (*(*ofmt_ctx).streams).offset(pkt.stream_index as isize);
// COPY PACKET
pkt.pts = sys::av_rescale_q_rnd(
pkt.pts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.dts = sys::av_rescale_q_rnd(
pkt.dts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.duration = sys::av_rescale_q(
pkt.duration,
(*in_stream).time_base,
(*out_stream).time_base,
);
pkt.pos = -1;
// WRITE
// RESCALE OUTPUT PACKET TIMESTAMP VALUES FROM CODEC TO STREAM TIMEBASE
// av_packet_rescale_ts(pkt, *time_base, st->time_base);
// pkt->stream_index = st->index;
// WRITE THE COMPRESSED FRAME TO THE MEDIA FILE
assert!(sys::av_interleaved_write_frame(ofmt_ctx, &mut pkt) >= 0);
// assert!(av_write_frame(ofmt_ctx, &mut pkt) >= 0);
sys::av_packet_unref(&mut pkt);
}
sys::av_write_trailer(ofmt_ctx);
(*ifmt_ctx);
// CLOSE OUTPUT
if!ofmt_ctx.is_null() && ((*ofmt).flags & (AVFMT_NOFILE as i32) <= 0) {
sys::avio_closep(&mut (*ofmt_ctx).pb);
}
sys::avformat_free_context(ofmt_ctx);
assert!(status!= sys::EOF);
}
///////////////////////////////////////////////////////////////////////////////
// MAIN
///////////////////////////////////////////////////////////////////////////////
pub fn
|
() {
// HELPER FUNCTION
pub fn path_exists(path: &str) -> bool {
std::fs::metadata(path).is_ok()
}
// CLI Example:
// ```
// cargo run --example remux -- \
// assets/samples/sintel_trailer.1080p.mp4 assets/output/test.mp4
// ```
fn run() -> Result<(), String> {
let args = std::env::args().collect::<Vec<_>>();
let input_path = args
.get(1)
.ok_or(String::from("missing input argument"))?
.as_ref();
let output_path = args
.get(2)
.ok_or(String::from("missing output argument"))?
.as_ref();
if!path_exists(input_path) {
return Err(String::from("missing input file"));
}
unsafe {
remux(input_path, output_path);
};
Ok(())
}
match run() {
Ok(()) => (),
Err(msg) => {
eprintln!("[failed] {:?}", msg);
}
}
}
|
main
|
identifier_name
|
remux.rs
|
#![allow(unused)]
//! Losslessly convert file formats without decoding. I.e. MP4 into something
//! else, without altering the contained video, audio, etc. streams.
//!
//! Tested with this file:
//! * [sintel_trailer-1080p.mp4](https://download.blender.org/durian/trailer/sintel_trailer-1080p.mp4)
//! * sha1: `6679861234298c4d406ed66e8bd7d2e0583bbe91`
//!
//! Example code based on the
//! [original C example](https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/remuxing.c).
use ffmpeg_dev::sys::{
self, AVCodecID_AV_CODEC_ID_H264 as AV_CODEC_ID_H264,
AVMediaType_AVMEDIA_TYPE_ATTACHMENT as AVMEDIA_TYPE_ATTACHMENT,
AVMediaType_AVMEDIA_TYPE_AUDIO as AVMEDIA_TYPE_AUDIO,
AVMediaType_AVMEDIA_TYPE_DATA as AVMEDIA_TYPE_DATA,
AVMediaType_AVMEDIA_TYPE_NB as AVMEDIA_TYPE_NB,
AVMediaType_AVMEDIA_TYPE_SUBTITLE as AVMEDIA_TYPE_SUBTITLE,
AVMediaType_AVMEDIA_TYPE_UNKNOWN as AVMEDIA_TYPE_UNKNOWN,
AVMediaType_AVMEDIA_TYPE_VIDEO as AVMEDIA_TYPE_VIDEO,
AVRounding_AV_ROUND_NEAR_INF as AV_ROUND_NEAR_INF,
AVRounding_AV_ROUND_PASS_MINMAX as AV_ROUND_PASS_MINMAX, AVFMT_NOFILE, AVIO_FLAG_WRITE,
};
use libc::{c_float, c_void, size_t};
use std::convert::AsRef;
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int};
use std::path::{Path, PathBuf};
fn c_str(s: &str) -> CString {
CString::new(s).expect("str to c str")
}
unsafe fn remux(input_path: &str, output_path: &str) {
// I/O
assert!(PathBuf::from(input_path).exists());
let input_path_cstr = CString::new(input_path).expect("to c str");
let output_path_cstr = CString::new(output_path).expect("to c str");
// SETUP AV CONTEXT
let mut ifmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut ofmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut pkt: sys::AVPacket = std::mem::zeroed();
// OPEN SOURCE
assert_eq!(
sys::avformat_open_input(
&mut ifmt_ctx,
input_path_cstr.as_ptr(),
std::ptr::null_mut(),
std::ptr::null_mut(),
),
0
);
assert!(sys::avformat_find_stream_info(ifmt_ctx, std::ptr::null_mut()) >= 0);
sys::av_dump_format(ifmt_ctx, 0, input_path_cstr.as_ptr(), 0);
// OUTPUT CONTEXT
assert!(
sys::avformat_alloc_output_context2(
&mut ofmt_ctx,
std::ptr::null_mut(),
std::ptr::null_mut(),
output_path_cstr.as_ptr(),
) >= 0
);
// OUTPUT META
let mut ofmt: *mut sys::AVOutputFormat = (*ofmt_ctx).oformat;
// STREAM TRACKER
let stream_mapping_size: u32 = (*ifmt_ctx).nb_streams;
let mut stream_mapping: Vec<i32> = vec![0; stream_mapping_size as usize];
// SOURCE TO DEST STREAMS
let input_streams = {
let len = stream_mapping_size as usize;
std::slice::from_raw_parts((*ifmt_ctx).streams, len)
.iter()
.map(|x| (*x).as_ref().expect("not null"))
.collect::<Vec<&sys::AVStream>>()
};
for (index, in_stream) in input_streams.iter().enumerate() {
assert!(!in_stream.codecpar.is_null());
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
let skip = { (*in_stream.codecpar).codec_type!= AVMEDIA_TYPE_VIDEO };
if skip {
stream_mapping[index] = -1;
} else {
out_stream = sys::avformat_new_stream(ofmt_ctx, std::ptr::null());
assert!(!out_stream.is_null());
let status = sys::avcodec_parameters_copy((*out_stream).codecpar, in_stream.codecpar);
assert!(status >= 0);
(*(*out_stream).codecpar).codec_tag = 0;
}
}
sys::av_dump_format(ofmt_ctx, 0, output_path_cstr.as_ptr(), 1);
// OPEN OUTPUT STREAM
if ((*ofmt).flags & (AVFMT_NOFILE as i32)) == 0
|
// WRITE OUTPUT
let mut opts: *mut ffmpeg_dev::sys::AVDictionary = std::ptr::null_mut();
ffmpeg_dev::sys::av_dict_set(
&mut opts,
c_str("movflags").as_ptr(),
c_str("frag_keyframe+empty_moov+default_base_moof").as_ptr(),
0,
);
assert!(sys::avformat_write_header(ofmt_ctx, &mut opts) >= 0);
let mut status = 0;
loop {
if sys::av_read_frame(ifmt_ctx, &mut pkt)!= 0 {
break;
}
// SOURCE
let in_stream: *mut sys::AVStream =
(*(*ifmt_ctx).streams).offset(pkt.stream_index as isize);
assert!(!in_stream.is_null());
// DEST
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
//???
let skip = {
pkt.stream_index >= stream_mapping.len() as i32
|| stream_mapping[pkt.stream_index as usize] < 0
};
if skip {
sys::av_packet_unref(&mut pkt);
continue;
}
pkt.stream_index = stream_mapping[pkt.stream_index as usize];
out_stream = (*(*ofmt_ctx).streams).offset(pkt.stream_index as isize);
// COPY PACKET
pkt.pts = sys::av_rescale_q_rnd(
pkt.pts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.dts = sys::av_rescale_q_rnd(
pkt.dts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.duration = sys::av_rescale_q(
pkt.duration,
(*in_stream).time_base,
(*out_stream).time_base,
);
pkt.pos = -1;
// WRITE
// RESCALE OUTPUT PACKET TIMESTAMP VALUES FROM CODEC TO STREAM TIMEBASE
// av_packet_rescale_ts(pkt, *time_base, st->time_base);
// pkt->stream_index = st->index;
// WRITE THE COMPRESSED FRAME TO THE MEDIA FILE
assert!(sys::av_interleaved_write_frame(ofmt_ctx, &mut pkt) >= 0);
// assert!(av_write_frame(ofmt_ctx, &mut pkt) >= 0);
sys::av_packet_unref(&mut pkt);
}
sys::av_write_trailer(ofmt_ctx);
(*ifmt_ctx);
// CLOSE OUTPUT
if!ofmt_ctx.is_null() && ((*ofmt).flags & (AVFMT_NOFILE as i32) <= 0) {
sys::avio_closep(&mut (*ofmt_ctx).pb);
}
sys::avformat_free_context(ofmt_ctx);
assert!(status!= sys::EOF);
}
///////////////////////////////////////////////////////////////////////////////
// MAIN
///////////////////////////////////////////////////////////////////////////////
pub fn main() {
// HELPER FUNCTION
pub fn path_exists(path: &str) -> bool {
std::fs::metadata(path).is_ok()
}
// CLI Example:
// ```
// cargo run --example remux -- \
// assets/samples/sintel_trailer.1080p.mp4 assets/output/test.mp4
// ```
fn run() -> Result<(), String> {
let args = std::env::args().collect::<Vec<_>>();
let input_path = args
.get(1)
.ok_or(String::from("missing input argument"))?
.as_ref();
let output_path = args
.get(2)
.ok_or(String::from("missing output argument"))?
.as_ref();
if!path_exists(input_path) {
return Err(String::from("missing input file"));
}
unsafe {
remux(input_path, output_path);
};
Ok(())
}
match run() {
Ok(()) => (),
Err(msg) => {
eprintln!("[failed] {:?}", msg);
}
}
}
|
{
let status = sys::avio_open(
&mut (*ofmt_ctx).pb,
output_path_cstr.as_ptr(),
AVIO_FLAG_WRITE as i32,
);
assert!(status >= 0);
}
|
conditional_block
|
remux.rs
|
#![allow(unused)]
//! Losslessly convert file formats without decoding. I.e. MP4 into something
//! else, without altering the contained video, audio, etc. streams.
//!
//! Tested with this file:
//! * [sintel_trailer-1080p.mp4](https://download.blender.org/durian/trailer/sintel_trailer-1080p.mp4)
//! * sha1: `6679861234298c4d406ed66e8bd7d2e0583bbe91`
//!
//! Example code based on the
//! [original C example](https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/remuxing.c).
use ffmpeg_dev::sys::{
self, AVCodecID_AV_CODEC_ID_H264 as AV_CODEC_ID_H264,
AVMediaType_AVMEDIA_TYPE_ATTACHMENT as AVMEDIA_TYPE_ATTACHMENT,
AVMediaType_AVMEDIA_TYPE_AUDIO as AVMEDIA_TYPE_AUDIO,
AVMediaType_AVMEDIA_TYPE_DATA as AVMEDIA_TYPE_DATA,
AVMediaType_AVMEDIA_TYPE_NB as AVMEDIA_TYPE_NB,
AVMediaType_AVMEDIA_TYPE_SUBTITLE as AVMEDIA_TYPE_SUBTITLE,
AVMediaType_AVMEDIA_TYPE_UNKNOWN as AVMEDIA_TYPE_UNKNOWN,
AVMediaType_AVMEDIA_TYPE_VIDEO as AVMEDIA_TYPE_VIDEO,
AVRounding_AV_ROUND_NEAR_INF as AV_ROUND_NEAR_INF,
AVRounding_AV_ROUND_PASS_MINMAX as AV_ROUND_PASS_MINMAX, AVFMT_NOFILE, AVIO_FLAG_WRITE,
};
use libc::{c_float, c_void, size_t};
use std::convert::AsRef;
use std::ffi::{CStr, CString};
use std::os::raw::{c_char, c_int};
use std::path::{Path, PathBuf};
fn c_str(s: &str) -> CString {
CString::new(s).expect("str to c str")
}
unsafe fn remux(input_path: &str, output_path: &str) {
// I/O
assert!(PathBuf::from(input_path).exists());
let input_path_cstr = CString::new(input_path).expect("to c str");
let output_path_cstr = CString::new(output_path).expect("to c str");
// SETUP AV CONTEXT
let mut ifmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut ofmt_ctx: *mut sys::AVFormatContext = std::ptr::null_mut();
let mut pkt: sys::AVPacket = std::mem::zeroed();
// OPEN SOURCE
assert_eq!(
sys::avformat_open_input(
&mut ifmt_ctx,
input_path_cstr.as_ptr(),
std::ptr::null_mut(),
std::ptr::null_mut(),
),
0
);
assert!(sys::avformat_find_stream_info(ifmt_ctx, std::ptr::null_mut()) >= 0);
sys::av_dump_format(ifmt_ctx, 0, input_path_cstr.as_ptr(), 0);
// OUTPUT CONTEXT
assert!(
sys::avformat_alloc_output_context2(
&mut ofmt_ctx,
std::ptr::null_mut(),
std::ptr::null_mut(),
output_path_cstr.as_ptr(),
) >= 0
);
// OUTPUT META
let mut ofmt: *mut sys::AVOutputFormat = (*ofmt_ctx).oformat;
// STREAM TRACKER
let stream_mapping_size: u32 = (*ifmt_ctx).nb_streams;
let mut stream_mapping: Vec<i32> = vec![0; stream_mapping_size as usize];
// SOURCE TO DEST STREAMS
let input_streams = {
let len = stream_mapping_size as usize;
std::slice::from_raw_parts((*ifmt_ctx).streams, len)
.iter()
.map(|x| (*x).as_ref().expect("not null"))
.collect::<Vec<&sys::AVStream>>()
};
for (index, in_stream) in input_streams.iter().enumerate() {
assert!(!in_stream.codecpar.is_null());
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
let skip = { (*in_stream.codecpar).codec_type!= AVMEDIA_TYPE_VIDEO };
if skip {
stream_mapping[index] = -1;
} else {
out_stream = sys::avformat_new_stream(ofmt_ctx, std::ptr::null());
assert!(!out_stream.is_null());
let status = sys::avcodec_parameters_copy((*out_stream).codecpar, in_stream.codecpar);
assert!(status >= 0);
(*(*out_stream).codecpar).codec_tag = 0;
}
}
sys::av_dump_format(ofmt_ctx, 0, output_path_cstr.as_ptr(), 1);
// OPEN OUTPUT STREAM
if ((*ofmt).flags & (AVFMT_NOFILE as i32)) == 0 {
let status = sys::avio_open(
&mut (*ofmt_ctx).pb,
output_path_cstr.as_ptr(),
AVIO_FLAG_WRITE as i32,
);
assert!(status >= 0);
}
// WRITE OUTPUT
let mut opts: *mut ffmpeg_dev::sys::AVDictionary = std::ptr::null_mut();
ffmpeg_dev::sys::av_dict_set(
&mut opts,
c_str("movflags").as_ptr(),
c_str("frag_keyframe+empty_moov+default_base_moof").as_ptr(),
0,
);
assert!(sys::avformat_write_header(ofmt_ctx, &mut opts) >= 0);
let mut status = 0;
loop {
if sys::av_read_frame(ifmt_ctx, &mut pkt)!= 0 {
break;
}
// SOURCE
let in_stream: *mut sys::AVStream =
(*(*ifmt_ctx).streams).offset(pkt.stream_index as isize);
assert!(!in_stream.is_null());
// DEST
let mut out_stream: *mut sys::AVStream = std::ptr::null_mut();
//???
let skip = {
pkt.stream_index >= stream_mapping.len() as i32
|| stream_mapping[pkt.stream_index as usize] < 0
};
if skip {
sys::av_packet_unref(&mut pkt);
continue;
}
pkt.stream_index = stream_mapping[pkt.stream_index as usize];
out_stream = (*(*ofmt_ctx).streams).offset(pkt.stream_index as isize);
// COPY PACKET
pkt.pts = sys::av_rescale_q_rnd(
pkt.pts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.dts = sys::av_rescale_q_rnd(
pkt.dts,
(*in_stream).time_base,
(*out_stream).time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX,
);
pkt.duration = sys::av_rescale_q(
pkt.duration,
(*in_stream).time_base,
(*out_stream).time_base,
);
pkt.pos = -1;
// WRITE
// RESCALE OUTPUT PACKET TIMESTAMP VALUES FROM CODEC TO STREAM TIMEBASE
// av_packet_rescale_ts(pkt, *time_base, st->time_base);
// pkt->stream_index = st->index;
// WRITE THE COMPRESSED FRAME TO THE MEDIA FILE
assert!(sys::av_interleaved_write_frame(ofmt_ctx, &mut pkt) >= 0);
// assert!(av_write_frame(ofmt_ctx, &mut pkt) >= 0);
sys::av_packet_unref(&mut pkt);
}
sys::av_write_trailer(ofmt_ctx);
(*ifmt_ctx);
// CLOSE OUTPUT
if!ofmt_ctx.is_null() && ((*ofmt).flags & (AVFMT_NOFILE as i32) <= 0) {
sys::avio_closep(&mut (*ofmt_ctx).pb);
}
sys::avformat_free_context(ofmt_ctx);
assert!(status!= sys::EOF);
}
///////////////////////////////////////////////////////////////////////////////
// MAIN
///////////////////////////////////////////////////////////////////////////////
pub fn main()
|
if!path_exists(input_path) {
return Err(String::from("missing input file"));
}
unsafe {
remux(input_path, output_path);
};
Ok(())
}
match run() {
Ok(()) => (),
Err(msg) => {
eprintln!("[failed] {:?}", msg);
}
}
}
|
{
// HELPER FUNCTION
pub fn path_exists(path: &str) -> bool {
std::fs::metadata(path).is_ok()
}
// CLI Example:
// ```
// cargo run --example remux -- \
// assets/samples/sintel_trailer.1080p.mp4 assets/output/test.mp4
// ```
fn run() -> Result<(), String> {
let args = std::env::args().collect::<Vec<_>>();
let input_path = args
.get(1)
.ok_or(String::from("missing input argument"))?
.as_ref();
let output_path = args
.get(2)
.ok_or(String::from("missing output argument"))?
.as_ref();
|
identifier_body
|
htmldlistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDListElementBinding;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDListElement {
htmlelement: HTMLElement
}
impl HTMLDListElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLDListElement {
HTMLDListElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn
|
(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDListElement> {
Node::reflect_node(box HTMLDListElement::new_inherited(local_name, prefix, document),
document,
HTMLDListElementBinding::Wrap)
}
}
|
new
|
identifier_name
|
htmldlistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDListElementBinding;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDListElement {
htmlelement: HTMLElement
}
impl HTMLDListElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLDListElement {
HTMLDListElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDListElement> {
Node::reflect_node(box HTMLDListElement::new_inherited(local_name, prefix, document),
document,
HTMLDListElementBinding::Wrap)
|
}
}
|
random_line_split
|
|
htmldlistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLDListElementBinding;
use dom::bindings::js::Root;
use dom::bindings::str::DOMString;
use dom::document::Document;
use dom::htmlelement::HTMLElement;
use dom::node::Node;
use html5ever_atoms::LocalName;
#[dom_struct]
pub struct HTMLDListElement {
htmlelement: HTMLElement
}
impl HTMLDListElement {
fn new_inherited(local_name: LocalName, prefix: Option<DOMString>, document: &Document) -> HTMLDListElement {
HTMLDListElement {
htmlelement:
HTMLElement::new_inherited(local_name, prefix, document)
}
}
#[allow(unrooted_must_root)]
pub fn new(local_name: LocalName,
prefix: Option<DOMString>,
document: &Document) -> Root<HTMLDListElement>
|
}
|
{
Node::reflect_node(box HTMLDListElement::new_inherited(local_name, prefix, document),
document,
HTMLDListElementBinding::Wrap)
}
|
identifier_body
|
hash.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Hash functions
//!
//! Utility functions related to hashing data, including merkleization
use std::char::from_digit;
use std::cmp::min;
use std::default::Default;
use std::error;
use std::fmt;
use std::io::Cursor;
use std::mem;
use serde;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use crypto::ripemd160::Ripemd160;
use network::encodable::{ConsensusDecodable, ConsensusEncodable};
use network::serialize::{RawEncoder, BitcoinHash};
use util::uint::Uint256;
/// Hex deserialization error
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum HexError {
/// Length was not 64 characters
BadLength(usize),
/// Non-hex character in string
BadCharacter(char)
}
impl fmt::Display for HexError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
HexError::BadLength(n) => write!(f, "bad length {} for sha256d hex string", n),
HexError::BadCharacter(c) => write!(f, "bad character {} in sha256d hex string", c)
}
}
}
impl error::Error for HexError {
fn cause(&self) -> Option<&error::Error> { None }
fn description(&self) -> &str
|
}
/// A Bitcoin hash, 32-bytes, computed from x as SHA256(SHA256(x))
pub struct Sha256dHash([u8; 32]);
impl_array_newtype!(Sha256dHash, u8, 32);
/// A RIPEMD-160 hash
pub struct Ripemd160Hash([u8; 20]);
impl_array_newtype!(Ripemd160Hash, u8, 20);
/// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x))
pub struct Hash160([u8; 20]);
impl_array_newtype!(Hash160, u8, 20);
/// A 32-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash32((u8, u8, u8, u8));
/// A 48-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash48((u8, u8, u8, u8, u8, u8));
/// A 64-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash64((u8, u8, u8, u8, u8, u8, u8, u8));
impl Ripemd160Hash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Ripemd160Hash {
let mut ret = [0; 20];
let mut rmd = Ripemd160::new();
rmd.input(data);
rmd.result(&mut ret);
Ripemd160Hash(ret)
}
}
impl Hash160 {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Hash160 {
let mut tmp = [0; 32];
let mut ret = [0; 20];
let mut sha2 = Sha256::new();
let mut rmd = Ripemd160::new();
sha2.input(data);
sha2.result(&mut tmp);
rmd.input(&tmp);
rmd.result(&mut ret);
Hash160(ret)
}
}
// This doesn't make much sense to me, but is implicit behaviour
// in the C++ reference client, so we need it for consensus.
impl Default for Sha256dHash {
#[inline]
fn default() -> Sha256dHash { Sha256dHash([0u8; 32]) }
}
impl Sha256dHash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Sha256dHash {
let Sha256dHash(mut ret): Sha256dHash = Default::default();
let mut sha2 = Sha256::new();
sha2.input(data);
sha2.result(&mut ret);
sha2.reset();
sha2.input(&ret);
sha2.result(&mut ret);
Sha256dHash(ret)
}
/// Converts a hash to a little-endian Uint256
#[inline]
pub fn into_le(self) -> Uint256 {
let Sha256dHash(data) = self;
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
for x in (&mut ret).iter_mut() { *x = x.to_le(); }
Uint256(ret)
}
/// Converts a hash to a big-endian Uint256
#[inline]
pub fn into_be(self) -> Uint256 {
let Sha256dHash(mut data) = self;
data.reverse();
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
for x in (&mut ret).iter_mut() { *x = x.to_be(); }
Uint256(ret)
}
/// Converts a hash to a Hash32 by truncation
#[inline]
pub fn into_hash32(self) -> Hash32 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[8], data[16], data[24]]) }
}
/// Converts a hash to a Hash48 by truncation
#[inline]
pub fn into_hash48(self) -> Hash48 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[6], data[12], data[18], data[24], data[30]]) }
}
// Human-readable hex output
/// Decodes a big-endian (i.e. reversed vs sha256sum output) hex string as a Sha256dHash
#[inline]
pub fn from_hex(s: &str) -> Result<Sha256dHash, HexError> {
if s.len()!= 64 {
return Err(HexError::BadLength(s.len()));
}
let bytes = s.as_bytes();
let mut ret: [u8; 32] = unsafe { mem::uninitialized() };
for i in 0..32 {
let hi = match bytes[2*i] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
let lo = match bytes[2*i + 1] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
ret[31 - i] = hi * 0x10 + lo;
}
Ok(Sha256dHash(ret))
}
/// Converts a hash to a Hash64 by truncation
#[inline]
pub fn into_hash64(self) -> Hash64 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[4], data[8], data[12],
data[16], data[20], data[24], data[28]]) }
}
/// Human-readable hex output
pub fn le_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for item in data.iter().take(32) {
ret.push(from_digit((*item / 0x10) as u32, 16).unwrap());
ret.push(from_digit((*item & 0x0f) as u32, 16).unwrap());
}
ret
}
/// Human-readable hex output
pub fn be_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for i in (0..32).rev() {
ret.push(from_digit((data[i] / 0x10) as u32, 16).unwrap());
ret.push(from_digit((data[i] & 0x0f) as u32, 16).unwrap());
}
ret
}
}
// Note that this outputs hashes as big endian hex numbers, so this should be
// used only for user-facing stuff. Internal and network serialization is
// little-endian and should be done using the consensus `encodable::ConsensusEncodable`
// interface.
impl serde::Serialize for Sha256dHash {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::Serializer,
{
unsafe {
use std::{char, mem, str};
let mut string: [u8; 64] = mem::uninitialized();
for i in 0..32 {
string[2 * i] = char::from_digit((self.0[31 - i] / 0x10) as u32, 16).unwrap() as u8;
string[2 * i + 1] = char::from_digit((self.0[31 - i] & 0x0f) as u32, 16).unwrap() as u8;
}
serializer.visit_str(str::from_utf8_unchecked(&string))
}
}
}
impl serde::Deserialize for Sha256dHash {
#[inline]
fn deserialize<D>(d: &mut D) -> Result<Sha256dHash, D::Error>
where D: serde::Deserializer
{
struct Sha256dHashVisitor;
impl serde::de::Visitor for Sha256dHashVisitor {
type Value = Sha256dHash;
fn visit_string<E>(&mut self, v: String) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
self.visit_str(&v)
}
fn visit_str<E>(&mut self, hex_str: &str) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
Sha256dHash::from_hex(hex_str).map_err(|e| serde::de::Error::syntax(&e.to_string()))
}
}
d.visit(Sha256dHashVisitor)
}
}
// Consensus encoding (little-endian)
impl_newtype_consensus_encoding!(Hash32);
impl_newtype_consensus_encoding!(Hash48);
impl_newtype_consensus_encoding!(Hash64);
impl_newtype_consensus_encoding!(Sha256dHash);
impl fmt::Debug for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::Display for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::LowerHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02x}", ch));
}
Ok(())
}
}
impl fmt::UpperHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02X}", ch));
}
Ok(())
}
}
/// Any collection of objects for which a merkle root makes sense to calculate
pub trait MerkleRoot {
/// Construct a merkle tree from a collection, with elements ordered as
/// they were in the original collection, and return the merkle root.
fn merkle_root(&self) -> Sha256dHash;
}
impl<'a, T: BitcoinHash> MerkleRoot for &'a [T] {
fn merkle_root(&self) -> Sha256dHash {
fn merkle_root(data: Vec<Sha256dHash>) -> Sha256dHash {
// Base case
if data.len() < 1 {
return Default::default();
}
if data.len() < 2 {
return data[0];
}
// Recursion
let mut next = vec![];
for idx in 0..((data.len() + 1) / 2) {
let idx1 = 2 * idx;
let idx2 = min(idx1 + 1, data.len() - 1);
let mut encoder = RawEncoder::new(Cursor::new(vec![]));
data[idx1].consensus_encode(&mut encoder).unwrap();
data[idx2].consensus_encode(&mut encoder).unwrap();
next.push(encoder.into_inner().into_inner().bitcoin_hash());
}
merkle_root(next)
}
merkle_root(self.iter().map(|obj| obj.bitcoin_hash()).collect())
}
}
impl <T: BitcoinHash> MerkleRoot for Vec<T> {
fn merkle_root(&self) -> Sha256dHash {
(&self[..]).merkle_root()
}
}
#[cfg(test)]
mod tests {
use num::FromPrimitive;
use strason;
use network::serialize::{serialize, deserialize};
use util::hash::Sha256dHash;
#[test]
fn test_sha256d() {
// nb the 5df6... output is the one you get from sha256sum. this is the
// "little-endian" hex string since it matches the in-memory representation
// of a Uint256 (which is little-endian) after transmutation
assert_eq!(Sha256dHash::from_data(&[]).le_hex_string(),
"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456");
assert_eq!(Sha256dHash::from_data(&[]).be_hex_string(),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:?}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:x}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:X}", Sha256dHash::from_data(&[])),
"56944C5D3F98413EF45CF54545538103CC9F298E0575820AD3591376E2E0F65D");
}
#[test]
fn test_consenus_encode_roundtrip() {
let hash = Sha256dHash::from_data(&[]);
let serial = serialize(&hash).unwrap();
let deserial = deserialize(&serial).unwrap();
assert_eq!(hash, deserial);
}
#[test]
fn test_hash_encode_decode() {
let hash = Sha256dHash::from_data(&[]);
let encoded = strason::from_serialize(&hash).unwrap();
assert_eq!(encoded.to_bytes(),
"\"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d\"".as_bytes());
let decoded = encoded.into_deserialize().unwrap();
assert_eq!(hash, decoded);
}
#[test]
fn test_sighash_single_vec() {
let one = Sha256dHash([1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(Some(one.into_le()), FromPrimitive::from_u64(1));
assert_eq!(Some(one.into_le().low_128()), FromPrimitive::from_u64(1));
}
}
|
{
match *self {
HexError::BadLength(_) => "sha256d hex string non-64 length",
HexError::BadCharacter(_) => "sha256d bad hex character"
}
}
|
identifier_body
|
hash.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Hash functions
//!
//! Utility functions related to hashing data, including merkleization
use std::char::from_digit;
use std::cmp::min;
use std::default::Default;
use std::error;
use std::fmt;
use std::io::Cursor;
use std::mem;
use serde;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use crypto::ripemd160::Ripemd160;
use network::encodable::{ConsensusDecodable, ConsensusEncodable};
use network::serialize::{RawEncoder, BitcoinHash};
use util::uint::Uint256;
/// Hex deserialization error
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum HexError {
/// Length was not 64 characters
BadLength(usize),
/// Non-hex character in string
BadCharacter(char)
}
impl fmt::Display for HexError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
HexError::BadLength(n) => write!(f, "bad length {} for sha256d hex string", n),
HexError::BadCharacter(c) => write!(f, "bad character {} in sha256d hex string", c)
}
}
}
impl error::Error for HexError {
fn cause(&self) -> Option<&error::Error> { None }
fn description(&self) -> &str {
match *self {
HexError::BadLength(_) => "sha256d hex string non-64 length",
HexError::BadCharacter(_) => "sha256d bad hex character"
}
}
}
/// A Bitcoin hash, 32-bytes, computed from x as SHA256(SHA256(x))
pub struct Sha256dHash([u8; 32]);
impl_array_newtype!(Sha256dHash, u8, 32);
/// A RIPEMD-160 hash
pub struct Ripemd160Hash([u8; 20]);
impl_array_newtype!(Ripemd160Hash, u8, 20);
/// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x))
pub struct Hash160([u8; 20]);
impl_array_newtype!(Hash160, u8, 20);
/// A 32-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash32((u8, u8, u8, u8));
/// A 48-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash48((u8, u8, u8, u8, u8, u8));
/// A 64-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash64((u8, u8, u8, u8, u8, u8, u8, u8));
impl Ripemd160Hash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Ripemd160Hash {
let mut ret = [0; 20];
let mut rmd = Ripemd160::new();
rmd.input(data);
rmd.result(&mut ret);
Ripemd160Hash(ret)
}
}
impl Hash160 {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Hash160 {
let mut tmp = [0; 32];
let mut ret = [0; 20];
let mut sha2 = Sha256::new();
let mut rmd = Ripemd160::new();
sha2.input(data);
sha2.result(&mut tmp);
rmd.input(&tmp);
rmd.result(&mut ret);
Hash160(ret)
}
}
// This doesn't make much sense to me, but is implicit behaviour
// in the C++ reference client, so we need it for consensus.
impl Default for Sha256dHash {
#[inline]
fn default() -> Sha256dHash { Sha256dHash([0u8; 32]) }
}
impl Sha256dHash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Sha256dHash {
let Sha256dHash(mut ret): Sha256dHash = Default::default();
let mut sha2 = Sha256::new();
sha2.input(data);
sha2.result(&mut ret);
sha2.reset();
sha2.input(&ret);
sha2.result(&mut ret);
Sha256dHash(ret)
}
/// Converts a hash to a little-endian Uint256
#[inline]
|
for x in (&mut ret).iter_mut() { *x = x.to_le(); }
Uint256(ret)
}
/// Converts a hash to a big-endian Uint256
#[inline]
pub fn into_be(self) -> Uint256 {
let Sha256dHash(mut data) = self;
data.reverse();
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
for x in (&mut ret).iter_mut() { *x = x.to_be(); }
Uint256(ret)
}
/// Converts a hash to a Hash32 by truncation
#[inline]
pub fn into_hash32(self) -> Hash32 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[8], data[16], data[24]]) }
}
/// Converts a hash to a Hash48 by truncation
#[inline]
pub fn into_hash48(self) -> Hash48 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[6], data[12], data[18], data[24], data[30]]) }
}
// Human-readable hex output
/// Decodes a big-endian (i.e. reversed vs sha256sum output) hex string as a Sha256dHash
#[inline]
pub fn from_hex(s: &str) -> Result<Sha256dHash, HexError> {
if s.len()!= 64 {
return Err(HexError::BadLength(s.len()));
}
let bytes = s.as_bytes();
let mut ret: [u8; 32] = unsafe { mem::uninitialized() };
for i in 0..32 {
let hi = match bytes[2*i] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
let lo = match bytes[2*i + 1] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
ret[31 - i] = hi * 0x10 + lo;
}
Ok(Sha256dHash(ret))
}
/// Converts a hash to a Hash64 by truncation
#[inline]
pub fn into_hash64(self) -> Hash64 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[4], data[8], data[12],
data[16], data[20], data[24], data[28]]) }
}
/// Human-readable hex output
pub fn le_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for item in data.iter().take(32) {
ret.push(from_digit((*item / 0x10) as u32, 16).unwrap());
ret.push(from_digit((*item & 0x0f) as u32, 16).unwrap());
}
ret
}
/// Human-readable hex output
pub fn be_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for i in (0..32).rev() {
ret.push(from_digit((data[i] / 0x10) as u32, 16).unwrap());
ret.push(from_digit((data[i] & 0x0f) as u32, 16).unwrap());
}
ret
}
}
// Note that this outputs hashes as big endian hex numbers, so this should be
// used only for user-facing stuff. Internal and network serialization is
// little-endian and should be done using the consensus `encodable::ConsensusEncodable`
// interface.
impl serde::Serialize for Sha256dHash {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::Serializer,
{
unsafe {
use std::{char, mem, str};
let mut string: [u8; 64] = mem::uninitialized();
for i in 0..32 {
string[2 * i] = char::from_digit((self.0[31 - i] / 0x10) as u32, 16).unwrap() as u8;
string[2 * i + 1] = char::from_digit((self.0[31 - i] & 0x0f) as u32, 16).unwrap() as u8;
}
serializer.visit_str(str::from_utf8_unchecked(&string))
}
}
}
impl serde::Deserialize for Sha256dHash {
#[inline]
fn deserialize<D>(d: &mut D) -> Result<Sha256dHash, D::Error>
where D: serde::Deserializer
{
struct Sha256dHashVisitor;
impl serde::de::Visitor for Sha256dHashVisitor {
type Value = Sha256dHash;
fn visit_string<E>(&mut self, v: String) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
self.visit_str(&v)
}
fn visit_str<E>(&mut self, hex_str: &str) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
Sha256dHash::from_hex(hex_str).map_err(|e| serde::de::Error::syntax(&e.to_string()))
}
}
d.visit(Sha256dHashVisitor)
}
}
// Consensus encoding (little-endian)
impl_newtype_consensus_encoding!(Hash32);
impl_newtype_consensus_encoding!(Hash48);
impl_newtype_consensus_encoding!(Hash64);
impl_newtype_consensus_encoding!(Sha256dHash);
impl fmt::Debug for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::Display for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::LowerHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02x}", ch));
}
Ok(())
}
}
impl fmt::UpperHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02X}", ch));
}
Ok(())
}
}
/// Any collection of objects for which a merkle root makes sense to calculate
pub trait MerkleRoot {
/// Construct a merkle tree from a collection, with elements ordered as
/// they were in the original collection, and return the merkle root.
fn merkle_root(&self) -> Sha256dHash;
}
impl<'a, T: BitcoinHash> MerkleRoot for &'a [T] {
fn merkle_root(&self) -> Sha256dHash {
fn merkle_root(data: Vec<Sha256dHash>) -> Sha256dHash {
// Base case
if data.len() < 1 {
return Default::default();
}
if data.len() < 2 {
return data[0];
}
// Recursion
let mut next = vec![];
for idx in 0..((data.len() + 1) / 2) {
let idx1 = 2 * idx;
let idx2 = min(idx1 + 1, data.len() - 1);
let mut encoder = RawEncoder::new(Cursor::new(vec![]));
data[idx1].consensus_encode(&mut encoder).unwrap();
data[idx2].consensus_encode(&mut encoder).unwrap();
next.push(encoder.into_inner().into_inner().bitcoin_hash());
}
merkle_root(next)
}
merkle_root(self.iter().map(|obj| obj.bitcoin_hash()).collect())
}
}
impl <T: BitcoinHash> MerkleRoot for Vec<T> {
fn merkle_root(&self) -> Sha256dHash {
(&self[..]).merkle_root()
}
}
#[cfg(test)]
mod tests {
use num::FromPrimitive;
use strason;
use network::serialize::{serialize, deserialize};
use util::hash::Sha256dHash;
#[test]
fn test_sha256d() {
// nb the 5df6... output is the one you get from sha256sum. this is the
// "little-endian" hex string since it matches the in-memory representation
// of a Uint256 (which is little-endian) after transmutation
assert_eq!(Sha256dHash::from_data(&[]).le_hex_string(),
"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456");
assert_eq!(Sha256dHash::from_data(&[]).be_hex_string(),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:?}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:x}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:X}", Sha256dHash::from_data(&[])),
"56944C5D3F98413EF45CF54545538103CC9F298E0575820AD3591376E2E0F65D");
}
#[test]
fn test_consenus_encode_roundtrip() {
let hash = Sha256dHash::from_data(&[]);
let serial = serialize(&hash).unwrap();
let deserial = deserialize(&serial).unwrap();
assert_eq!(hash, deserial);
}
#[test]
fn test_hash_encode_decode() {
let hash = Sha256dHash::from_data(&[]);
let encoded = strason::from_serialize(&hash).unwrap();
assert_eq!(encoded.to_bytes(),
"\"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d\"".as_bytes());
let decoded = encoded.into_deserialize().unwrap();
assert_eq!(hash, decoded);
}
#[test]
fn test_sighash_single_vec() {
let one = Sha256dHash([1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(Some(one.into_le()), FromPrimitive::from_u64(1));
assert_eq!(Some(one.into_le().low_128()), FromPrimitive::from_u64(1));
}
}
|
pub fn into_le(self) -> Uint256 {
let Sha256dHash(data) = self;
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
|
random_line_split
|
hash.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! # Hash functions
//!
//! Utility functions related to hashing data, including merkleization
use std::char::from_digit;
use std::cmp::min;
use std::default::Default;
use std::error;
use std::fmt;
use std::io::Cursor;
use std::mem;
use serde;
use crypto::digest::Digest;
use crypto::sha2::Sha256;
use crypto::ripemd160::Ripemd160;
use network::encodable::{ConsensusDecodable, ConsensusEncodable};
use network::serialize::{RawEncoder, BitcoinHash};
use util::uint::Uint256;
/// Hex deserialization error
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum HexError {
/// Length was not 64 characters
BadLength(usize),
/// Non-hex character in string
BadCharacter(char)
}
impl fmt::Display for HexError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
HexError::BadLength(n) => write!(f, "bad length {} for sha256d hex string", n),
HexError::BadCharacter(c) => write!(f, "bad character {} in sha256d hex string", c)
}
}
}
impl error::Error for HexError {
fn cause(&self) -> Option<&error::Error> { None }
fn description(&self) -> &str {
match *self {
HexError::BadLength(_) => "sha256d hex string non-64 length",
HexError::BadCharacter(_) => "sha256d bad hex character"
}
}
}
/// A Bitcoin hash, 32-bytes, computed from x as SHA256(SHA256(x))
pub struct
|
([u8; 32]);
impl_array_newtype!(Sha256dHash, u8, 32);
/// A RIPEMD-160 hash
pub struct Ripemd160Hash([u8; 20]);
impl_array_newtype!(Ripemd160Hash, u8, 20);
/// A Bitcoin hash160, 20-bytes, computed from x as RIPEMD160(SHA256(x))
pub struct Hash160([u8; 20]);
impl_array_newtype!(Hash160, u8, 20);
/// A 32-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash32((u8, u8, u8, u8));
/// A 48-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash48((u8, u8, u8, u8, u8, u8));
/// A 64-bit hash obtained by truncating a real hash
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Hash64((u8, u8, u8, u8, u8, u8, u8, u8));
impl Ripemd160Hash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Ripemd160Hash {
let mut ret = [0; 20];
let mut rmd = Ripemd160::new();
rmd.input(data);
rmd.result(&mut ret);
Ripemd160Hash(ret)
}
}
impl Hash160 {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Hash160 {
let mut tmp = [0; 32];
let mut ret = [0; 20];
let mut sha2 = Sha256::new();
let mut rmd = Ripemd160::new();
sha2.input(data);
sha2.result(&mut tmp);
rmd.input(&tmp);
rmd.result(&mut ret);
Hash160(ret)
}
}
// This doesn't make much sense to me, but is implicit behaviour
// in the C++ reference client, so we need it for consensus.
impl Default for Sha256dHash {
#[inline]
fn default() -> Sha256dHash { Sha256dHash([0u8; 32]) }
}
impl Sha256dHash {
/// Create a hash by hashing some data
pub fn from_data(data: &[u8]) -> Sha256dHash {
let Sha256dHash(mut ret): Sha256dHash = Default::default();
let mut sha2 = Sha256::new();
sha2.input(data);
sha2.result(&mut ret);
sha2.reset();
sha2.input(&ret);
sha2.result(&mut ret);
Sha256dHash(ret)
}
/// Converts a hash to a little-endian Uint256
#[inline]
pub fn into_le(self) -> Uint256 {
let Sha256dHash(data) = self;
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
for x in (&mut ret).iter_mut() { *x = x.to_le(); }
Uint256(ret)
}
/// Converts a hash to a big-endian Uint256
#[inline]
pub fn into_be(self) -> Uint256 {
let Sha256dHash(mut data) = self;
data.reverse();
let mut ret: [u64; 4] = unsafe { mem::transmute(data) };
for x in (&mut ret).iter_mut() { *x = x.to_be(); }
Uint256(ret)
}
/// Converts a hash to a Hash32 by truncation
#[inline]
pub fn into_hash32(self) -> Hash32 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[8], data[16], data[24]]) }
}
/// Converts a hash to a Hash48 by truncation
#[inline]
pub fn into_hash48(self) -> Hash48 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[6], data[12], data[18], data[24], data[30]]) }
}
// Human-readable hex output
/// Decodes a big-endian (i.e. reversed vs sha256sum output) hex string as a Sha256dHash
#[inline]
pub fn from_hex(s: &str) -> Result<Sha256dHash, HexError> {
if s.len()!= 64 {
return Err(HexError::BadLength(s.len()));
}
let bytes = s.as_bytes();
let mut ret: [u8; 32] = unsafe { mem::uninitialized() };
for i in 0..32 {
let hi = match bytes[2*i] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
let lo = match bytes[2*i + 1] {
b @ b'0'...b'9' => (b - b'0') as u8,
b @ b'a'...b'f' => (b - b'a' + 10) as u8,
b @ b'A'...b'F' => (b - b'A' + 10) as u8,
b => return Err(HexError::BadCharacter(b as char))
};
ret[31 - i] = hi * 0x10 + lo;
}
Ok(Sha256dHash(ret))
}
/// Converts a hash to a Hash64 by truncation
#[inline]
pub fn into_hash64(self) -> Hash64 {
let Sha256dHash(data) = self;
unsafe { mem::transmute([data[0], data[4], data[8], data[12],
data[16], data[20], data[24], data[28]]) }
}
/// Human-readable hex output
pub fn le_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for item in data.iter().take(32) {
ret.push(from_digit((*item / 0x10) as u32, 16).unwrap());
ret.push(from_digit((*item & 0x0f) as u32, 16).unwrap());
}
ret
}
/// Human-readable hex output
pub fn be_hex_string(&self) -> String {
let &Sha256dHash(data) = self;
let mut ret = String::with_capacity(64);
for i in (0..32).rev() {
ret.push(from_digit((data[i] / 0x10) as u32, 16).unwrap());
ret.push(from_digit((data[i] & 0x0f) as u32, 16).unwrap());
}
ret
}
}
// Note that this outputs hashes as big endian hex numbers, so this should be
// used only for user-facing stuff. Internal and network serialization is
// little-endian and should be done using the consensus `encodable::ConsensusEncodable`
// interface.
impl serde::Serialize for Sha256dHash {
fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
where S: serde::Serializer,
{
unsafe {
use std::{char, mem, str};
let mut string: [u8; 64] = mem::uninitialized();
for i in 0..32 {
string[2 * i] = char::from_digit((self.0[31 - i] / 0x10) as u32, 16).unwrap() as u8;
string[2 * i + 1] = char::from_digit((self.0[31 - i] & 0x0f) as u32, 16).unwrap() as u8;
}
serializer.visit_str(str::from_utf8_unchecked(&string))
}
}
}
impl serde::Deserialize for Sha256dHash {
#[inline]
fn deserialize<D>(d: &mut D) -> Result<Sha256dHash, D::Error>
where D: serde::Deserializer
{
struct Sha256dHashVisitor;
impl serde::de::Visitor for Sha256dHashVisitor {
type Value = Sha256dHash;
fn visit_string<E>(&mut self, v: String) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
self.visit_str(&v)
}
fn visit_str<E>(&mut self, hex_str: &str) -> Result<Sha256dHash, E>
where E: serde::de::Error
{
Sha256dHash::from_hex(hex_str).map_err(|e| serde::de::Error::syntax(&e.to_string()))
}
}
d.visit(Sha256dHashVisitor)
}
}
// Consensus encoding (little-endian)
impl_newtype_consensus_encoding!(Hash32);
impl_newtype_consensus_encoding!(Hash48);
impl_newtype_consensus_encoding!(Hash64);
impl_newtype_consensus_encoding!(Sha256dHash);
impl fmt::Debug for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::Display for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fmt::LowerHex::fmt(self, f) }
}
impl fmt::LowerHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02x}", ch));
}
Ok(())
}
}
impl fmt::UpperHex for Sha256dHash {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let &Sha256dHash(data) = self;
for ch in data.iter().rev() {
try!(write!(f, "{:02X}", ch));
}
Ok(())
}
}
/// Any collection of objects for which a merkle root makes sense to calculate
pub trait MerkleRoot {
/// Construct a merkle tree from a collection, with elements ordered as
/// they were in the original collection, and return the merkle root.
fn merkle_root(&self) -> Sha256dHash;
}
impl<'a, T: BitcoinHash> MerkleRoot for &'a [T] {
fn merkle_root(&self) -> Sha256dHash {
fn merkle_root(data: Vec<Sha256dHash>) -> Sha256dHash {
// Base case
if data.len() < 1 {
return Default::default();
}
if data.len() < 2 {
return data[0];
}
// Recursion
let mut next = vec![];
for idx in 0..((data.len() + 1) / 2) {
let idx1 = 2 * idx;
let idx2 = min(idx1 + 1, data.len() - 1);
let mut encoder = RawEncoder::new(Cursor::new(vec![]));
data[idx1].consensus_encode(&mut encoder).unwrap();
data[idx2].consensus_encode(&mut encoder).unwrap();
next.push(encoder.into_inner().into_inner().bitcoin_hash());
}
merkle_root(next)
}
merkle_root(self.iter().map(|obj| obj.bitcoin_hash()).collect())
}
}
impl <T: BitcoinHash> MerkleRoot for Vec<T> {
fn merkle_root(&self) -> Sha256dHash {
(&self[..]).merkle_root()
}
}
#[cfg(test)]
mod tests {
use num::FromPrimitive;
use strason;
use network::serialize::{serialize, deserialize};
use util::hash::Sha256dHash;
#[test]
fn test_sha256d() {
// nb the 5df6... output is the one you get from sha256sum. this is the
// "little-endian" hex string since it matches the in-memory representation
// of a Uint256 (which is little-endian) after transmutation
assert_eq!(Sha256dHash::from_data(&[]).le_hex_string(),
"5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456");
assert_eq!(Sha256dHash::from_data(&[]).be_hex_string(),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:?}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:x}", Sha256dHash::from_data(&[])),
"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d");
assert_eq!(format!("{:X}", Sha256dHash::from_data(&[])),
"56944C5D3F98413EF45CF54545538103CC9F298E0575820AD3591376E2E0F65D");
}
#[test]
fn test_consenus_encode_roundtrip() {
let hash = Sha256dHash::from_data(&[]);
let serial = serialize(&hash).unwrap();
let deserial = deserialize(&serial).unwrap();
assert_eq!(hash, deserial);
}
#[test]
fn test_hash_encode_decode() {
let hash = Sha256dHash::from_data(&[]);
let encoded = strason::from_serialize(&hash).unwrap();
assert_eq!(encoded.to_bytes(),
"\"56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d\"".as_bytes());
let decoded = encoded.into_deserialize().unwrap();
assert_eq!(hash, decoded);
}
#[test]
fn test_sighash_single_vec() {
let one = Sha256dHash([1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0]);
assert_eq!(Some(one.into_le()), FromPrimitive::from_u64(1));
assert_eq!(Some(one.into_le().low_128()), FromPrimitive::from_u64(1));
}
}
|
Sha256dHash
|
identifier_name
|
socket.rs
|
use chan::{self, Sender, Receiver};
use json;
use serde::ser::Serialize;
use std::io::{BufReader, Read, Write};
use std::net::Shutdown;
use std::{fs, thread};
use unix_socket::{UnixListener, UnixStream};
use datatype::{Command, DownloadFailed, Error, Event};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `Socket` gateway is used for communication via Unix Domain Sockets.
pub struct Socket {
pub cmd_sock: String,
pub ev_sock: String,
}
impl Gateway for Socket {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Listening for commands at socket {}", self.cmd_sock);
info!("Sending events to socket {}", self.ev_sock);
let _ = fs::remove_file(&self.cmd_sock);
let cmd_sock = UnixListener::bind(&self.cmd_sock).expect("command socket");
let ev_sock = self.ev_sock.clone();
thread::spawn(move || loop {
handle_event(&ev_sock, erx.recv().expect("socket events"))
});
for conn in cmd_sock.incoming() {
let ctx = ctx.clone();
conn.map(|stream| thread::spawn(move || handle_stream(stream, &ctx)))
.map(|_handle| ())
.unwrap_or_else(|err| error!("couldn't open socket connection: {}", err));
}
}
}
fn handle_stream(mut stream: UnixStream, ctx: &Sender<CommandExec>) {
info!("New socket connection.");
let resp = parse_command(&mut stream, ctx)
.map(|ev| json::to_vec(&ev).expect("couldn't encode Event"))
.unwrap_or_else(|err| format!("{}", err).into_bytes());
stream.write_all(&resp).unwrap_or_else(|err| error!("couldn't write to commands socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close commands socket: {}", err));
}
fn parse_command(stream: &mut UnixStream, ctx: &Sender<CommandExec>) -> Result<Event, Error> {
let mut reader = BufReader::new(stream);
let mut input = String::new();
reader.read_to_string(&mut input)?;
debug!("socket input: {}", input);
let cmd = input.parse::<Command>()?;
let (etx, erx) = chan::async::<Event>();
ctx.send(CommandExec { cmd: cmd, etx: Some(etx) });
erx.recv().ok_or_else(|| Error::Socket("internal receiver error".to_string()))
}
fn handle_event(ev_sock: &str, event: Event) {
let reply = match event {
Event::DownloadComplete(dl) =>
|
Event::DownloadFailed(id, reason) => {
EventWrapper::new("DownloadFailed", DownloadFailed { update_id: id, reason: reason }).to_json()
}
_ => return
};
let _ = UnixStream::connect(ev_sock)
.map_err(|err| debug!("skipping event socket broadcast: {}", err))
.map(|mut stream| {
stream.write_all(&reply).unwrap_or_else(|err| error!("couldn't write to events socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close events socket: {}", err));
});
}
// FIXME(PRO-1322): create a proper JSON api
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct EventWrapper<S: Serialize> {
pub version: String,
pub event: String,
pub data: S
}
impl<S: Serialize> EventWrapper<S> {
fn new(event: &str, data: S) -> Self {
EventWrapper { version: "0.1".into(), event: event.into(), data: data }
}
fn to_json(&self) -> Vec<u8> {
json::to_vec(self).expect("encode EventWrapper")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam;
use uuid::Uuid;
use datatype::{Command, DownloadComplete, Event};
const CMD_SOCK: &'static str = "/tmp/sota-commands.socket";
const EV_SOCK: &'static str = "/tmp/sota-events.socket";
#[test]
fn socket_commands_and_events() {
let (ctx, crx) = chan::sync::<CommandExec>(0);
let (etx, erx) = chan::sync::<Event>(0);
let mut socket = Socket { cmd_sock: CMD_SOCK.into(), ev_sock: EV_SOCK.into() };
thread::spawn(move || socket.start(ctx, erx));
let _ = fs::remove_file(EV_SOCK);
let serv = UnixListener::bind(EV_SOCK).expect("open events socket");
let send = DownloadComplete { update_id: Uuid::default(), update_image: "/foo".into(), signature: "sig".into() };
etx.send(Event::DownloadComplete(send.clone()));
let (stream, _) = serv.accept().expect("read events socket");
let recv: EventWrapper<DownloadComplete> = json::from_reader(&stream).expect("recv event");
assert_eq!(recv.version, "0.1".to_string());
assert_eq!(recv.event, "DownloadComplete".to_string());
assert_eq!(recv.data, send);
thread::spawn(move || {
let _ = etx; // move into this scope
loop {
match crx.recv() {
Some(CommandExec { cmd: Command::StartInstall(id), etx: Some(etx) }) => {
etx.send(Event::InstallingUpdate(id));
}
Some(_) => panic!("expected StartInstall"),
None => break
}
}
});
crossbeam::scope(|scope| {
for n in 0..10 {
scope.spawn(move || {
let id = format!("00000000-0000-0000-0000-00000000000{}", n).parse::<Uuid>().unwrap();
let mut stream = UnixStream::connect(CMD_SOCK).expect("open command socket");
let _ = stream.write_all(&format!("StartInstall {}", id).into_bytes()).expect("write to stream");
stream.shutdown(Shutdown::Write).expect("shut down writing");
assert_eq!(Event::InstallingUpdate(id), json::from_reader(&stream).expect("read event"));
});
}
});
}
}
|
{
EventWrapper::new("DownloadComplete", dl).to_json()
}
|
conditional_block
|
socket.rs
|
use chan::{self, Sender, Receiver};
use json;
use serde::ser::Serialize;
use std::io::{BufReader, Read, Write};
use std::net::Shutdown;
use std::{fs, thread};
use unix_socket::{UnixListener, UnixStream};
use datatype::{Command, DownloadFailed, Error, Event};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `Socket` gateway is used for communication via Unix Domain Sockets.
pub struct Socket {
pub cmd_sock: String,
pub ev_sock: String,
}
impl Gateway for Socket {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Listening for commands at socket {}", self.cmd_sock);
info!("Sending events to socket {}", self.ev_sock);
let _ = fs::remove_file(&self.cmd_sock);
let cmd_sock = UnixListener::bind(&self.cmd_sock).expect("command socket");
let ev_sock = self.ev_sock.clone();
thread::spawn(move || loop {
handle_event(&ev_sock, erx.recv().expect("socket events"))
});
for conn in cmd_sock.incoming() {
let ctx = ctx.clone();
conn.map(|stream| thread::spawn(move || handle_stream(stream, &ctx)))
.map(|_handle| ())
.unwrap_or_else(|err| error!("couldn't open socket connection: {}", err));
}
}
}
fn handle_stream(mut stream: UnixStream, ctx: &Sender<CommandExec>) {
info!("New socket connection.");
let resp = parse_command(&mut stream, ctx)
.map(|ev| json::to_vec(&ev).expect("couldn't encode Event"))
.unwrap_or_else(|err| format!("{}", err).into_bytes());
stream.write_all(&resp).unwrap_or_else(|err| error!("couldn't write to commands socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close commands socket: {}", err));
}
fn parse_command(stream: &mut UnixStream, ctx: &Sender<CommandExec>) -> Result<Event, Error> {
let mut reader = BufReader::new(stream);
let mut input = String::new();
reader.read_to_string(&mut input)?;
debug!("socket input: {}", input);
let cmd = input.parse::<Command>()?;
let (etx, erx) = chan::async::<Event>();
ctx.send(CommandExec { cmd: cmd, etx: Some(etx) });
erx.recv().ok_or_else(|| Error::Socket("internal receiver error".to_string()))
}
fn
|
(ev_sock: &str, event: Event) {
let reply = match event {
Event::DownloadComplete(dl) => {
EventWrapper::new("DownloadComplete", dl).to_json()
}
Event::DownloadFailed(id, reason) => {
EventWrapper::new("DownloadFailed", DownloadFailed { update_id: id, reason: reason }).to_json()
}
_ => return
};
let _ = UnixStream::connect(ev_sock)
.map_err(|err| debug!("skipping event socket broadcast: {}", err))
.map(|mut stream| {
stream.write_all(&reply).unwrap_or_else(|err| error!("couldn't write to events socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close events socket: {}", err));
});
}
// FIXME(PRO-1322): create a proper JSON api
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct EventWrapper<S: Serialize> {
pub version: String,
pub event: String,
pub data: S
}
impl<S: Serialize> EventWrapper<S> {
fn new(event: &str, data: S) -> Self {
EventWrapper { version: "0.1".into(), event: event.into(), data: data }
}
fn to_json(&self) -> Vec<u8> {
json::to_vec(self).expect("encode EventWrapper")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam;
use uuid::Uuid;
use datatype::{Command, DownloadComplete, Event};
const CMD_SOCK: &'static str = "/tmp/sota-commands.socket";
const EV_SOCK: &'static str = "/tmp/sota-events.socket";
#[test]
fn socket_commands_and_events() {
let (ctx, crx) = chan::sync::<CommandExec>(0);
let (etx, erx) = chan::sync::<Event>(0);
let mut socket = Socket { cmd_sock: CMD_SOCK.into(), ev_sock: EV_SOCK.into() };
thread::spawn(move || socket.start(ctx, erx));
let _ = fs::remove_file(EV_SOCK);
let serv = UnixListener::bind(EV_SOCK).expect("open events socket");
let send = DownloadComplete { update_id: Uuid::default(), update_image: "/foo".into(), signature: "sig".into() };
etx.send(Event::DownloadComplete(send.clone()));
let (stream, _) = serv.accept().expect("read events socket");
let recv: EventWrapper<DownloadComplete> = json::from_reader(&stream).expect("recv event");
assert_eq!(recv.version, "0.1".to_string());
assert_eq!(recv.event, "DownloadComplete".to_string());
assert_eq!(recv.data, send);
thread::spawn(move || {
let _ = etx; // move into this scope
loop {
match crx.recv() {
Some(CommandExec { cmd: Command::StartInstall(id), etx: Some(etx) }) => {
etx.send(Event::InstallingUpdate(id));
}
Some(_) => panic!("expected StartInstall"),
None => break
}
}
});
crossbeam::scope(|scope| {
for n in 0..10 {
scope.spawn(move || {
let id = format!("00000000-0000-0000-0000-00000000000{}", n).parse::<Uuid>().unwrap();
let mut stream = UnixStream::connect(CMD_SOCK).expect("open command socket");
let _ = stream.write_all(&format!("StartInstall {}", id).into_bytes()).expect("write to stream");
stream.shutdown(Shutdown::Write).expect("shut down writing");
assert_eq!(Event::InstallingUpdate(id), json::from_reader(&stream).expect("read event"));
});
}
});
}
}
|
handle_event
|
identifier_name
|
socket.rs
|
use chan::{self, Sender, Receiver};
use json;
use serde::ser::Serialize;
use std::io::{BufReader, Read, Write};
use std::net::Shutdown;
use std::{fs, thread};
use unix_socket::{UnixListener, UnixStream};
use datatype::{Command, DownloadFailed, Error, Event};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `Socket` gateway is used for communication via Unix Domain Sockets.
pub struct Socket {
pub cmd_sock: String,
pub ev_sock: String,
}
impl Gateway for Socket {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>)
|
}
fn handle_stream(mut stream: UnixStream, ctx: &Sender<CommandExec>) {
info!("New socket connection.");
let resp = parse_command(&mut stream, ctx)
.map(|ev| json::to_vec(&ev).expect("couldn't encode Event"))
.unwrap_or_else(|err| format!("{}", err).into_bytes());
stream.write_all(&resp).unwrap_or_else(|err| error!("couldn't write to commands socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close commands socket: {}", err));
}
fn parse_command(stream: &mut UnixStream, ctx: &Sender<CommandExec>) -> Result<Event, Error> {
let mut reader = BufReader::new(stream);
let mut input = String::new();
reader.read_to_string(&mut input)?;
debug!("socket input: {}", input);
let cmd = input.parse::<Command>()?;
let (etx, erx) = chan::async::<Event>();
ctx.send(CommandExec { cmd: cmd, etx: Some(etx) });
erx.recv().ok_or_else(|| Error::Socket("internal receiver error".to_string()))
}
fn handle_event(ev_sock: &str, event: Event) {
let reply = match event {
Event::DownloadComplete(dl) => {
EventWrapper::new("DownloadComplete", dl).to_json()
}
Event::DownloadFailed(id, reason) => {
EventWrapper::new("DownloadFailed", DownloadFailed { update_id: id, reason: reason }).to_json()
}
_ => return
};
let _ = UnixStream::connect(ev_sock)
.map_err(|err| debug!("skipping event socket broadcast: {}", err))
.map(|mut stream| {
stream.write_all(&reply).unwrap_or_else(|err| error!("couldn't write to events socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close events socket: {}", err));
});
}
// FIXME(PRO-1322): create a proper JSON api
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct EventWrapper<S: Serialize> {
pub version: String,
pub event: String,
pub data: S
}
impl<S: Serialize> EventWrapper<S> {
fn new(event: &str, data: S) -> Self {
EventWrapper { version: "0.1".into(), event: event.into(), data: data }
}
fn to_json(&self) -> Vec<u8> {
json::to_vec(self).expect("encode EventWrapper")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam;
use uuid::Uuid;
use datatype::{Command, DownloadComplete, Event};
const CMD_SOCK: &'static str = "/tmp/sota-commands.socket";
const EV_SOCK: &'static str = "/tmp/sota-events.socket";
#[test]
fn socket_commands_and_events() {
let (ctx, crx) = chan::sync::<CommandExec>(0);
let (etx, erx) = chan::sync::<Event>(0);
let mut socket = Socket { cmd_sock: CMD_SOCK.into(), ev_sock: EV_SOCK.into() };
thread::spawn(move || socket.start(ctx, erx));
let _ = fs::remove_file(EV_SOCK);
let serv = UnixListener::bind(EV_SOCK).expect("open events socket");
let send = DownloadComplete { update_id: Uuid::default(), update_image: "/foo".into(), signature: "sig".into() };
etx.send(Event::DownloadComplete(send.clone()));
let (stream, _) = serv.accept().expect("read events socket");
let recv: EventWrapper<DownloadComplete> = json::from_reader(&stream).expect("recv event");
assert_eq!(recv.version, "0.1".to_string());
assert_eq!(recv.event, "DownloadComplete".to_string());
assert_eq!(recv.data, send);
thread::spawn(move || {
let _ = etx; // move into this scope
loop {
match crx.recv() {
Some(CommandExec { cmd: Command::StartInstall(id), etx: Some(etx) }) => {
etx.send(Event::InstallingUpdate(id));
}
Some(_) => panic!("expected StartInstall"),
None => break
}
}
});
crossbeam::scope(|scope| {
for n in 0..10 {
scope.spawn(move || {
let id = format!("00000000-0000-0000-0000-00000000000{}", n).parse::<Uuid>().unwrap();
let mut stream = UnixStream::connect(CMD_SOCK).expect("open command socket");
let _ = stream.write_all(&format!("StartInstall {}", id).into_bytes()).expect("write to stream");
stream.shutdown(Shutdown::Write).expect("shut down writing");
assert_eq!(Event::InstallingUpdate(id), json::from_reader(&stream).expect("read event"));
});
}
});
}
}
|
{
info!("Listening for commands at socket {}", self.cmd_sock);
info!("Sending events to socket {}", self.ev_sock);
let _ = fs::remove_file(&self.cmd_sock);
let cmd_sock = UnixListener::bind(&self.cmd_sock).expect("command socket");
let ev_sock = self.ev_sock.clone();
thread::spawn(move || loop {
handle_event(&ev_sock, erx.recv().expect("socket events"))
});
for conn in cmd_sock.incoming() {
let ctx = ctx.clone();
conn.map(|stream| thread::spawn(move || handle_stream(stream, &ctx)))
.map(|_handle| ())
.unwrap_or_else(|err| error!("couldn't open socket connection: {}", err));
}
}
|
identifier_body
|
socket.rs
|
use chan::{self, Sender, Receiver};
use json;
use serde::ser::Serialize;
use std::io::{BufReader, Read, Write};
use std::net::Shutdown;
use std::{fs, thread};
use unix_socket::{UnixListener, UnixStream};
use datatype::{Command, DownloadFailed, Error, Event};
use gateway::Gateway;
use interpreter::CommandExec;
/// The `Socket` gateway is used for communication via Unix Domain Sockets.
pub struct Socket {
pub cmd_sock: String,
pub ev_sock: String,
}
impl Gateway for Socket {
fn start(&mut self, ctx: Sender<CommandExec>, erx: Receiver<Event>) {
info!("Listening for commands at socket {}", self.cmd_sock);
info!("Sending events to socket {}", self.ev_sock);
let _ = fs::remove_file(&self.cmd_sock);
let cmd_sock = UnixListener::bind(&self.cmd_sock).expect("command socket");
let ev_sock = self.ev_sock.clone();
thread::spawn(move || loop {
handle_event(&ev_sock, erx.recv().expect("socket events"))
});
for conn in cmd_sock.incoming() {
let ctx = ctx.clone();
conn.map(|stream| thread::spawn(move || handle_stream(stream, &ctx)))
.map(|_handle| ())
.unwrap_or_else(|err| error!("couldn't open socket connection: {}", err));
}
}
}
fn handle_stream(mut stream: UnixStream, ctx: &Sender<CommandExec>) {
info!("New socket connection.");
let resp = parse_command(&mut stream, ctx)
.map(|ev| json::to_vec(&ev).expect("couldn't encode Event"))
.unwrap_or_else(|err| format!("{}", err).into_bytes());
stream.write_all(&resp).unwrap_or_else(|err| error!("couldn't write to commands socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close commands socket: {}", err));
}
fn parse_command(stream: &mut UnixStream, ctx: &Sender<CommandExec>) -> Result<Event, Error> {
let mut reader = BufReader::new(stream);
let mut input = String::new();
reader.read_to_string(&mut input)?;
debug!("socket input: {}", input);
let cmd = input.parse::<Command>()?;
let (etx, erx) = chan::async::<Event>();
ctx.send(CommandExec { cmd: cmd, etx: Some(etx) });
erx.recv().ok_or_else(|| Error::Socket("internal receiver error".to_string()))
}
fn handle_event(ev_sock: &str, event: Event) {
let reply = match event {
Event::DownloadComplete(dl) => {
EventWrapper::new("DownloadComplete", dl).to_json()
}
Event::DownloadFailed(id, reason) => {
EventWrapper::new("DownloadFailed", DownloadFailed { update_id: id, reason: reason }).to_json()
}
_ => return
};
let _ = UnixStream::connect(ev_sock)
.map_err(|err| debug!("skipping event socket broadcast: {}", err))
.map(|mut stream| {
stream.write_all(&reply).unwrap_or_else(|err| error!("couldn't write to events socket: {}", err));
stream.shutdown(Shutdown::Write).unwrap_or_else(|err| error!("couldn't close events socket: {}", err));
});
}
// FIXME(PRO-1322): create a proper JSON api
#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
struct EventWrapper<S: Serialize> {
pub version: String,
pub event: String,
pub data: S
}
impl<S: Serialize> EventWrapper<S> {
fn new(event: &str, data: S) -> Self {
EventWrapper { version: "0.1".into(), event: event.into(), data: data }
}
fn to_json(&self) -> Vec<u8> {
json::to_vec(self).expect("encode EventWrapper")
}
}
#[cfg(test)]
mod tests {
use super::*;
use crossbeam;
use uuid::Uuid;
use datatype::{Command, DownloadComplete, Event};
const CMD_SOCK: &'static str = "/tmp/sota-commands.socket";
const EV_SOCK: &'static str = "/tmp/sota-events.socket";
|
#[test]
fn socket_commands_and_events() {
let (ctx, crx) = chan::sync::<CommandExec>(0);
let (etx, erx) = chan::sync::<Event>(0);
let mut socket = Socket { cmd_sock: CMD_SOCK.into(), ev_sock: EV_SOCK.into() };
thread::spawn(move || socket.start(ctx, erx));
let _ = fs::remove_file(EV_SOCK);
let serv = UnixListener::bind(EV_SOCK).expect("open events socket");
let send = DownloadComplete { update_id: Uuid::default(), update_image: "/foo".into(), signature: "sig".into() };
etx.send(Event::DownloadComplete(send.clone()));
let (stream, _) = serv.accept().expect("read events socket");
let recv: EventWrapper<DownloadComplete> = json::from_reader(&stream).expect("recv event");
assert_eq!(recv.version, "0.1".to_string());
assert_eq!(recv.event, "DownloadComplete".to_string());
assert_eq!(recv.data, send);
thread::spawn(move || {
let _ = etx; // move into this scope
loop {
match crx.recv() {
Some(CommandExec { cmd: Command::StartInstall(id), etx: Some(etx) }) => {
etx.send(Event::InstallingUpdate(id));
}
Some(_) => panic!("expected StartInstall"),
None => break
}
}
});
crossbeam::scope(|scope| {
for n in 0..10 {
scope.spawn(move || {
let id = format!("00000000-0000-0000-0000-00000000000{}", n).parse::<Uuid>().unwrap();
let mut stream = UnixStream::connect(CMD_SOCK).expect("open command socket");
let _ = stream.write_all(&format!("StartInstall {}", id).into_bytes()).expect("write to stream");
stream.shutdown(Shutdown::Write).expect("shut down writing");
assert_eq!(Event::InstallingUpdate(id), json::from_reader(&stream).expect("read event"));
});
}
});
}
}
|
random_line_split
|
|
dfa_builder.rs
|
//
// Copyright 2016 Andrew Hunter
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
|
//!
//! The DFA builder trait is implemented by classes that initialise DFAs. Deterministic Finite Automata can act as pattern
//! matchers: for any given state, they only have a single possible transition for each input symbol. They can be efficiently
//! implemented using a table-based approach.
//!
//! The main type of DFA that can be built using this trait is the `SymbolRangeDfa`.
//!
use super::state_machine::*;
///
/// Class that can build a particular type of DFA
///
pub trait DfaBuilder<InputSymbol, OutputSymbol, DfaType> {
///
/// Starts the next state for this DFA
///
/// When this is first called, the DFA will enter state 0, then state 1, etc. If this hasn't been called yet then the DFA is not
/// in a valid state and the other calls cannot be made.
///
fn start_state(&mut self);
///
/// Adds a transition to the current state
///
/// Any input symbol can appear exactly once in a state, and must not overlap any other input symbol. Transitions must be in input
/// symbol order.
///
fn transition(&mut self, symbol: InputSymbol, target_state: StateId);
///
/// Sets the current state as an accepting state and sets the output symbol that will be produced if this is the longest match
///
fn accept(&mut self, symbol: OutputSymbol);
///
/// Finishes building the DFA and returns the matcher for the pattern it represents
///
fn build(self) -> DfaType;
}
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
|
random_line_split
|
capture_actor.rs
|
/*
* Exopticon - A free video surveillance system.
* Copyright (C) 2020 David Matthew Mattli <[email protected]>
*
* This file is part of Exopticon.
*
* Exopticon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Exopticon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Exopticon. If not, see <http://www.gnu.org/licenses/>.
*/
use std::convert::TryInto;
use std::env;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Stdio;
use std::time::Instant;
use actix::{
fut::wrap_future, registry::SystemService, Actor, ActorContext, ActorFuture, AsyncContext,
Context, Handler, Message, StreamHandler,
};
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use exserial::models::CaptureMessage;
use tokio::io::AsyncWriteExt;
use tokio::process::{ChildStdin, Command};
use tokio_util::codec::length_delimited;
use uuid::Uuid;
use crate::capture_supervisor::{CaptureActorMetrics, StopCaptureWorker};
use crate::db_registry;
use crate::models::{CreateVideoUnitFile, UpdateVideoUnitFile};
use crate::ws_camera_server::{CameraFrame, FrameResolution, FrameSource, WsCameraServer};
/*
/// Holds messages from capture worker
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct CaptureMessage {
/// type of worker message
#[serde(rename = "type")]
#[serde(default)]
pub message_type: String,
/// if message is a log, the log level
#[serde(default)]
pub level: String,
/// if the message is a log, the log message
#[serde(default)]
pub message: String,
/// if the message is a frame, the jpeg frame
#[serde(rename = "jpegFrame")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub jpeg: Vec<u8>,
/// if the message is a frame, the sd scaled jpeg frame
#[serde(rename = "jpegFrameScaled")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub scaled_jpeg: Vec<u8>,
/// if the message is a frame the original width of the image
#[serde(rename = "unscaledWidth")]
#[serde(default)]
pub unscaled_width: i32,
/// if the message is a frame the original height of the image
#[serde(rename = "unscaledHeight")]
#[serde(default)]
pub unscaled_height: i32,
/// if message is a frame, the offset from the beginning of file
#[serde(default)]
pub offset: i64,
///
#[serde(default)]
pub height: i32,
/// if message is a new file, the created file name
#[serde(default)]
pub filename: String,
/// if message is a new file, the file creation time
#[serde(rename = "beginTime")]
#[serde(default)]
pub begin_time: String,
/// if the message is a closed file, the file end time
#[serde(rename = "endTime")]
#[serde(default)]
pub end_time: String,
}
*/
enum CaptureState {
Running,
Shutdown,
}
/// Holds state of capture actor
pub struct CaptureActor {
/// id of camera actor is capturing video for
pub camera_id: i32,
/// url of video stream
pub stream_url: String,
/// absolute path to video storage
pub storage_path: String,
/// capture start time
pub capture_start: Instant,
/// id of currently open video unit
pub video_unit_id: Option<Uuid>,
/// id of currently open video file
pub video_file_id: Option<i32>,
/// frame offset from beginning of the current video unit
pub offset: i64,
/// filename currently being captured
pub filename: Option<String>,
/// stdin of worker process
pub stdin: Option<ChildStdin>,
/// State of CaptureActor
state: CaptureState,
/// CaptureActor metrics
metrics: CaptureActorMetrics,
}
impl CaptureActor {
/// Returns new initialized `CaptureActor`
pub const fn new(
camera_id: i32,
stream_url: String,
storage_path: String,
capture_start: Instant,
metrics: CaptureActorMetrics,
) -> Self {
Self {
camera_id,
stream_url,
storage_path,
capture_start,
video_unit_id: None,
video_file_id: None,
offset: 0,
filename: None,
stdin: None,
state: CaptureState::Running,
metrics,
}
}
/// Called when the underlying capture worker signals the file as
/// closed. The database record is updated
#[allow(clippy::cast_possible_truncation)]
fn close_file(&self, ctx: &mut Context<Self>, filename: &str, end_time: DateTime<Utc>) {
if let (Some(video_unit_id), Some(video_file_id), Ok(metadata)) = (
self.video_unit_id,
self.video_file_id,
fs::metadata(filename),
) {
let fut = db_registry::get_db().send(UpdateVideoUnitFile {
video_unit_id,
end_time: end_time.naive_utc(),
video_file_id,
size: metadata
.len()
.try_into()
.expect("Unexpected i32 overflow in metadata.len()"),
});
ctx.spawn(
wrap_future::<_, Self>(fut).map(|result, _actor, _ctx| match result {
Ok(Ok((_video_unit, _video_file))) => {}
Ok(Err(e)) => panic!("CaptureWorker: Error updating video unit: {}", e),
Err(e) => panic!("CaptureWorker: Error updating video unit: {}", e),
}),
);
} else {
error!("Error closing file!");
}
}
/// Processes a `CaptureMessage` from the capture worker,
/// performing the appropriate action.
#[allow(clippy::panic)]
fn message_to_action(&mut self, msg: CaptureMessage, ctx: &mut Context<Self>) {
// Check if log
match msg {
CaptureMessage::Log { message } => {
debug!("Capture worker {} log message: {}", self.camera_id, message)
}
CaptureMessage::Frame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
if self.video_unit_id.is_none() {
error!("Video Unit id not set!");
}
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::HD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self
.video_unit_id
.expect("video unit to be set. state violation!"),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::ScaledFrame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
self.metrics.frame_count.inc();
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::SD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self.video_unit_id.unwrap_or_else(Uuid::nil),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::NewFile {
filename,
begin_time,
} => {
let new_id = Uuid::new_v4();
// worker has created a new file. Write video_unit and
// file to database.
if let Ok(date) = begin_time.parse::<DateTime<Utc>>() {
let fut = db_registry::get_db().send(CreateVideoUnitFile {
video_unit_id: new_id,
camera_id: self.camera_id,
monotonic_index: 0,
begin_time: date.naive_utc(),
filename: filename.clone(),
});
self.video_unit_id = Some(new_id);
ctx.spawn(wrap_future::<_, Self>(fut).map(
|result, actor, _ctx| match result {
Ok(Ok((_video_unit, video_file))) => {
actor.video_file_id = Some(video_file.id);
actor.filename = Some(filename)
}
Ok(Err(e)) => {
panic!("Error inserting video unit: db handler error {}", e)
}
Err(e) => panic!("Error inserting video unit: message error {}", e),
},
));
} else {
error!("CaptureWorker: unable to parse begin time: {}", begin_time);
}
self.offset = 0;
}
CaptureMessage::EndFile { filename, end_time } => {
if let Ok(end_time) = end_time.parse::<DateTime<Utc>>() {
self.close_file(ctx, &filename, end_time);
self.video_unit_id = None;
self.video_file_id = None;
self.filename = None;
} else {
error!("CaptureActor: Error handling close file message.");
}
}
}
}
}
impl StreamHandler<Result<BytesMut, std::io::Error>> for CaptureActor {
fn handle(&mut self, item: Result<BytesMut, std::io::Error>, ctx: &mut Context<Self>)
|
fn finished(&mut self, _ctx: &mut Self::Context) {
info!("Stream handler finished!");
}
}
/// Message for capture actor to start worker
#[derive(Message)]
#[rtype(result = "()")]
struct StartWorker;
impl Handler<StartWorker> for CaptureActor {
type Result = ();
fn handle(&mut self, _msg: StartWorker, ctx: &mut Context<Self>) -> Self::Result {
debug!("Launching worker for stream: {}", self.stream_url);
let storage_path = Path::new(&self.storage_path).join(self.camera_id.to_string());
if std::fs::create_dir(&storage_path).is_err() {
// The error returned by create_dir has no information so
// we can't really distinguish between failure
// scenarios. If the directory already exists everything
// is fine, otherwise we fail later.
}
let worker_path = env::var("EXOPTICONWORKERS").unwrap_or_else(|_| "/".to_string());
let executable_path: PathBuf = [worker_path, "cworkers/captureworker".to_string()]
.iter()
.collect();
let hwaccel_method =
env::var("EXOPTICON_HWACCEL_METHOD").unwrap_or_else(|_| "none".to_string());
let mut cmd = Command::new(executable_path);
cmd.arg(&self.stream_url);
cmd.arg(&storage_path);
cmd.arg(hwaccel_method);
cmd.stdout(Stdio::piped());
cmd.stdin(Stdio::piped());
let mut child = cmd.spawn().expect("Failed to launch");
let stdout = child
.stdout
.take()
.expect("Failed to open stdout on worker child");
self.stdin = Some(child.stdin.take().expect("Failed to open stdin"));
let framed_stream = length_delimited::Builder::new().new_read(stdout);
Self::add_stream(framed_stream, ctx);
let fut = wrap_future::<_, Self>(child).map(|_status, actor, ctx| {
// Change this to an error when we can't distinguish
// between intentional and unintentional exits.
debug!("CaptureWorker {}: capture process died...", actor.camera_id);
// Close file if open
if let Some(filename) = &actor.filename {
debug!(
"CaptureActor {}: capture process died, closing file: {}",
actor.camera_id, &filename
);
actor.close_file(ctx, filename, Utc::now());
actor.filename = None;
}
match actor.state {
CaptureState::Running => {
debug!("Shutting down running captureworker...");
actor.stdin = None;
ctx.terminate();
}
CaptureState::Shutdown => {
debug!("Shutting down captureworker...");
actor.stdin = None;
ctx.terminate();
}
}
});
ctx.spawn(fut);
}
}
impl Handler<StopCaptureWorker> for CaptureActor {
type Result = ();
fn handle(&mut self, _msg: StopCaptureWorker, _ctx: &mut Context<Self>) -> Self::Result {
debug!(
"Camera {} received StopCaptureWorker, going to shutdown...",
self.camera_id
);
self.state = CaptureState::Shutdown;
match &mut self.stdin {
Some(stdin) => {
stdin.shutdown();
}
None => {}
}
self.stdin = None;
}
}
impl Actor for CaptureActor {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
info!("Starting CaptureActor for camera id: {}", self.camera_id);
ctx.address().do_send(StartWorker {});
}
fn stopped(&mut self, _ctx: &mut Self::Context) {
info!("Capture Actor for camera id {} is stopped!", self.camera_id);
}
}
|
{
let item = match item {
Ok(b) => b,
Err(e) => {
debug!("stream handle error! {}", e);
self.stdin = None;
ctx.terminate();
return;
}
};
let frame: Result<CaptureMessage, bincode::Error> = bincode::deserialize(&item[..]);
match frame {
Ok(f) => self.message_to_action(f, ctx),
Err(e) => error!("Error deserializing frame! {}", e),
}
}
|
identifier_body
|
capture_actor.rs
|
/*
* Exopticon - A free video surveillance system.
* Copyright (C) 2020 David Matthew Mattli <[email protected]>
*
* This file is part of Exopticon.
*
* Exopticon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Exopticon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Exopticon. If not, see <http://www.gnu.org/licenses/>.
*/
use std::convert::TryInto;
use std::env;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Stdio;
use std::time::Instant;
use actix::{
fut::wrap_future, registry::SystemService, Actor, ActorContext, ActorFuture, AsyncContext,
Context, Handler, Message, StreamHandler,
};
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use exserial::models::CaptureMessage;
use tokio::io::AsyncWriteExt;
use tokio::process::{ChildStdin, Command};
use tokio_util::codec::length_delimited;
use uuid::Uuid;
use crate::capture_supervisor::{CaptureActorMetrics, StopCaptureWorker};
use crate::db_registry;
use crate::models::{CreateVideoUnitFile, UpdateVideoUnitFile};
use crate::ws_camera_server::{CameraFrame, FrameResolution, FrameSource, WsCameraServer};
/*
/// Holds messages from capture worker
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct CaptureMessage {
/// type of worker message
#[serde(rename = "type")]
#[serde(default)]
pub message_type: String,
/// if message is a log, the log level
#[serde(default)]
pub level: String,
/// if the message is a log, the log message
#[serde(default)]
pub message: String,
/// if the message is a frame, the jpeg frame
#[serde(rename = "jpegFrame")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub jpeg: Vec<u8>,
/// if the message is a frame, the sd scaled jpeg frame
#[serde(rename = "jpegFrameScaled")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub scaled_jpeg: Vec<u8>,
/// if the message is a frame the original width of the image
#[serde(rename = "unscaledWidth")]
#[serde(default)]
pub unscaled_width: i32,
/// if the message is a frame the original height of the image
#[serde(rename = "unscaledHeight")]
#[serde(default)]
pub unscaled_height: i32,
/// if message is a frame, the offset from the beginning of file
#[serde(default)]
pub offset: i64,
///
#[serde(default)]
pub height: i32,
/// if message is a new file, the created file name
#[serde(default)]
pub filename: String,
/// if message is a new file, the file creation time
#[serde(rename = "beginTime")]
#[serde(default)]
pub begin_time: String,
/// if the message is a closed file, the file end time
#[serde(rename = "endTime")]
#[serde(default)]
pub end_time: String,
}
*/
enum CaptureState {
Running,
Shutdown,
}
/// Holds state of capture actor
pub struct CaptureActor {
/// id of camera actor is capturing video for
pub camera_id: i32,
/// url of video stream
pub stream_url: String,
/// absolute path to video storage
pub storage_path: String,
/// capture start time
pub capture_start: Instant,
/// id of currently open video unit
pub video_unit_id: Option<Uuid>,
/// id of currently open video file
pub video_file_id: Option<i32>,
/// frame offset from beginning of the current video unit
pub offset: i64,
/// filename currently being captured
pub filename: Option<String>,
/// stdin of worker process
pub stdin: Option<ChildStdin>,
/// State of CaptureActor
state: CaptureState,
/// CaptureActor metrics
metrics: CaptureActorMetrics,
}
impl CaptureActor {
/// Returns new initialized `CaptureActor`
pub const fn new(
camera_id: i32,
stream_url: String,
storage_path: String,
capture_start: Instant,
metrics: CaptureActorMetrics,
) -> Self {
Self {
camera_id,
stream_url,
storage_path,
capture_start,
video_unit_id: None,
video_file_id: None,
offset: 0,
filename: None,
stdin: None,
state: CaptureState::Running,
metrics,
}
}
/// Called when the underlying capture worker signals the file as
/// closed. The database record is updated
#[allow(clippy::cast_possible_truncation)]
fn close_file(&self, ctx: &mut Context<Self>, filename: &str, end_time: DateTime<Utc>) {
if let (Some(video_unit_id), Some(video_file_id), Ok(metadata)) = (
self.video_unit_id,
self.video_file_id,
fs::metadata(filename),
) {
let fut = db_registry::get_db().send(UpdateVideoUnitFile {
video_unit_id,
end_time: end_time.naive_utc(),
video_file_id,
size: metadata
.len()
.try_into()
.expect("Unexpected i32 overflow in metadata.len()"),
});
ctx.spawn(
wrap_future::<_, Self>(fut).map(|result, _actor, _ctx| match result {
Ok(Ok((_video_unit, _video_file))) => {}
Ok(Err(e)) => panic!("CaptureWorker: Error updating video unit: {}", e),
Err(e) => panic!("CaptureWorker: Error updating video unit: {}", e),
}),
);
} else {
|
}
/// Processes a `CaptureMessage` from the capture worker,
/// performing the appropriate action.
#[allow(clippy::panic)]
fn message_to_action(&mut self, msg: CaptureMessage, ctx: &mut Context<Self>) {
// Check if log
match msg {
CaptureMessage::Log { message } => {
debug!("Capture worker {} log message: {}", self.camera_id, message)
}
CaptureMessage::Frame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
if self.video_unit_id.is_none() {
error!("Video Unit id not set!");
}
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::HD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self
.video_unit_id
.expect("video unit to be set. state violation!"),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::ScaledFrame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
self.metrics.frame_count.inc();
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::SD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self.video_unit_id.unwrap_or_else(Uuid::nil),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::NewFile {
filename,
begin_time,
} => {
let new_id = Uuid::new_v4();
// worker has created a new file. Write video_unit and
// file to database.
if let Ok(date) = begin_time.parse::<DateTime<Utc>>() {
let fut = db_registry::get_db().send(CreateVideoUnitFile {
video_unit_id: new_id,
camera_id: self.camera_id,
monotonic_index: 0,
begin_time: date.naive_utc(),
filename: filename.clone(),
});
self.video_unit_id = Some(new_id);
ctx.spawn(wrap_future::<_, Self>(fut).map(
|result, actor, _ctx| match result {
Ok(Ok((_video_unit, video_file))) => {
actor.video_file_id = Some(video_file.id);
actor.filename = Some(filename)
}
Ok(Err(e)) => {
panic!("Error inserting video unit: db handler error {}", e)
}
Err(e) => panic!("Error inserting video unit: message error {}", e),
},
));
} else {
error!("CaptureWorker: unable to parse begin time: {}", begin_time);
}
self.offset = 0;
}
CaptureMessage::EndFile { filename, end_time } => {
if let Ok(end_time) = end_time.parse::<DateTime<Utc>>() {
self.close_file(ctx, &filename, end_time);
self.video_unit_id = None;
self.video_file_id = None;
self.filename = None;
} else {
error!("CaptureActor: Error handling close file message.");
}
}
}
}
}
impl StreamHandler<Result<BytesMut, std::io::Error>> for CaptureActor {
fn handle(&mut self, item: Result<BytesMut, std::io::Error>, ctx: &mut Context<Self>) {
let item = match item {
Ok(b) => b,
Err(e) => {
debug!("stream handle error! {}", e);
self.stdin = None;
ctx.terminate();
return;
}
};
let frame: Result<CaptureMessage, bincode::Error> = bincode::deserialize(&item[..]);
match frame {
Ok(f) => self.message_to_action(f, ctx),
Err(e) => error!("Error deserializing frame! {}", e),
}
}
fn finished(&mut self, _ctx: &mut Self::Context) {
info!("Stream handler finished!");
}
}
/// Message for capture actor to start worker
#[derive(Message)]
#[rtype(result = "()")]
struct StartWorker;
impl Handler<StartWorker> for CaptureActor {
type Result = ();
fn handle(&mut self, _msg: StartWorker, ctx: &mut Context<Self>) -> Self::Result {
debug!("Launching worker for stream: {}", self.stream_url);
let storage_path = Path::new(&self.storage_path).join(self.camera_id.to_string());
if std::fs::create_dir(&storage_path).is_err() {
// The error returned by create_dir has no information so
// we can't really distinguish between failure
// scenarios. If the directory already exists everything
// is fine, otherwise we fail later.
}
let worker_path = env::var("EXOPTICONWORKERS").unwrap_or_else(|_| "/".to_string());
let executable_path: PathBuf = [worker_path, "cworkers/captureworker".to_string()]
.iter()
.collect();
let hwaccel_method =
env::var("EXOPTICON_HWACCEL_METHOD").unwrap_or_else(|_| "none".to_string());
let mut cmd = Command::new(executable_path);
cmd.arg(&self.stream_url);
cmd.arg(&storage_path);
cmd.arg(hwaccel_method);
cmd.stdout(Stdio::piped());
cmd.stdin(Stdio::piped());
let mut child = cmd.spawn().expect("Failed to launch");
let stdout = child
.stdout
.take()
.expect("Failed to open stdout on worker child");
self.stdin = Some(child.stdin.take().expect("Failed to open stdin"));
let framed_stream = length_delimited::Builder::new().new_read(stdout);
Self::add_stream(framed_stream, ctx);
let fut = wrap_future::<_, Self>(child).map(|_status, actor, ctx| {
// Change this to an error when we can't distinguish
// between intentional and unintentional exits.
debug!("CaptureWorker {}: capture process died...", actor.camera_id);
// Close file if open
if let Some(filename) = &actor.filename {
debug!(
"CaptureActor {}: capture process died, closing file: {}",
actor.camera_id, &filename
);
actor.close_file(ctx, filename, Utc::now());
actor.filename = None;
}
match actor.state {
CaptureState::Running => {
debug!("Shutting down running captureworker...");
actor.stdin = None;
ctx.terminate();
}
CaptureState::Shutdown => {
debug!("Shutting down captureworker...");
actor.stdin = None;
ctx.terminate();
}
}
});
ctx.spawn(fut);
}
}
impl Handler<StopCaptureWorker> for CaptureActor {
type Result = ();
fn handle(&mut self, _msg: StopCaptureWorker, _ctx: &mut Context<Self>) -> Self::Result {
debug!(
"Camera {} received StopCaptureWorker, going to shutdown...",
self.camera_id
);
self.state = CaptureState::Shutdown;
match &mut self.stdin {
Some(stdin) => {
stdin.shutdown();
}
None => {}
}
self.stdin = None;
}
}
impl Actor for CaptureActor {
type Context = Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
info!("Starting CaptureActor for camera id: {}", self.camera_id);
ctx.address().do_send(StartWorker {});
}
fn stopped(&mut self, _ctx: &mut Self::Context) {
info!("Capture Actor for camera id {} is stopped!", self.camera_id);
}
}
|
error!("Error closing file!");
}
|
random_line_split
|
capture_actor.rs
|
/*
* Exopticon - A free video surveillance system.
* Copyright (C) 2020 David Matthew Mattli <[email protected]>
*
* This file is part of Exopticon.
*
* Exopticon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Exopticon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Exopticon. If not, see <http://www.gnu.org/licenses/>.
*/
use std::convert::TryInto;
use std::env;
use std::fs;
use std::path::Path;
use std::path::PathBuf;
use std::process::Stdio;
use std::time::Instant;
use actix::{
fut::wrap_future, registry::SystemService, Actor, ActorContext, ActorFuture, AsyncContext,
Context, Handler, Message, StreamHandler,
};
use bytes::BytesMut;
use chrono::{DateTime, Utc};
use exserial::models::CaptureMessage;
use tokio::io::AsyncWriteExt;
use tokio::process::{ChildStdin, Command};
use tokio_util::codec::length_delimited;
use uuid::Uuid;
use crate::capture_supervisor::{CaptureActorMetrics, StopCaptureWorker};
use crate::db_registry;
use crate::models::{CreateVideoUnitFile, UpdateVideoUnitFile};
use crate::ws_camera_server::{CameraFrame, FrameResolution, FrameSource, WsCameraServer};
/*
/// Holds messages from capture worker
#[derive(Default, Debug, PartialEq, Deserialize, Serialize)]
pub struct CaptureMessage {
/// type of worker message
#[serde(rename = "type")]
#[serde(default)]
pub message_type: String,
/// if message is a log, the log level
#[serde(default)]
pub level: String,
/// if the message is a log, the log message
#[serde(default)]
pub message: String,
/// if the message is a frame, the jpeg frame
#[serde(rename = "jpegFrame")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub jpeg: Vec<u8>,
/// if the message is a frame, the sd scaled jpeg frame
#[serde(rename = "jpegFrameScaled")]
#[serde(default)]
#[serde(with = "serde_bytes")]
pub scaled_jpeg: Vec<u8>,
/// if the message is a frame the original width of the image
#[serde(rename = "unscaledWidth")]
#[serde(default)]
pub unscaled_width: i32,
/// if the message is a frame the original height of the image
#[serde(rename = "unscaledHeight")]
#[serde(default)]
pub unscaled_height: i32,
/// if message is a frame, the offset from the beginning of file
#[serde(default)]
pub offset: i64,
///
#[serde(default)]
pub height: i32,
/// if message is a new file, the created file name
#[serde(default)]
pub filename: String,
/// if message is a new file, the file creation time
#[serde(rename = "beginTime")]
#[serde(default)]
pub begin_time: String,
/// if the message is a closed file, the file end time
#[serde(rename = "endTime")]
#[serde(default)]
pub end_time: String,
}
*/
enum CaptureState {
Running,
Shutdown,
}
/// Holds state of capture actor
pub struct CaptureActor {
/// id of camera actor is capturing video for
pub camera_id: i32,
/// url of video stream
pub stream_url: String,
/// absolute path to video storage
pub storage_path: String,
/// capture start time
pub capture_start: Instant,
/// id of currently open video unit
pub video_unit_id: Option<Uuid>,
/// id of currently open video file
pub video_file_id: Option<i32>,
/// frame offset from beginning of the current video unit
pub offset: i64,
/// filename currently being captured
pub filename: Option<String>,
/// stdin of worker process
pub stdin: Option<ChildStdin>,
/// State of CaptureActor
state: CaptureState,
/// CaptureActor metrics
metrics: CaptureActorMetrics,
}
impl CaptureActor {
/// Returns new initialized `CaptureActor`
pub const fn new(
camera_id: i32,
stream_url: String,
storage_path: String,
capture_start: Instant,
metrics: CaptureActorMetrics,
) -> Self {
Self {
camera_id,
stream_url,
storage_path,
capture_start,
video_unit_id: None,
video_file_id: None,
offset: 0,
filename: None,
stdin: None,
state: CaptureState::Running,
metrics,
}
}
/// Called when the underlying capture worker signals the file as
/// closed. The database record is updated
#[allow(clippy::cast_possible_truncation)]
fn
|
(&self, ctx: &mut Context<Self>, filename: &str, end_time: DateTime<Utc>) {
if let (Some(video_unit_id), Some(video_file_id), Ok(metadata)) = (
self.video_unit_id,
self.video_file_id,
fs::metadata(filename),
) {
let fut = db_registry::get_db().send(UpdateVideoUnitFile {
video_unit_id,
end_time: end_time.naive_utc(),
video_file_id,
size: metadata
.len()
.try_into()
.expect("Unexpected i32 overflow in metadata.len()"),
});
ctx.spawn(
wrap_future::<_, Self>(fut).map(|result, _actor, _ctx| match result {
Ok(Ok((_video_unit, _video_file))) => {}
Ok(Err(e)) => panic!("CaptureWorker: Error updating video unit: {}", e),
Err(e) => panic!("CaptureWorker: Error updating video unit: {}", e),
}),
);
} else {
error!("Error closing file!");
}
}
/// Processes a `CaptureMessage` from the capture worker,
/// performing the appropriate action.
#[allow(clippy::panic)]
fn message_to_action(&mut self, msg: CaptureMessage, ctx: &mut Context<Self>) {
// Check if log
match msg {
CaptureMessage::Log { message } => {
debug!("Capture worker {} log message: {}", self.camera_id, message)
}
CaptureMessage::Frame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
if self.video_unit_id.is_none() {
error!("Video Unit id not set!");
}
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::HD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self
.video_unit_id
.expect("video unit to be set. state violation!"),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::ScaledFrame {
jpeg,
offset,
unscaled_width,
unscaled_height,
} => {
self.metrics.frame_count.inc();
WsCameraServer::from_registry().do_send(CameraFrame {
camera_id: self.camera_id,
jpeg,
observations: Vec::new(),
resolution: FrameResolution::SD,
source: FrameSource::Camera {
camera_id: self.camera_id,
analysis_offset: Instant::now().duration_since(self.capture_start),
},
video_unit_id: self.video_unit_id.unwrap_or_else(Uuid::nil),
offset,
unscaled_width,
unscaled_height,
});
self.offset += 1;
}
CaptureMessage::NewFile {
filename,
begin_time,
} => {
let new_id = Uuid::new_v4();
// worker has created a new file. Write video_unit and
// file to database.
if let Ok(date) = begin_time.parse::<DateTime<Utc>>() {
let fut = db_registry::get_db().send(CreateVideoUnitFile {
video_unit_id: new_id,
camera_id: self.camera_id,
monotonic_index: 0,
begin_time: date.naive_utc(),
filename: filename.clone(),
});
self.video_unit_id = Some(new_id);
ctx.spawn(wrap_future::<_, Self>(fut).map(
|result, actor, _ctx| match result {
Ok(Ok((_video_unit, video_file))) => {
actor.video_file_id = Some(video_file.id);
actor.filename = Some(filename)
}
Ok(Err(e)) => {
panic!("Error inserting video unit: db handler error {}", e)
}
Err(e) => panic!("Error inserting video unit: message error {}", e),
},
));
} else {
error!("CaptureWorker: unable to parse begin time: {}", begin_time);
}
self.offset = 0;
}
CaptureMessage::EndFile { filename, end_time } => {
if let Ok(end_time) = end_time.parse::<DateTime<Utc>>() {
self.close_file(ctx, &filename, end_time);
self.video_unit_id = None;
self.video_file_id = None;
self.filename = None;
} else {
error!("CaptureActor: Error handling close file message.");
}
}
}
}
}
impl StreamHandler<Result<BytesMut, std::io::Error>> for CaptureActor {
    /// Handles one length-delimited chunk read from the worker's stdout by
    /// decoding it as a bincode `CaptureMessage` and acting on it.
    fn handle(&mut self, item: Result<BytesMut, std::io::Error>, ctx: &mut Context<Self>) {
        match item {
            Err(e) => {
                // The pipe to the worker is broken; drop stdin and stop.
                debug!("stream handle error! {}", e);
                self.stdin = None;
                ctx.terminate();
            }
            Ok(bytes) => match bincode::deserialize::<CaptureMessage>(&bytes) {
                Ok(msg) => self.message_to_action(msg, ctx),
                Err(e) => error!("Error deserializing frame! {}", e),
            },
        }
    }
    /// Called once the worker's stdout stream ends.
    fn finished(&mut self, _ctx: &mut Self::Context) {
        info!("Stream handler finished!");
    }
}
/// Message for capture actor to start worker
#[derive(Message)]
#[rtype(result = "()")]
struct StartWorker;
impl Handler<StartWorker> for CaptureActor {
    type Result = ();
    /// Spawns the external capture worker process and wires its stdout into
    /// this actor as a length-delimited message stream.
    ///
    /// The worker binary is resolved under `$EXOPTICONWORKERS` (default `/`)
    /// at `cworkers/captureworker` and is passed the stream url, the
    /// per-camera storage directory, and the hardware acceleration method
    /// from `$EXOPTICON_HWACCEL_METHOD` (default `"none"`).
    fn handle(&mut self, _msg: StartWorker, ctx: &mut Context<Self>) -> Self::Result {
        debug!("Launching worker for stream: {}", self.stream_url);
        // Each camera gets its own subdirectory under the storage root.
        let storage_path = Path::new(&self.storage_path).join(self.camera_id.to_string());
        if std::fs::create_dir(&storage_path).is_err() {
            // The error returned by create_dir has no information so
            // we can't really distinguish between failure
            // scenarios. If the directory already exists everything
            // is fine, otherwise we fail later.
        }
        let worker_path = env::var("EXOPTICONWORKERS").unwrap_or_else(|_| "/".to_string());
        let executable_path: PathBuf = [worker_path, "cworkers/captureworker".to_string()]
            .iter()
            .collect();
        let hwaccel_method =
            env::var("EXOPTICON_HWACCEL_METHOD").unwrap_or_else(|_| "none".to_string());
        let mut cmd = Command::new(executable_path);
        cmd.arg(&self.stream_url);
        cmd.arg(&storage_path);
        cmd.arg(hwaccel_method);
        // Capture stdout for worker->actor messages; keep stdin so we can
        // later signal shutdown by closing it.
        cmd.stdout(Stdio::piped());
        cmd.stdin(Stdio::piped());
        let mut child = cmd.spawn().expect("Failed to launch");
        let stdout = child
            .stdout
            .take()
            .expect("Failed to open stdout on worker child");
        self.stdin = Some(child.stdin.take().expect("Failed to open stdin"));
        // Worker messages arrive as length-prefixed frames; decode the
        // framing here and feed each frame to the StreamHandler impl.
        let framed_stream = length_delimited::Builder::new().new_read(stdout);
        Self::add_stream(framed_stream, ctx);
        // Await the child's exit so we can clean up and stop the actor.
        let fut = wrap_future::<_, Self>(child).map(|_status, actor, ctx| {
            // Change this to an error when we can't distinguish
            // between intentional and unintentional exits.
            debug!("CaptureWorker {}: capture process died...", actor.camera_id);
            // Close file if open
            if let Some(filename) = &actor.filename {
                debug!(
                    "CaptureActor {}: capture process died, closing file: {}",
                    actor.camera_id, &filename
                );
                actor.close_file(ctx, filename, Utc::now());
                actor.filename = None;
            }
            // Whether the exit was requested (Shutdown) or not (Running),
            // the actor cannot continue without its worker process.
            match actor.state {
                CaptureState::Running => {
                    debug!("Shutting down running captureworker...");
                    actor.stdin = None;
                    ctx.terminate();
                }
                CaptureState::Shutdown => {
                    debug!("Shutting down captureworker...");
                    actor.stdin = None;
                    ctx.terminate();
                }
            }
        });
        ctx.spawn(fut);
    }
}
impl Handler<StopCaptureWorker> for CaptureActor {
    type Result = ();
    /// Initiates an orderly shutdown of the capture worker.
    ///
    /// Marks the actor state as `Shutdown` and closes the worker's stdin;
    /// the child-exit future spawned in `Handler<StartWorker>` performs the
    /// final cleanup once the process terminates.
    fn handle(&mut self, _msg: StopCaptureWorker, _ctx: &mut Context<Self>) -> Self::Result {
        debug!(
            "Camera {} received StopCaptureWorker, going to shutdown...",
            self.camera_id
        );
        self.state = CaptureState::Shutdown;
        // The original code called `stdin.shutdown()` here, but
        // `AsyncWriteExt::shutdown` returns a future that was never awaited,
        // so the call had no effect. Dropping the handle is what actually
        // closes the pipe and delivers EOF to the worker process.
        self.stdin = None;
    }
}
impl Actor for CaptureActor {
    type Context = Context<Self>;
    /// Kicks off the capture worker as soon as the actor starts by sending
    /// itself a `StartWorker` message.
    fn started(&mut self, ctx: &mut Self::Context) {
        info!("Starting CaptureActor for camera id: {}", self.camera_id);
        ctx.address().do_send(StartWorker {});
    }
    /// Logs actor termination; worker cleanup happens in the child-exit
    /// future, not here.
    fn stopped(&mut self, _ctx: &mut Self::Context) {
        info!("Capture Actor for camera id {} is stopped!", self.camera_id);
    }
}
|
close_file
|
identifier_name
|
basic_types.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
//! Ethcore basic typenames.
use util::hash::H2048;
/// Type for a 2048-bit log-bloom, as used by our blocks.
pub type LogBloom = H2048;
/// Constant 2048-bit datum for 0. Often used as a default.
pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
#[cfg_attr(feature="dev", allow(enum_variant_names))]
/// Semantic boolean for when a seal/signature is included.
pub enum Seal {
/// The seal/signature is included.
With,
/// The seal/signature is not included.
Without,
}
|
random_line_split
|
|
basic_types.rs
|
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Ethcore basic typenames.
use util::hash::H2048;
/// Type for a 2048-bit log-bloom, as used by our blocks.
pub type LogBloom = H2048;
/// Constant 2048-bit datum for 0. Often used as a default.
pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
#[cfg_attr(feature="dev", allow(enum_variant_names))]
/// Semantic boolean for when a seal/signature is included.
pub enum
|
{
/// The seal/signature is included.
With,
/// The seal/signature is not included.
Without,
}
|
Seal
|
identifier_name
|
img.rs
|
use std::default::{Default};
use std::cmp::{min, max};
use std::convert::{Into};
use image::{Primitive, GenericImage, Pixel, ImageBuffer, Rgba};
use geom::{V2, Rect};
use rgb;
/// Set alpha channel to transparent if pixels have a specific color.
///
/// Produces a new `Rgba<u8>` buffer: pixels whose rgb channels exactly
/// match `color` get the default (zero) alpha, all others keep theirs.
pub fn color_key<P: Pixel<Subpixel=u8>, I: GenericImage<Pixel=P>, C: Into<rgb::SRgba>>(
    image: &I, color: C) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
    let key = color.into();
    let (width, height) = image.dimensions();
    ImageBuffer::from_fn(width, height, |px, py| {
        let (r, g, b, a) = image.get_pixel(px, py).to_rgba().channels4();
        let alpha = if r == key.r && g == key.g && b == key.b {
            Default::default()
        } else {
            a
        };
        Pixel::from_channels(r, g, b, alpha)
    })
}
/// Return the rectangle enclosing the parts of the image that aren't fully
/// transparent.
pub fn crop_alpha<T: Primitive+Default, P: Pixel<Subpixel=T>, I: GenericImage<Pixel=P>>(
image: &I) -> Rect<i32> {
let (w, h) = image.dimensions();
let mut p1 = V2(w as i32, h as i32);
let mut p2 = V2(0i32, 0i32);
let transparent: T = Default::default();
for y in 0..(h as i32) {
for x in 0..(w as i32) {
let (_, _, _, a) = image.get_pixel(x as u32, y as u32).channels4();
if a!= transparent {
p1.0 = min(x, p1.0);
p2.0 = max(x + 1, p2.0);
p1.1 = min(y, p1.1);
p2.1 = max(y + 1, p2.1);
}
}
}
if p1.0 > p2.0 { Rect(V2(0, 0), V2(0, 0)) } // Empty image.
else { Rect(p1, p2 - p1) }
}
pub fn
|
<T, P, I, J>(image: &I, target: &mut J, offset: V2<i32>)
where T: Primitive+Default,
P: Pixel<Subpixel=T>,
I: GenericImage<Pixel=P>,
J: GenericImage<Pixel=P> {
let (w, h) = image.dimensions();
// TODO: Check for going over bounds.
for y in 0..(h) {
for x in 0..(w) {
target.put_pixel(x + offset.0 as u32, y + offset.1 as u32, image.get_pixel(x, y));
}
}
}
|
blit
|
identifier_name
|
img.rs
|
use std::default::{Default};
use std::cmp::{min, max};
use std::convert::{Into};
use image::{Primitive, GenericImage, Pixel, ImageBuffer, Rgba};
use geom::{V2, Rect};
use rgb;
/// Set alpha channel to transparent if pixels have a specific color.
|
let (w, h) = image.dimensions();
let srgba = color.into();
ImageBuffer::from_fn(w, h, |x, y| {
let (pr, pg, pb, mut pa) = image.get_pixel(x, y).to_rgba().channels4();
if pr == srgba.r && pg == srgba.g && pb == srgba.b {
pa = Default::default();
}
Pixel::from_channels(pr, pg, pb, pa)
})
}
/// Return the rectangle enclosing the parts of the image that aren't fully
/// transparent.
pub fn crop_alpha<T: Primitive+Default, P: Pixel<Subpixel=T>, I: GenericImage<Pixel=P>>(
    image: &I) -> Rect<i32> {
    let (width, height) = image.dimensions();
    let transparent: T = Default::default();
    // Running bounding box: `lo` is the inclusive top-left corner and `hi`
    // the exclusive bottom-right corner; start inverted so the first opaque
    // pixel initializes both.
    let mut lo = V2(width as i32, height as i32);
    let mut hi = V2(0i32, 0i32);
    for y in 0..(height as i32) {
        for x in 0..(width as i32) {
            let (_, _, _, alpha) = image.get_pixel(x as u32, y as u32).channels4();
            if alpha != transparent {
                lo.0 = min(x, lo.0);
                lo.1 = min(y, lo.1);
                hi.0 = max(x + 1, hi.0);
                hi.1 = max(y + 1, hi.1);
            }
        }
    }
    if lo.0 > hi.0 {
        // No opaque pixel was ever seen: the bounds are still inverted.
        Rect(V2(0, 0), V2(0, 0))
    } else {
        Rect(lo, hi - lo)
    }
}
pub fn blit<T, P, I, J>(image: &I, target: &mut J, offset: V2<i32>)
where T: Primitive+Default,
P: Pixel<Subpixel=T>,
I: GenericImage<Pixel=P>,
J: GenericImage<Pixel=P> {
let (w, h) = image.dimensions();
// TODO: Check for going over bounds.
for y in 0..(h) {
for x in 0..(w) {
target.put_pixel(x + offset.0 as u32, y + offset.1 as u32, image.get_pixel(x, y));
}
}
}
|
pub fn color_key<P: Pixel<Subpixel=u8>, I: GenericImage<Pixel=P>, C: Into<rgb::SRgba>>(
image: &I, color: C) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
|
random_line_split
|
img.rs
|
use std::default::{Default};
use std::cmp::{min, max};
use std::convert::{Into};
use image::{Primitive, GenericImage, Pixel, ImageBuffer, Rgba};
use geom::{V2, Rect};
use rgb;
/// Set alpha channel to transparent if pixels have a specific color.
pub fn color_key<P: Pixel<Subpixel=u8>, I: GenericImage<Pixel=P>, C: Into<rgb::SRgba>>(
image: &I, color: C) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
let (w, h) = image.dimensions();
let srgba = color.into();
ImageBuffer::from_fn(w, h, |x, y| {
let (pr, pg, pb, mut pa) = image.get_pixel(x, y).to_rgba().channels4();
if pr == srgba.r && pg == srgba.g && pb == srgba.b {
pa = Default::default();
}
Pixel::from_channels(pr, pg, pb, pa)
})
}
/// Return the rectangle enclosing the parts of the image that aren't fully
/// transparent.
pub fn crop_alpha<T: Primitive+Default, P: Pixel<Subpixel=T>, I: GenericImage<Pixel=P>>(
image: &I) -> Rect<i32> {
let (w, h) = image.dimensions();
let mut p1 = V2(w as i32, h as i32);
let mut p2 = V2(0i32, 0i32);
let transparent: T = Default::default();
for y in 0..(h as i32) {
for x in 0..(w as i32) {
let (_, _, _, a) = image.get_pixel(x as u32, y as u32).channels4();
if a!= transparent {
p1.0 = min(x, p1.0);
p2.0 = max(x + 1, p2.0);
p1.1 = min(y, p1.1);
p2.1 = max(y + 1, p2.1);
}
}
}
if p1.0 > p2.0
|
// Empty image.
else { Rect(p1, p2 - p1) }
}
pub fn blit<T, P, I, J>(image: &I, target: &mut J, offset: V2<i32>)
where T: Primitive+Default,
P: Pixel<Subpixel=T>,
I: GenericImage<Pixel=P>,
J: GenericImage<Pixel=P> {
let (w, h) = image.dimensions();
// TODO: Check for going over bounds.
for y in 0..(h) {
for x in 0..(w) {
target.put_pixel(x + offset.0 as u32, y + offset.1 as u32, image.get_pixel(x, y));
}
}
}
|
{ Rect(V2(0, 0), V2(0, 0)) }
|
conditional_block
|
img.rs
|
use std::default::{Default};
use std::cmp::{min, max};
use std::convert::{Into};
use image::{Primitive, GenericImage, Pixel, ImageBuffer, Rgba};
use geom::{V2, Rect};
use rgb;
/// Set alpha channel to transparent if pixels have a specific color.
pub fn color_key<P: Pixel<Subpixel=u8>, I: GenericImage<Pixel=P>, C: Into<rgb::SRgba>>(
image: &I, color: C) -> ImageBuffer<Rgba<u8>, Vec<u8>> {
let (w, h) = image.dimensions();
let srgba = color.into();
ImageBuffer::from_fn(w, h, |x, y| {
let (pr, pg, pb, mut pa) = image.get_pixel(x, y).to_rgba().channels4();
if pr == srgba.r && pg == srgba.g && pb == srgba.b {
pa = Default::default();
}
Pixel::from_channels(pr, pg, pb, pa)
})
}
/// Return the rectangle enclosing the parts of the image that aren't fully
/// transparent.
pub fn crop_alpha<T: Primitive+Default, P: Pixel<Subpixel=T>, I: GenericImage<Pixel=P>>(
image: &I) -> Rect<i32> {
let (w, h) = image.dimensions();
let mut p1 = V2(w as i32, h as i32);
let mut p2 = V2(0i32, 0i32);
let transparent: T = Default::default();
for y in 0..(h as i32) {
for x in 0..(w as i32) {
let (_, _, _, a) = image.get_pixel(x as u32, y as u32).channels4();
if a!= transparent {
p1.0 = min(x, p1.0);
p2.0 = max(x + 1, p2.0);
p1.1 = min(y, p1.1);
p2.1 = max(y + 1, p2.1);
}
}
}
if p1.0 > p2.0 { Rect(V2(0, 0), V2(0, 0)) } // Empty image.
else { Rect(p1, p2 - p1) }
}
pub fn blit<T, P, I, J>(image: &I, target: &mut J, offset: V2<i32>)
where T: Primitive+Default,
P: Pixel<Subpixel=T>,
I: GenericImage<Pixel=P>,
J: GenericImage<Pixel=P>
|
{
let (w, h) = image.dimensions();
// TODO: Check for going over bounds.
for y in 0..(h) {
for x in 0..(w) {
target.put_pixel(x + offset.0 as u32, y + offset.1 as u32, image.get_pixel(x, y));
}
}
}
|
identifier_body
|
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
extern crate hashglobe;
extern crate smallvec;
#[cfg(feature = "known_system_malloc")]
use hashglobe::alloc;
use hashglobe::FailedAllocationError;
use smallvec::Array;
use smallvec::SmallVec;
use std::vec::Vec;
pub trait FallibleVec<T> {
/// Append |val| to the end of |vec|. Returns Ok(()) on success,
/// Err(reason) if it fails, with |reason| describing the failure.
fn try_push(&mut self, value: T) -> Result<(), FailedAllocationError>;
}
/////////////////////////////////////////////////////////////////
// Vec
impl<T> FallibleVec<T> for Vec<T> {
#[inline(always)]
fn try_push(&mut self, val: T) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |vec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_vec<T>(vec: &mut Vec<T>) -> Result<(), FailedAllocationError> {
use std::mem;
let old_ptr = vec.as_mut_ptr();
let old_len = vec.len();
let old_cap: usize = vec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?
};
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?;
let new_ptr = unsafe {
if old_cap == 0 {
alloc::alloc(new_size_bytes, 0)
} else {
alloc::realloc(old_ptr as *mut u8, new_size_bytes)
}
};
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating Vec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T, old_len, new_cap) };
mem::forget(mem::replace(vec, new_vec));
Ok(())
}
/////////////////////////////////////////////////////////////////
// SmallVec
impl<T: Array> FallibleVec<T::Item> for SmallVec<T> {
#[inline(always)]
fn try_push(&mut self, val: T::Item) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_small_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |svec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_small_vec<T>(svec: &mut SmallVec<T>) -> Result<(), FailedAllocationError>
where
T: Array,
{
use std::mem;
use std::ptr::copy_nonoverlapping;
let old_ptr = svec.as_mut_ptr();
let old_len = svec.len();
let old_cap: usize = svec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?
};
// This surely shouldn't fail, if |old_cap| was previously accepted as a
// valid value. But err on the side of caution.
let old_size_bytes = old_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_ptr;
if svec.spilled() {
// There's an old block to free, and, presumably, old contents to
// copy. realloc takes care of both aspects.
unsafe {
new_ptr = alloc::realloc(old_ptr as *mut u8, new_size_bytes);
}
} else {
// There's no old block to free. There may be old contents to copy.
unsafe {
new_ptr = alloc::alloc(new_size_bytes, 0);
if!new_ptr.is_null() && old_size_bytes > 0 {
copy_nonoverlapping(old_ptr as *const u8, new_ptr as *mut u8, old_size_bytes);
}
}
}
|
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating SmallVec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T::Item, old_len, new_cap) };
let new_svec = SmallVec::from_vec(new_vec);
mem::forget(mem::replace(svec, new_svec));
Ok(())
}
|
random_line_split
|
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
extern crate hashglobe;
extern crate smallvec;
#[cfg(feature = "known_system_malloc")]
use hashglobe::alloc;
use hashglobe::FailedAllocationError;
use smallvec::Array;
use smallvec::SmallVec;
use std::vec::Vec;
pub trait FallibleVec<T> {
/// Append |val| to the end of |vec|. Returns Ok(()) on success,
/// Err(reason) if it fails, with |reason| describing the failure.
fn try_push(&mut self, value: T) -> Result<(), FailedAllocationError>;
}
/////////////////////////////////////////////////////////////////
// Vec
impl<T> FallibleVec<T> for Vec<T> {
#[inline(always)]
fn try_push(&mut self, val: T) -> Result<(), FailedAllocationError>
|
}
// Double the capacity of |vec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_vec<T>(vec: &mut Vec<T>) -> Result<(), FailedAllocationError> {
use std::mem;
let old_ptr = vec.as_mut_ptr();
let old_len = vec.len();
let old_cap: usize = vec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?
};
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?;
let new_ptr = unsafe {
if old_cap == 0 {
alloc::alloc(new_size_bytes, 0)
} else {
alloc::realloc(old_ptr as *mut u8, new_size_bytes)
}
};
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating Vec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T, old_len, new_cap) };
mem::forget(mem::replace(vec, new_vec));
Ok(())
}
/////////////////////////////////////////////////////////////////
// SmallVec
impl<T: Array> FallibleVec<T::Item> for SmallVec<T> {
#[inline(always)]
fn try_push(&mut self, val: T::Item) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_small_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |svec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_small_vec<T>(svec: &mut SmallVec<T>) -> Result<(), FailedAllocationError>
where
T: Array,
{
use std::mem;
use std::ptr::copy_nonoverlapping;
let old_ptr = svec.as_mut_ptr();
let old_len = svec.len();
let old_cap: usize = svec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?
};
// This surely shouldn't fail, if |old_cap| was previously accepted as a
// valid value. But err on the side of caution.
let old_size_bytes = old_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_ptr;
if svec.spilled() {
// There's an old block to free, and, presumably, old contents to
// copy. realloc takes care of both aspects.
unsafe {
new_ptr = alloc::realloc(old_ptr as *mut u8, new_size_bytes);
}
} else {
// There's no old block to free. There may be old contents to copy.
unsafe {
new_ptr = alloc::alloc(new_size_bytes, 0);
if!new_ptr.is_null() && old_size_bytes > 0 {
copy_nonoverlapping(old_ptr as *const u8, new_ptr as *mut u8, old_size_bytes);
}
}
}
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating SmallVec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T::Item, old_len, new_cap) };
let new_svec = SmallVec::from_vec(new_vec);
mem::forget(mem::replace(svec, new_svec));
Ok(())
}
|
{
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
|
identifier_body
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
extern crate hashglobe;
extern crate smallvec;
#[cfg(feature = "known_system_malloc")]
use hashglobe::alloc;
use hashglobe::FailedAllocationError;
use smallvec::Array;
use smallvec::SmallVec;
use std::vec::Vec;
pub trait FallibleVec<T> {
/// Append |val| to the end of |vec|. Returns Ok(()) on success,
/// Err(reason) if it fails, with |reason| describing the failure.
fn try_push(&mut self, value: T) -> Result<(), FailedAllocationError>;
}
/////////////////////////////////////////////////////////////////
// Vec
impl<T> FallibleVec<T> for Vec<T> {
#[inline(always)]
fn try_push(&mut self, val: T) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |vec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_vec<T>(vec: &mut Vec<T>) -> Result<(), FailedAllocationError> {
use std::mem;
let old_ptr = vec.as_mut_ptr();
let old_len = vec.len();
let old_cap: usize = vec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?
};
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?;
let new_ptr = unsafe {
if old_cap == 0 {
alloc::alloc(new_size_bytes, 0)
} else {
alloc::realloc(old_ptr as *mut u8, new_size_bytes)
}
};
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating Vec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T, old_len, new_cap) };
mem::forget(mem::replace(vec, new_vec));
Ok(())
}
/////////////////////////////////////////////////////////////////
// SmallVec
impl<T: Array> FallibleVec<T::Item> for SmallVec<T> {
#[inline(always)]
fn
|
(&mut self, val: T::Item) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_small_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |svec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_small_vec<T>(svec: &mut SmallVec<T>) -> Result<(), FailedAllocationError>
where
T: Array,
{
use std::mem;
use std::ptr::copy_nonoverlapping;
let old_ptr = svec.as_mut_ptr();
let old_len = svec.len();
let old_cap: usize = svec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?
};
// This surely shouldn't fail, if |old_cap| was previously accepted as a
// valid value. But err on the side of caution.
let old_size_bytes = old_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_ptr;
if svec.spilled() {
// There's an old block to free, and, presumably, old contents to
// copy. realloc takes care of both aspects.
unsafe {
new_ptr = alloc::realloc(old_ptr as *mut u8, new_size_bytes);
}
} else {
// There's no old block to free. There may be old contents to copy.
unsafe {
new_ptr = alloc::alloc(new_size_bytes, 0);
if!new_ptr.is_null() && old_size_bytes > 0 {
copy_nonoverlapping(old_ptr as *const u8, new_ptr as *mut u8, old_size_bytes);
}
}
}
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating SmallVec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T::Item, old_len, new_cap) };
let new_svec = SmallVec::from_vec(new_vec);
mem::forget(mem::replace(svec, new_svec));
Ok(())
}
|
try_push
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
extern crate hashglobe;
extern crate smallvec;
#[cfg(feature = "known_system_malloc")]
use hashglobe::alloc;
use hashglobe::FailedAllocationError;
use smallvec::Array;
use smallvec::SmallVec;
use std::vec::Vec;
pub trait FallibleVec<T> {
/// Append |val| to the end of |vec|. Returns Ok(()) on success,
/// Err(reason) if it fails, with |reason| describing the failure.
fn try_push(&mut self, value: T) -> Result<(), FailedAllocationError>;
}
/////////////////////////////////////////////////////////////////
// Vec
impl<T> FallibleVec<T> for Vec<T> {
#[inline(always)]
fn try_push(&mut self, val: T) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |vec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_vec<T>(vec: &mut Vec<T>) -> Result<(), FailedAllocationError> {
use std::mem;
let old_ptr = vec.as_mut_ptr();
let old_len = vec.len();
let old_cap: usize = vec.capacity();
let new_cap: usize = if old_cap == 0 {
4
} else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?
};
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for Vec"))?;
let new_ptr = unsafe {
if old_cap == 0 {
alloc::alloc(new_size_bytes, 0)
} else {
alloc::realloc(old_ptr as *mut u8, new_size_bytes)
}
};
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating Vec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T, old_len, new_cap) };
mem::forget(mem::replace(vec, new_vec));
Ok(())
}
/////////////////////////////////////////////////////////////////
// SmallVec
impl<T: Array> FallibleVec<T::Item> for SmallVec<T> {
#[inline(always)]
fn try_push(&mut self, val: T::Item) -> Result<(), FailedAllocationError> {
#[cfg(feature = "known_system_malloc")]
{
if self.capacity() == self.len() {
try_double_small_vec(self)?;
debug_assert!(self.capacity() > self.len());
}
}
self.push(val);
Ok(())
}
}
// Double the capacity of |svec|, or fail to do so due to lack of memory.
// Returns Ok(()) on success, Err(..) on failure.
#[cfg(feature = "known_system_malloc")]
#[inline(never)]
#[cold]
fn try_double_small_vec<T>(svec: &mut SmallVec<T>) -> Result<(), FailedAllocationError>
where
T: Array,
{
use std::mem;
use std::ptr::copy_nonoverlapping;
let old_ptr = svec.as_mut_ptr();
let old_len = svec.len();
let old_cap: usize = svec.capacity();
let new_cap: usize = if old_cap == 0
|
else {
old_cap
.checked_mul(2)
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?
};
// This surely shouldn't fail, if |old_cap| was previously accepted as a
// valid value. But err on the side of caution.
let old_size_bytes = old_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_size_bytes = new_cap
.checked_mul(mem::size_of::<T>())
.ok_or(FailedAllocationError::new("capacity overflow for SmallVec"))?;
let new_ptr;
if svec.spilled() {
// There's an old block to free, and, presumably, old contents to
// copy. realloc takes care of both aspects.
unsafe {
new_ptr = alloc::realloc(old_ptr as *mut u8, new_size_bytes);
}
} else {
// There's no old block to free. There may be old contents to copy.
unsafe {
new_ptr = alloc::alloc(new_size_bytes, 0);
if!new_ptr.is_null() && old_size_bytes > 0 {
copy_nonoverlapping(old_ptr as *const u8, new_ptr as *mut u8, old_size_bytes);
}
}
}
if new_ptr.is_null() {
return Err(FailedAllocationError::new(
"out of memory when allocating SmallVec",
));
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr as *mut T::Item, old_len, new_cap) };
let new_svec = SmallVec::from_vec(new_vec);
mem::forget(mem::replace(svec, new_svec));
Ok(())
}
|
{
4
}
|
conditional_block
|
coercion.rs
|
for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, LvaluePreference, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
struct
|
<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
origin: infer::TypeOrigin,
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
Ok(None) // No coercion required.
}
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({:?} => {:?})",
a,
b);
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| {
self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::TyRawPtr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::TyRef(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
self.unpack_actual_value(a, |a| {
match a.sty {
ty::TyBareFn(Some(_), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
}
ty::TyBareFn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={:?}, b={:?})",
a,
b);
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::TyRef(_, mt_a) => {
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
let lvalue_pref = LvaluePreference::from_mutbl(mutbl_b);
let mut first_error = None;
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T; n] or &mut [T; n] -> &[T]
// or &mut [T; n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("coerce_unsized(source={:?}, target={:?})",
source,
target);
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::TyRef(_, mt_a), &ty::TyRef(_, mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {:?}", obligation);
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
for obligation in vtable.nested_obligations() {
queue.push_back(obligation);
}
}
}
}
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {:?}", adjustment);
Ok(Some(AdjustDerefRef(adjustment)))
}
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={:?}, b={:?})",
a, b);
if let ty::TyBareFn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_item(a={:?}, b={:?})",
a, b);
match b.sty {
ty::TyBareFn(None, _) => {
let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer))
}
_ => self.subtype(a, b)
}
})
}
fn coerce_unsafe_ptr(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={:?}, b={:?})",
a,
b);
let (is_ref, mt_a) = match a.sty {
ty::TyRef(_, mt) => (true, mt),
ty::TyRawPtr(mt) => (false, mt),
_ => {
return self.subtype(a, b);
}
};
// Check that the types which they point at are compatible.
let a_unsafe = ty::mk_ptr(self.tcx(), ty::mt{ mutbl: mutbl_b, ty: mt_a.ty });
try!(self.subtype(a_unsafe, b));
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
// Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
// regionck knows that the region for `a` must be valid here.
if is_ref {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mutbl_b)),
unsize: None
})))
} else {
Ok(None)
}
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
|
Coerce
|
identifier_name
|
coercion.rs
|
also for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, LvaluePreference, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
struct Coerce<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
origin: infer::TypeOrigin,
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
Ok(None) // No coercion required.
}
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({:?} => {:?})",
a,
b);
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| {
self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::TyRawPtr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::TyRef(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
self.unpack_actual_value(a, |a| {
match a.sty {
ty::TyBareFn(Some(_), a_f) => {
// Function items are coercible to any closure
|
}
ty::TyBareFn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={:?}, b={:?})",
a,
b);
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::TyRef(_, mt_a) => {
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
let lvalue_pref = LvaluePreference::from_mutbl(mutbl_b);
let mut first_error = None;
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T; n] or &mut [T; n] -> &[T]
// or &mut [T; n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("coerce_unsized(source={:?}, target={:?})",
source,
target);
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::TyRef(_, mt_a), &ty::TyRef(_, mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {:?}", obligation);
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
for obligation in vtable.nested_obligations() {
queue.push_back(obligation);
}
}
}
}
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {:?}", adjustment);
Ok(Some(AdjustDerefRef(adjustment)))
}
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={:?}, b={:?})",
a, b);
if let ty::TyBareFn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_item(a={:?}, b={:?})",
a, b);
match b.sty {
ty::TyBareFn(None, _) => {
let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer))
}
_ => self.subtype(a, b)
}
})
}
fn coerce_unsafe_ptr(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={:?}, b={:?})",
a,
b);
let (is_ref, mt_a) = match a.sty {
ty::TyRef(_, mt) => (true, mt),
ty::TyRawPtr(mt) => (false, mt),
_ => {
return self.subtype(a, b);
}
};
// Check that the types which they point at are compatible.
let a_unsafe = ty::mk_ptr(self.tcx(), ty::mt{ mutbl: mutbl_b, ty: mt_a.ty });
try!(self.subtype(a_unsafe, b));
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
// Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
// regionck knows that the region for `a` must be valid here.
if is_ref {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mutbl_b)),
unsize: None
})))
} else {
Ok(None)
}
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
|
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
|
random_line_split
|
coercion.rs
|
for avoiding the linearity
//! of mut things (when the expected is &mut T and you have &mut T). See
//! the various `src/test/run-pass/coerce-reborrow-*.rs` tests for
//! examples of where this is useful.
//!
//! ## Subtle note
//!
//! When deciding what type coercions to consider, we do not attempt to
//! resolve any type variables we may encounter. This is because `b`
//! represents the expected type "as the user wrote it", meaning that if
//! the user defined a generic function like
//!
//! fn foo<A>(a: A, b: A) {... }
//!
//! and then we wrote `foo(&1, @2)`, we will not auto-borrow
//! either argument. In older code we went to some lengths to
//! resolve the `b` variable, which could mean that we'd
//! auto-borrow later arguments but not earlier ones, which
//! seems very confusing.
//!
//! ## Subtler note
//!
//! However, right now, if the user manually specifies the
//! values for the type variables, as so:
//!
//! foo::<&int>(@1, @2)
//!
//! then we *will* auto-borrow, because we can't distinguish this from a
//! function that declared `&int`. This is inconsistent but it's easiest
//! at the moment. The right thing to do, I think, is to consider the
//! *unsubstituted* type when deciding whether to auto-borrow, but the
//! *substituted* type when considering the bounds and so forth. But most
//! of our methods don't give access to the unsubstituted type, and
//! rightly so because they'd be error-prone. So maybe the thing to do is
//! to actually determine the kind of coercions that should occur
//! separately and pass them in. Or maybe it's ok as is. Anyway, it's
//! sort of a minor point so I've opted to leave it for later---after all
//! we may want to adjust precisely when coercions occur.
use check::{autoderef, FnCtxt, LvaluePreference, UnresolvedTypeAction};
use middle::infer::{self, Coercion};
use middle::traits::{self, ObligationCause};
use middle::traits::{predicate_for_trait_def, report_selection_error};
use middle::ty::{AutoDerefRef, AdjustDerefRef};
use middle::ty::{self, mt, Ty};
use middle::ty_relate::RelateResult;
use util::common::indent;
use std::cell::RefCell;
use std::collections::VecDeque;
use syntax::ast;
struct Coerce<'a, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'tcx>,
origin: infer::TypeOrigin,
unsizing_obligations: RefCell<Vec<traits::PredicateObligation<'tcx>>>,
}
type CoerceResult<'tcx> = RelateResult<'tcx, Option<ty::AutoAdjustment<'tcx>>>;
impl<'f, 'tcx> Coerce<'f, 'tcx> {
fn tcx(&self) -> &ty::ctxt<'tcx> {
self.fcx.tcx()
}
fn subtype(&self, a: Ty<'tcx>, b: Ty<'tcx>) -> CoerceResult<'tcx> {
try!(self.fcx.infcx().sub_types(false, self.origin.clone(), a, b));
Ok(None) // No coercion required.
}
fn unpack_actual_value<T, F>(&self, a: Ty<'tcx>, f: F) -> T where
F: FnOnce(Ty<'tcx>) -> T,
{
f(self.fcx.infcx().shallow_resolve(a))
}
fn coerce(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("Coerce.tys({:?} => {:?})",
a,
b);
// Consider coercing the subtype to a DST
let unsize = self.unpack_actual_value(a, |a| {
self.coerce_unsized(a, b)
});
if unsize.is_ok() {
return unsize;
}
// Examine the supertype and consider auto-borrowing.
//
// Note: does not attempt to resolve type variables we encounter.
// See above for details.
match b.sty {
ty::TyRawPtr(mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_unsafe_ptr(a, b, mt_b.mutbl)
});
}
ty::TyRef(_, mt_b) => {
return self.unpack_actual_value(a, |a| {
self.coerce_borrowed_pointer(expr_a, a, b, mt_b.mutbl)
});
}
_ => {}
}
self.unpack_actual_value(a, |a| {
match a.sty {
ty::TyBareFn(Some(_), a_f) => {
// Function items are coercible to any closure
// type; function pointers are not (that would
// require double indirection).
self.coerce_from_fn_item(a, a_f, b)
}
ty::TyBareFn(None, a_f) => {
// We permit coercion of fn pointers to drop the
// unsafe qualifier.
self.coerce_from_fn_pointer(a, a_f, b)
}
_ => {
// Otherwise, just use subtyping rules.
self.subtype(a, b)
}
}
})
}
/// Reborrows `&mut A` to `&mut B` and `&(mut) A` to `&B`.
/// To match `A` with `B`, autoderef will be performed,
/// calling `deref`/`deref_mut` where necessary.
fn coerce_borrowed_pointer(&self,
expr_a: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_borrowed_pointer(a={:?}, b={:?})",
a,
b);
// If we have a parameter of type `&M T_a` and the value
// provided is `expr`, we will be adding an implicit borrow,
// meaning that we convert `f(expr)` to `f(&M *expr)`. Therefore,
// to type check, we will construct the type that `&M*expr` would
// yield.
match a.sty {
ty::TyRef(_, mt_a) => {
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
}
_ => return self.subtype(a, b)
}
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let r_borrow = self.tcx().mk_region(r_borrow);
let autoref = Some(ty::AutoPtr(r_borrow, mutbl_b));
let lvalue_pref = LvaluePreference::from_mutbl(mutbl_b);
let mut first_error = None;
let (_, autoderefs, success) = autoderef(self.fcx,
expr_a.span,
a,
Some(expr_a),
UnresolvedTypeAction::Ignore,
lvalue_pref,
|inner_ty, autoderef| {
if autoderef == 0 {
// Don't let this pass, otherwise it would cause
// &T to autoref to &&T.
return None;
}
let ty = ty::mk_rptr(self.tcx(), r_borrow,
mt {ty: inner_ty, mutbl: mutbl_b});
if let Err(err) = self.subtype(ty, b) {
if first_error.is_none() {
first_error = Some(err);
}
None
} else {
Some(())
}
});
match success {
Some(_) => {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: autoderefs,
autoref: autoref,
unsize: None
})))
}
None => {
// Return original error as if overloaded deref was never
// attempted, to avoid irrelevant/confusing error messages.
Err(first_error.expect("coerce_borrowed_pointer failed with no error?"))
}
}
}
// &[T; n] or &mut [T; n] -> &[T]
// or &mut [T; n] -> &mut [T]
// or &Concrete -> &Trait, etc.
fn coerce_unsized(&self,
source: Ty<'tcx>,
target: Ty<'tcx>)
-> CoerceResult<'tcx> {
debug!("coerce_unsized(source={:?}, target={:?})",
source,
target);
let traits = (self.tcx().lang_items.unsize_trait(),
self.tcx().lang_items.coerce_unsized_trait());
let (unsize_did, coerce_unsized_did) = if let (Some(u), Some(cu)) = traits {
(u, cu)
} else {
debug!("Missing Unsize or CoerceUnsized traits");
return Err(ty::terr_mismatch);
};
// Note, we want to avoid unnecessary unsizing. We don't want to coerce to
// a DST unless we have to. This currently comes out in the wash since
// we can't unify [T] with U. But to properly support DST, we need to allow
// that, at which point we will need extra checks on the target here.
// Handle reborrows before selecting `Source: CoerceUnsized<Target>`.
let (source, reborrow) = match (&source.sty, &target.sty) {
(&ty::TyRef(_, mt_a), &ty::TyRef(_, mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
let coercion = Coercion(self.origin.span());
let r_borrow = self.fcx.infcx().next_region_var(coercion);
let region = self.tcx().mk_region(r_borrow);
(mt_a.ty, Some(ty::AutoPtr(region, mt_b.mutbl)))
}
(&ty::TyRef(_, mt_a), &ty::TyRawPtr(mt_b)) => {
try!(coerce_mutbls(mt_a.mutbl, mt_b.mutbl));
(mt_a.ty, Some(ty::AutoUnsafe(mt_b.mutbl)))
}
_ => (source, None)
};
let source = ty::adjust_ty_for_autoref(self.tcx(), source, reborrow);
let mut selcx = traits::SelectionContext::new(self.fcx.infcx(), self.fcx);
// Use a FIFO queue for this custom fulfillment procedure.
let mut queue = VecDeque::new();
let mut leftover_predicates = vec![];
// Create an obligation for `Source: CoerceUnsized<Target>`.
let cause = ObligationCause::misc(self.origin.span(), self.fcx.body_id);
queue.push_back(predicate_for_trait_def(self.tcx(),
cause,
coerce_unsized_did,
0,
source,
vec![target]));
// Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid
// emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where
// inference might unify those two inner type variables later.
let traits = [coerce_unsized_did, unsize_did];
while let Some(obligation) = queue.pop_front() {
debug!("coerce_unsized resolve step: {:?}", obligation);
let trait_ref = match obligation.predicate {
ty::Predicate::Trait(ref tr) if traits.contains(&tr.def_id()) => {
tr.clone()
}
_ => {
leftover_predicates.push(obligation);
continue;
}
};
match selcx.select(&obligation.with(trait_ref)) {
// Uncertain or unimplemented.
Ok(None) | Err(traits::Unimplemented) => {
debug!("coerce_unsized: early return - can't prove obligation");
return Err(ty::terr_mismatch);
}
// Object safety violations or miscellaneous.
Err(err) => {
report_selection_error(self.fcx.infcx(), &obligation, &err);
// Treat this like an obligation and follow through
// with the unsizing - the lack of a coercion should
// be silent, as it causes a type mismatch later.
}
Ok(Some(vtable)) => {
for obligation in vtable.nested_obligations() {
queue.push_back(obligation);
}
}
}
}
let mut obligations = self.unsizing_obligations.borrow_mut();
assert!(obligations.is_empty());
*obligations = leftover_predicates;
let adjustment = AutoDerefRef {
autoderefs: if reborrow.is_some() { 1 } else { 0 },
autoref: reborrow,
unsize: Some(target)
};
debug!("Success, coerced with {:?}", adjustment);
Ok(Some(AdjustDerefRef(adjustment)))
}
fn coerce_from_fn_pointer(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
{
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_pointer(a={:?}, b={:?})",
a, b);
if let ty::TyBareFn(None, fn_ty_b) = b.sty {
match (fn_ty_a.unsafety, fn_ty_b.unsafety) {
(ast::Unsafety::Normal, ast::Unsafety::Unsafe) => {
let unsafe_a = self.tcx().safe_to_unsafe_fn_ty(fn_ty_a);
try!(self.subtype(unsafe_a, b));
return Ok(Some(ty::AdjustUnsafeFnPointer));
}
_ => {}
}
}
self.subtype(a, b)
})
}
fn coerce_from_fn_item(&self,
a: Ty<'tcx>,
fn_ty_a: &'tcx ty::BareFnTy<'tcx>,
b: Ty<'tcx>)
-> CoerceResult<'tcx>
|
fn coerce_unsafe_ptr(&self,
a: Ty<'tcx>,
b: Ty<'tcx>,
mutbl_b: ast::Mutability)
-> CoerceResult<'tcx> {
debug!("coerce_unsafe_ptr(a={:?}, b={:?})",
a,
b);
let (is_ref, mt_a) = match a.sty {
ty::TyRef(_, mt) => (true, mt),
ty::TyRawPtr(mt) => (false, mt),
_ => {
return self.subtype(a, b);
}
};
// Check that the types which they point at are compatible.
let a_unsafe = ty::mk_ptr(self.tcx(), ty::mt{ mutbl: mutbl_b, ty: mt_a.ty });
try!(self.subtype(a_unsafe, b));
try!(coerce_mutbls(mt_a.mutbl, mutbl_b));
// Although references and unsafe ptrs have the same
// representation, we still register an AutoDerefRef so that
// regionck knows that the region for `a` must be valid here.
if is_ref {
Ok(Some(AdjustDerefRef(AutoDerefRef {
autoderefs: 1,
autoref: Some(ty::AutoUnsafe(mutbl_b)),
unsize: None
})))
} else {
Ok(None)
}
}
}
pub fn mk_assignty<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
expr: &ast::Expr,
a: Ty<'tcx>,
b: Ty<'tcx>)
|
{
/*!
* Attempts to coerce from the type of a Rust function item
* into a closure or a `proc`.
*/
self.unpack_actual_value(b, |b| {
debug!("coerce_from_fn_item(a={:?}, b={:?})",
a, b);
match b.sty {
ty::TyBareFn(None, _) => {
let a_fn_pointer = ty::mk_bare_fn(self.tcx(), None, fn_ty_a);
try!(self.subtype(a_fn_pointer, b));
Ok(Some(ty::AdjustReifyFnPointer))
}
_ => self.subtype(a, b)
}
})
}
|
identifier_body
|
viewport.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ToCss};
use euclid::scale_factor::ScaleFactor;
use euclid::size::TypedSize2D;
use std::ascii::AsciiExt;
use std::fmt;
use util::geometry::{PagePx, ViewportPx};
use values::specified::AllowedNumericType;
define_css_keyword_enum!(UserZoom:
"zoom" => Zoom,
"fixed" => Fixed);
define_css_keyword_enum!(Orientation:
"auto" => Auto,
"portrait" => Portrait,
"landscape" => Landscape);
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, HeapSizeOf))]
pub struct ViewportConstraints {
pub size: TypedSize2D<ViewportPx, f32>,
pub initial_zoom: ScaleFactor<PagePx, ViewportPx, f32>,
pub min_zoom: Option<ScaleFactor<PagePx, ViewportPx, f32>>,
pub max_zoom: Option<ScaleFactor<PagePx, ViewportPx, f32>>,
pub user_zoom: UserZoom,
pub orientation: Orientation
}
impl ToCss for ViewportConstraints {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write
{
try!(write!(dest, "@viewport {{"));
try!(write!(dest, " width: {}px;", self.size.width.get()));
try!(write!(dest, " height: {}px;", self.size.height.get()));
try!(write!(dest, " zoom: {};", self.initial_zoom.get()));
if let Some(min_zoom) = self.min_zoom {
try!(write!(dest, " min-zoom: {};", min_zoom.get()));
}
if let Some(max_zoom) = self.max_zoom {
try!(write!(dest, " max-zoom: {};", max_zoom.get()));
}
try!(write!(dest, " user-zoom: ")); try!(self.user_zoom.to_css(dest));
try!(write!(dest, "; orientation: ")); try!(self.orientation.to_css(dest));
write!(dest, "; }}")
}
}
/// Zoom is a number | percentage | auto
/// See http://dev.w3.org/csswg/css-device-adapt/#descdef-viewport-zoom
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Zoom {
Number(f32),
Percentage(f32),
Auto,
}
impl ToCss for Zoom {
fn
|
<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write
{
match *self {
Zoom::Number(number) => write!(dest, "{}", number),
Zoom::Percentage(percentage) => write!(dest, "{}%", percentage * 100.),
Zoom::Auto => write!(dest, "auto")
}
}
}
impl Zoom {
pub fn parse(input: &mut Parser) -> Result<Zoom, ()> {
use cssparser::Token;
match try!(input.next()) {
Token::Percentage(ref value) if AllowedNumericType::NonNegative.is_ok(value.unit_value) =>
Ok(Zoom::Percentage(value.unit_value)),
Token::Number(ref value) if AllowedNumericType::NonNegative.is_ok(value.value) =>
Ok(Zoom::Number(value.value)),
Token::Ident(ref value) if value.eq_ignore_ascii_case("auto") =>
Ok(Zoom::Auto),
_ => Err(())
}
}
#[inline]
pub fn to_f32(&self) -> Option<f32> {
match *self {
Zoom::Number(number) => Some(number as f32),
Zoom::Percentage(percentage) => Some(percentage as f32),
Zoom::Auto => None
}
}
}
|
to_css
|
identifier_name
|
viewport.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use cssparser::{Parser, ToCss};
use euclid::scale_factor::ScaleFactor;
use euclid::size::TypedSize2D;
use std::ascii::AsciiExt;
use std::fmt;
use util::geometry::{PagePx, ViewportPx};
use values::specified::AllowedNumericType;
define_css_keyword_enum!(UserZoom:
"zoom" => Zoom,
"fixed" => Fixed);
define_css_keyword_enum!(Orientation:
"auto" => Auto,
"portrait" => Portrait,
"landscape" => Landscape);
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(Deserialize, Serialize, HeapSizeOf))]
pub struct ViewportConstraints {
pub size: TypedSize2D<ViewportPx, f32>,
pub initial_zoom: ScaleFactor<PagePx, ViewportPx, f32>,
pub min_zoom: Option<ScaleFactor<PagePx, ViewportPx, f32>>,
pub max_zoom: Option<ScaleFactor<PagePx, ViewportPx, f32>>,
pub user_zoom: UserZoom,
pub orientation: Orientation
}
impl ToCss for ViewportConstraints {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write
{
try!(write!(dest, "@viewport {{"));
try!(write!(dest, " width: {}px;", self.size.width.get()));
try!(write!(dest, " height: {}px;", self.size.height.get()));
try!(write!(dest, " zoom: {};", self.initial_zoom.get()));
if let Some(min_zoom) = self.min_zoom {
try!(write!(dest, " min-zoom: {};", min_zoom.get()));
}
if let Some(max_zoom) = self.max_zoom {
try!(write!(dest, " max-zoom: {};", max_zoom.get()));
}
try!(write!(dest, " user-zoom: ")); try!(self.user_zoom.to_css(dest));
try!(write!(dest, "; orientation: ")); try!(self.orientation.to_css(dest));
write!(dest, "; }}")
}
}
/// Zoom is a number | percentage | auto
/// See http://dev.w3.org/csswg/css-device-adapt/#descdef-viewport-zoom
#[derive(Copy, Clone, Debug, PartialEq)]
#[cfg_attr(feature = "servo", derive(HeapSizeOf))]
pub enum Zoom {
Number(f32),
Percentage(f32),
Auto,
}
impl ToCss for Zoom {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result
where W: fmt::Write
{
match *self {
Zoom::Number(number) => write!(dest, "{}", number),
Zoom::Percentage(percentage) => write!(dest, "{}%", percentage * 100.),
Zoom::Auto => write!(dest, "auto")
}
}
}
impl Zoom {
pub fn parse(input: &mut Parser) -> Result<Zoom, ()> {
use cssparser::Token;
match try!(input.next()) {
Token::Percentage(ref value) if AllowedNumericType::NonNegative.is_ok(value.unit_value) =>
Ok(Zoom::Percentage(value.unit_value)),
Token::Number(ref value) if AllowedNumericType::NonNegative.is_ok(value.value) =>
Ok(Zoom::Number(value.value)),
Token::Ident(ref value) if value.eq_ignore_ascii_case("auto") =>
Ok(Zoom::Auto),
_ => Err(())
|
}
}
#[inline]
pub fn to_f32(&self) -> Option<f32> {
match *self {
Zoom::Number(number) => Some(number as f32),
Zoom::Percentage(percentage) => Some(percentage as f32),
Zoom::Auto => None
}
}
}
|
random_line_split
|
|
bitpacker.rs
|
use std::io::Write;
use std::io;
use common::serialize::BinarySerializable;
use std::mem;
/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
///
/// The logic is slightly more convoluted here as for optimization
/// reasons, we want to ensure that a value spawns over at most 8 bytes
/// of aligns bytes.
///
/// Spawning over 9 bytes is possible for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and ends at byte 15;
/// Hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64bits
/// when the result is greater than `64-8 = 56 bits`.
///
/// Note that this only affects rare use cases spawning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(amplitude: u64) -> u8 {
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
if amplitude <= 64 - 8 { amplitude } else { 64 }
}
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
num_bits: usize,
written_size: usize,
}
impl BitPacker {
pub fn new(num_bits: usize) -> BitPacker {
BitPacker {
mini_buffer: 0u64,
mini_buffer_written: 0,
num_bits: num_bits,
written_size: 0,
}
}
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
let val_u64 = val as u64;
if self.mini_buffer_written + self.num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += self.num_bits;
if self.mini_buffer_written == 64 {
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer_written = 0;
self.mini_buffer = 0u64;
}
}
Ok(())
}
fn
|
<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
output.write_all(&arr[..num_bytes])?;
self.written_size += num_bytes;
self.mini_buffer_written = 0;
}
Ok(())
}
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<usize> {
self.flush(output)?;
// Padding the write file to simplify reads.
output.write_all(&[0u8; 7])?;
self.written_size += 7;
Ok(self.written_size)
}
}
pub struct BitUnpacker {
num_bits: usize,
mask: u64,
data_ptr: *const u8,
data_len: usize,
}
impl BitUnpacker {
pub fn new(data: &[u8], num_bits: usize) -> BitUnpacker {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: num_bits,
mask: mask,
data_ptr: data.as_ptr(),
data_len: data.len(),
}
}
pub fn get(&self, idx: usize) -> u64 {
if self.num_bits == 0 {
return 0;
}
let addr = (idx * self.num_bits) / 8;
let bit_shift = idx * self.num_bits - addr * 8;
let val_unshifted_unmasked: u64;
debug_assert!(addr + 8 <= self.data_len,
"The fast field field should have been padded with 7 bytes.");
val_unshifted_unmasked = unsafe { *(self.data_ptr.offset(addr as isize) as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
(val_shifted & self.mask)
}
}
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker, compute_num_bits};
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
fn test_bitpacker_util(len: usize, num_bits: usize) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new(num_bits);
let max_val: u64 = (1 << num_bits) - 1;
let vals: Vec<u64> = (0u64..len as u64)
.map(|i| if max_val == 0 { 0 } else { i % max_val })
.collect();
for &val in &vals {
bitpacker.write(val, &mut data).unwrap();
}
let num_bytes = bitpacker.close(&mut data).unwrap();
assert_eq!(num_bytes, (num_bits * len + 7) / 8 + 7);
assert_eq!(data.len(), num_bytes);
let bitunpacker = BitUnpacker::new(&data, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i), *val);
}
}
#[test]
fn test_bitpacker() {
test_bitpacker_util(10, 3);
test_bitpacker_util(10, 0);
test_bitpacker_util(10, 1);
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
}
|
flush
|
identifier_name
|
bitpacker.rs
|
use std::io::Write;
use std::io;
use common::serialize::BinarySerializable;
use std::mem;
/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
///
/// The logic is slightly more convoluted here as for optimization
/// reasons, we want to ensure that a value spawns over at most 8 bytes
/// of aligns bytes.
///
/// Spawning over 9 bytes is possible for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and ends at byte 15;
/// Hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64bits
/// when the result is greater than `64-8 = 56 bits`.
///
/// Note that this only affects rare use cases spawning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(amplitude: u64) -> u8 {
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
if amplitude <= 64 - 8 { amplitude } else { 64 }
}
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
num_bits: usize,
written_size: usize,
}
impl BitPacker {
pub fn new(num_bits: usize) -> BitPacker {
BitPacker {
mini_buffer: 0u64,
mini_buffer_written: 0,
num_bits: num_bits,
written_size: 0,
}
}
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
let val_u64 = val as u64;
if self.mini_buffer_written + self.num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += self.num_bits;
if self.mini_buffer_written == 64 {
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer_written = 0;
self.mini_buffer = 0u64;
}
}
Ok(())
}
fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
output.write_all(&arr[..num_bytes])?;
self.written_size += num_bytes;
self.mini_buffer_written = 0;
}
Ok(())
}
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<usize> {
self.flush(output)?;
// Padding the write file to simplify reads.
output.write_all(&[0u8; 7])?;
self.written_size += 7;
Ok(self.written_size)
}
}
pub struct BitUnpacker {
num_bits: usize,
mask: u64,
data_ptr: *const u8,
data_len: usize,
}
impl BitUnpacker {
pub fn new(data: &[u8], num_bits: usize) -> BitUnpacker {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: num_bits,
mask: mask,
data_ptr: data.as_ptr(),
data_len: data.len(),
}
}
pub fn get(&self, idx: usize) -> u64 {
if self.num_bits == 0 {
return 0;
}
let addr = (idx * self.num_bits) / 8;
let bit_shift = idx * self.num_bits - addr * 8;
let val_unshifted_unmasked: u64;
debug_assert!(addr + 8 <= self.data_len,
"The fast field field should have been padded with 7 bytes.");
val_unshifted_unmasked = unsafe { *(self.data_ptr.offset(addr as isize) as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
(val_shifted & self.mask)
}
}
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker, compute_num_bits};
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
fn test_bitpacker_util(len: usize, num_bits: usize) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new(num_bits);
let max_val: u64 = (1 << num_bits) - 1;
let vals: Vec<u64> = (0u64..len as u64)
.map(|i| if max_val == 0 { 0 } else
|
)
.collect();
for &val in &vals {
bitpacker.write(val, &mut data).unwrap();
}
let num_bytes = bitpacker.close(&mut data).unwrap();
assert_eq!(num_bytes, (num_bits * len + 7) / 8 + 7);
assert_eq!(data.len(), num_bytes);
let bitunpacker = BitUnpacker::new(&data, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i), *val);
}
}
#[test]
fn test_bitpacker() {
test_bitpacker_util(10, 3);
test_bitpacker_util(10, 0);
test_bitpacker_util(10, 1);
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
}
|
{ i % max_val }
|
conditional_block
|
bitpacker.rs
|
use std::io::Write;
use std::io;
use common::serialize::BinarySerializable;
use std::mem;
/// Computes the number of bits that will be used for bitpacking.
///
/// In general the target is the minimum number of bits
/// required to express the amplitude given in argument.
///
/// e.g. If the amplitude is 10, we can store all ints on simply 4bits.
///
/// The logic is slightly more convoluted here as for optimization
/// reasons, we want to ensure that a value spawns over at most 8 bytes
/// of aligns bytes.
///
/// Spawning over 9 bytes is possible for instance, if we do
/// bitpacking with an amplitude of 63 bits.
/// In this case, the second int will start on bit
/// 63 (which belongs to byte 7) and ends at byte 15;
/// Hence 9 bytes (from byte 7 to byte 15 included).
///
/// To avoid this, we force the number of bits to 64bits
/// when the result is greater than `64-8 = 56 bits`.
///
/// Note that this only affects rare use cases spawning over
/// a very large range of values. Even in this case, it results
/// in an extra cost of at most 12% compared to the optimal
/// number of bits.
pub fn compute_num_bits(amplitude: u64) -> u8 {
let amplitude = (64u32 - amplitude.leading_zeros()) as u8;
if amplitude <= 64 - 8 { amplitude } else { 64 }
}
pub struct BitPacker {
mini_buffer: u64,
mini_buffer_written: usize,
num_bits: usize,
written_size: usize,
}
impl BitPacker {
pub fn new(num_bits: usize) -> BitPacker {
BitPacker {
mini_buffer: 0u64,
mini_buffer_written: 0,
num_bits: num_bits,
written_size: 0,
}
}
pub fn write<TWrite: Write>(&mut self, val: u64, output: &mut TWrite) -> io::Result<()> {
let val_u64 = val as u64;
if self.mini_buffer_written + self.num_bits > 64 {
self.mini_buffer |= val_u64.wrapping_shl(self.mini_buffer_written as u32);
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer = val_u64.wrapping_shr((64 - self.mini_buffer_written) as u32);
self.mini_buffer_written = self.mini_buffer_written + (self.num_bits as usize) - 64;
} else {
self.mini_buffer |= val_u64 << self.mini_buffer_written;
self.mini_buffer_written += self.num_bits;
if self.mini_buffer_written == 64 {
self.written_size += self.mini_buffer.serialize(output)?;
self.mini_buffer_written = 0;
self.mini_buffer = 0u64;
}
}
Ok(())
}
fn flush<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<()> {
if self.mini_buffer_written > 0 {
let num_bytes = (self.mini_buffer_written + 7) / 8;
let arr: [u8; 8] = unsafe { mem::transmute::<u64, [u8; 8]>(self.mini_buffer) };
output.write_all(&arr[..num_bytes])?;
self.written_size += num_bytes;
self.mini_buffer_written = 0;
}
Ok(())
}
pub fn close<TWrite: Write>(&mut self, output: &mut TWrite) -> io::Result<usize> {
self.flush(output)?;
// Padding the write file to simplify reads.
output.write_all(&[0u8; 7])?;
self.written_size += 7;
Ok(self.written_size)
}
}
pub struct BitUnpacker {
num_bits: usize,
mask: u64,
data_ptr: *const u8,
data_len: usize,
}
impl BitUnpacker {
pub fn new(data: &[u8], num_bits: usize) -> BitUnpacker {
let mask: u64 = if num_bits == 64 {
!0u64
} else {
(1u64 << num_bits) - 1u64
};
BitUnpacker {
num_bits: num_bits,
mask: mask,
|
data_ptr: data.as_ptr(),
data_len: data.len(),
}
}
pub fn get(&self, idx: usize) -> u64 {
if self.num_bits == 0 {
return 0;
}
let addr = (idx * self.num_bits) / 8;
let bit_shift = idx * self.num_bits - addr * 8;
let val_unshifted_unmasked: u64;
debug_assert!(addr + 8 <= self.data_len,
"The fast field field should have been padded with 7 bytes.");
val_unshifted_unmasked = unsafe { *(self.data_ptr.offset(addr as isize) as *const u64) };
let val_shifted = (val_unshifted_unmasked >> bit_shift) as u64;
(val_shifted & self.mask)
}
}
#[cfg(test)]
mod test {
use super::{BitPacker, BitUnpacker, compute_num_bits};
#[test]
fn test_compute_num_bits() {
assert_eq!(compute_num_bits(1), 1u8);
assert_eq!(compute_num_bits(0), 0u8);
assert_eq!(compute_num_bits(2), 2u8);
assert_eq!(compute_num_bits(3), 2u8);
assert_eq!(compute_num_bits(4), 3u8);
assert_eq!(compute_num_bits(255), 8u8);
assert_eq!(compute_num_bits(256), 9u8);
assert_eq!(compute_num_bits(5_000_000_000), 33u8);
}
fn test_bitpacker_util(len: usize, num_bits: usize) {
let mut data = Vec::new();
let mut bitpacker = BitPacker::new(num_bits);
let max_val: u64 = (1 << num_bits) - 1;
let vals: Vec<u64> = (0u64..len as u64)
.map(|i| if max_val == 0 { 0 } else { i % max_val })
.collect();
for &val in &vals {
bitpacker.write(val, &mut data).unwrap();
}
let num_bytes = bitpacker.close(&mut data).unwrap();
assert_eq!(num_bytes, (num_bits * len + 7) / 8 + 7);
assert_eq!(data.len(), num_bytes);
let bitunpacker = BitUnpacker::new(&data, num_bits);
for (i, val) in vals.iter().enumerate() {
assert_eq!(bitunpacker.get(i), *val);
}
}
#[test]
fn test_bitpacker() {
test_bitpacker_util(10, 3);
test_bitpacker_util(10, 0);
test_bitpacker_util(10, 1);
test_bitpacker_util(6, 14);
test_bitpacker_util(1000, 14);
}
}
|
random_line_split
|
|
regions-close-over-type-parameter-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
// Test for what happens when a type parameter `A` is closed over into
// an object. This should yield errors unless `A` (and the object)
// both have suitable bounds.
trait Foo { fn get(&self); }
impl<A> Foo for A {
fn get(&self)
|
}
fn repeater3<'a,A:'a>(v: A) -> Box<Foo+'a> {
box v as Box<Foo+'a>
}
fn main() {
// Error results because the type of is inferred to be
// ~Repeat<&'blk isize> where blk is the lifetime of the block below.
let _ = {
let tmp0 = 3;
let tmp1 = &tmp0; //~ ERROR `tmp0` does not live long enough
repeater3(tmp1)
};
}
|
{ }
|
identifier_body
|
regions-close-over-type-parameter-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
// Test for what happens when a type parameter `A` is closed over into
// an object. This should yield errors unless `A` (and the object)
// both have suitable bounds.
trait Foo { fn get(&self); }
impl<A> Foo for A {
fn get(&self) { }
}
fn repeater3<'a,A:'a>(v: A) -> Box<Foo+'a> {
box v as Box<Foo+'a>
}
fn main() {
// Error results because the type of is inferred to be
// ~Repeat<&'blk isize> where blk is the lifetime of the block below.
|
}
|
let _ = {
let tmp0 = 3;
let tmp1 = &tmp0; //~ ERROR `tmp0` does not live long enough
repeater3(tmp1)
};
|
random_line_split
|
regions-close-over-type-parameter-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
// Test for what happens when a type parameter `A` is closed over into
// an object. This should yield errors unless `A` (and the object)
// both have suitable bounds.
trait Foo { fn get(&self); }
impl<A> Foo for A {
fn get(&self) { }
}
fn repeater3<'a,A:'a>(v: A) -> Box<Foo+'a> {
box v as Box<Foo+'a>
}
fn
|
() {
// Error results because the type of is inferred to be
// ~Repeat<&'blk isize> where blk is the lifetime of the block below.
let _ = {
let tmp0 = 3;
let tmp1 = &tmp0; //~ ERROR `tmp0` does not live long enough
repeater3(tmp1)
};
}
|
main
|
identifier_name
|
lin_reg.rs
|
use rm::linalg::Matrix;
use rm::linalg::Vector;
use rm::learning::SupModel;
use rm::learning::lin_reg::LinRegressor;
use libnum::abs;
#[test]
fn test_optimized_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train_with_optimization(&inputs, &targets);
let _ = lin_mod.parameters().unwrap();
}
#[test]
fn test_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train(&inputs, &targets).unwrap();
let parameters = lin_mod.parameters().unwrap();
let err_1 = abs(parameters[0] - 3.0);
let err_2 = abs(parameters[1] - 1.0);
assert!(err_1 < 1e-8);
assert!(err_2 < 1e-8);
}
#[test]
#[should_panic]
fn test_no_train_params() {
let lin_mod = LinRegressor::default();
let _ = lin_mod.parameters().unwrap();
}
#[test]
#[should_panic]
fn test_no_train_predict() {
let lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
let _ = lin_mod.predict(&inputs).unwrap();
}
#[cfg(feature = "datasets")]
#[test]
fn test_regression_datasets_trees() {
use rm::datasets::trees;
let trees = trees::load();
let mut lin_mod = LinRegressor::default();
lin_mod.train(&trees.data(), &trees.target()).unwrap();
let params = lin_mod.parameters().unwrap();
assert_eq!(params, &Vector::new(vec![-57.98765891838409, 4.708160503017506, 0.3392512342447438]));
let predicted = lin_mod.predict(&trees.data()).unwrap();
let expected = vec![4.837659653793278, 4.55385163347481, 4.816981265588826, 15.874115228921276,
19.869008437727473, 21.018326956518717, 16.192688074961563, 19.245949183164257,
21.413021404689726, 20.187581283767756, 22.015402271048487, 21.468464618616007,
21.468464618616007, 20.50615412980805, 23.954109686181766, 27.852202904652785,
31.583966481344966, 33.806481916796706, 30.60097760433255, 28.697035014921106,
34.388184394951004, 36.008318964043994, 35.38525970948079, 41.76899799551756,
44.87770231764652, 50.942867757643015, 52.223751092491256, 53.42851282520877,
53.899328875510534, 53.899328875510534, 68.51530482306926];
assert_eq!(predicted, Vector::new(expected));
}
#[test]
#[ignore = "FIXME #183 fails nondeterministically"]
fn
|
() {
let inputs = Matrix::new(0, 1, vec![]);
let targets = Vector::new(vec![]);
let mut lin_mod = LinRegressor::default();
let res = lin_mod.train(&inputs, &targets);
assert!(res.is_err());
}
|
test_train_no_data
|
identifier_name
|
lin_reg.rs
|
use rm::linalg::Matrix;
use rm::linalg::Vector;
use rm::learning::SupModel;
use rm::learning::lin_reg::LinRegressor;
use libnum::abs;
#[test]
fn test_optimized_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train_with_optimization(&inputs, &targets);
let _ = lin_mod.parameters().unwrap();
}
#[test]
fn test_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train(&inputs, &targets).unwrap();
let parameters = lin_mod.parameters().unwrap();
let err_1 = abs(parameters[0] - 3.0);
let err_2 = abs(parameters[1] - 1.0);
assert!(err_1 < 1e-8);
assert!(err_2 < 1e-8);
}
#[test]
#[should_panic]
fn test_no_train_params() {
let lin_mod = LinRegressor::default();
let _ = lin_mod.parameters().unwrap();
}
#[test]
#[should_panic]
fn test_no_train_predict() {
let lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
let _ = lin_mod.predict(&inputs).unwrap();
}
#[cfg(feature = "datasets")]
#[test]
fn test_regression_datasets_trees()
|
#[test]
#[ignore = "FIXME #183 fails nondeterministically"]
fn test_train_no_data() {
let inputs = Matrix::new(0, 1, vec![]);
let targets = Vector::new(vec![]);
let mut lin_mod = LinRegressor::default();
let res = lin_mod.train(&inputs, &targets);
assert!(res.is_err());
}
|
{
use rm::datasets::trees;
let trees = trees::load();
let mut lin_mod = LinRegressor::default();
lin_mod.train(&trees.data(), &trees.target()).unwrap();
let params = lin_mod.parameters().unwrap();
assert_eq!(params, &Vector::new(vec![-57.98765891838409, 4.708160503017506, 0.3392512342447438]));
let predicted = lin_mod.predict(&trees.data()).unwrap();
let expected = vec![4.837659653793278, 4.55385163347481, 4.816981265588826, 15.874115228921276,
19.869008437727473, 21.018326956518717, 16.192688074961563, 19.245949183164257,
21.413021404689726, 20.187581283767756, 22.015402271048487, 21.468464618616007,
21.468464618616007, 20.50615412980805, 23.954109686181766, 27.852202904652785,
31.583966481344966, 33.806481916796706, 30.60097760433255, 28.697035014921106,
34.388184394951004, 36.008318964043994, 35.38525970948079, 41.76899799551756,
44.87770231764652, 50.942867757643015, 52.223751092491256, 53.42851282520877,
53.899328875510534, 53.899328875510534, 68.51530482306926];
assert_eq!(predicted, Vector::new(expected));
}
|
identifier_body
|
lin_reg.rs
|
use rm::linalg::Matrix;
use rm::linalg::Vector;
use rm::learning::SupModel;
use rm::learning::lin_reg::LinRegressor;
use libnum::abs;
#[test]
fn test_optimized_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train_with_optimization(&inputs, &targets);
let _ = lin_mod.parameters().unwrap();
}
#[test]
fn test_regression() {
let mut lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 1, vec![2.0, 3.0, 4.0]);
let targets = Vector::new(vec![5.0, 6.0, 7.0]);
lin_mod.train(&inputs, &targets).unwrap();
let parameters = lin_mod.parameters().unwrap();
let err_1 = abs(parameters[0] - 3.0);
let err_2 = abs(parameters[1] - 1.0);
assert!(err_1 < 1e-8);
assert!(err_2 < 1e-8);
}
#[test]
#[should_panic]
fn test_no_train_params() {
let lin_mod = LinRegressor::default();
let _ = lin_mod.parameters().unwrap();
}
#[test]
#[should_panic]
|
let lin_mod = LinRegressor::default();
let inputs = Matrix::new(3, 2, vec![1.0, 2.0, 1.0, 3.0, 1.0, 4.0]);
let _ = lin_mod.predict(&inputs).unwrap();
}
#[cfg(feature = "datasets")]
#[test]
fn test_regression_datasets_trees() {
use rm::datasets::trees;
let trees = trees::load();
let mut lin_mod = LinRegressor::default();
lin_mod.train(&trees.data(), &trees.target()).unwrap();
let params = lin_mod.parameters().unwrap();
assert_eq!(params, &Vector::new(vec![-57.98765891838409, 4.708160503017506, 0.3392512342447438]));
let predicted = lin_mod.predict(&trees.data()).unwrap();
let expected = vec![4.837659653793278, 4.55385163347481, 4.816981265588826, 15.874115228921276,
19.869008437727473, 21.018326956518717, 16.192688074961563, 19.245949183164257,
21.413021404689726, 20.187581283767756, 22.015402271048487, 21.468464618616007,
21.468464618616007, 20.50615412980805, 23.954109686181766, 27.852202904652785,
31.583966481344966, 33.806481916796706, 30.60097760433255, 28.697035014921106,
34.388184394951004, 36.008318964043994, 35.38525970948079, 41.76899799551756,
44.87770231764652, 50.942867757643015, 52.223751092491256, 53.42851282520877,
53.899328875510534, 53.899328875510534, 68.51530482306926];
assert_eq!(predicted, Vector::new(expected));
}
#[test]
#[ignore = "FIXME #183 fails nondeterministically"]
fn test_train_no_data() {
let inputs = Matrix::new(0, 1, vec![]);
let targets = Vector::new(vec![]);
let mut lin_mod = LinRegressor::default();
let res = lin_mod.train(&inputs, &targets);
assert!(res.is_err());
}
|
fn test_no_train_predict() {
|
random_line_split
|
decompressor.rs
|
use ll;
use parse_code;
use std::io;
struct DecoderContext {
c: *mut ll::ZSTD_DCtx,
}
impl Default for DecoderContext {
fn default() -> Self {
DecoderContext {
c: unsafe { ll::ZSTD_createDCtx() },
}
}
}
impl Drop for DecoderContext {
fn drop(&mut self) {
let code = unsafe { ll::ZSTD_freeDCtx(self.c) };
parse_code(code).unwrap();
}
}
/// Allows to decompress independently multiple blocks of data.
///
/// This reduces memory usage compared to calling `decompress` multiple times.
#[derive(Default)]
pub struct Decompressor {
context: DecoderContext,
dict: Vec<u8>,
}
impl Decompressor {
/// Creates a new zstd decompressor.
pub fn new() -> Self {
Decompressor::with_dict(Vec::new())
}
/// Creates a new zstd decompressor, using the given dictionary.
pub fn with_dict(dict: Vec<u8>) -> Self {
Decompressor {
context: DecoderContext::default(),
dict: dict,
}
}
/// Deompress a single block of data to the given destination buffer.
///
/// Returns the number of bytes written, or an error if something happened
/// (for instance if the destination buffer was too small).
pub fn decompress_to_buffer(
&mut self,
source: &[u8],
destination: &mut [u8],
) -> io::Result<usize> {
let code = unsafe {
ll::ZSTD_decompress_usingDict(
self.context.c,
destination.as_mut_ptr(),
destination.len(),
source.as_ptr(),
source.len(),
self.dict.as_ptr(),
|
self.dict.len(),
)
};
parse_code(code)
}
/// Decompress a block of data, and return the decompressed result in a `Vec<u8>`.
///
/// The decompressed data should be less than `capacity` bytes,
/// or an error will be returned.
pub fn decompress(
&mut self,
data: &[u8],
capacity: usize,
) -> io::Result<Vec<u8>> {
let mut buffer = Vec::with_capacity(capacity);
unsafe {
buffer.set_len(capacity);
let len = try!(self.decompress_to_buffer(data, &mut buffer[..]));
buffer.set_len(len);
}
Ok(buffer)
}
}
|
random_line_split
|
|
decompressor.rs
|
use ll;
use parse_code;
use std::io;
struct DecoderContext {
c: *mut ll::ZSTD_DCtx,
}
impl Default for DecoderContext {
fn
|
() -> Self {
DecoderContext {
c: unsafe { ll::ZSTD_createDCtx() },
}
}
}
impl Drop for DecoderContext {
fn drop(&mut self) {
let code = unsafe { ll::ZSTD_freeDCtx(self.c) };
parse_code(code).unwrap();
}
}
/// Allows to decompress independently multiple blocks of data.
///
/// This reduces memory usage compared to calling `decompress` multiple times.
#[derive(Default)]
pub struct Decompressor {
context: DecoderContext,
dict: Vec<u8>,
}
impl Decompressor {
/// Creates a new zstd decompressor.
pub fn new() -> Self {
Decompressor::with_dict(Vec::new())
}
/// Creates a new zstd decompressor, using the given dictionary.
pub fn with_dict(dict: Vec<u8>) -> Self {
Decompressor {
context: DecoderContext::default(),
dict: dict,
}
}
/// Deompress a single block of data to the given destination buffer.
///
/// Returns the number of bytes written, or an error if something happened
/// (for instance if the destination buffer was too small).
pub fn decompress_to_buffer(
&mut self,
source: &[u8],
destination: &mut [u8],
) -> io::Result<usize> {
let code = unsafe {
ll::ZSTD_decompress_usingDict(
self.context.c,
destination.as_mut_ptr(),
destination.len(),
source.as_ptr(),
source.len(),
self.dict.as_ptr(),
self.dict.len(),
)
};
parse_code(code)
}
/// Decompress a block of data, and return the decompressed result in a `Vec<u8>`.
///
/// The decompressed data should be less than `capacity` bytes,
/// or an error will be returned.
pub fn decompress(
&mut self,
data: &[u8],
capacity: usize,
) -> io::Result<Vec<u8>> {
let mut buffer = Vec::with_capacity(capacity);
unsafe {
buffer.set_len(capacity);
let len = try!(self.decompress_to_buffer(data, &mut buffer[..]));
buffer.set_len(len);
}
Ok(buffer)
}
}
|
default
|
identifier_name
|
decompressor.rs
|
use ll;
use parse_code;
use std::io;
struct DecoderContext {
c: *mut ll::ZSTD_DCtx,
}
impl Default for DecoderContext {
fn default() -> Self
|
}
impl Drop for DecoderContext {
fn drop(&mut self) {
let code = unsafe { ll::ZSTD_freeDCtx(self.c) };
parse_code(code).unwrap();
}
}
/// Allows to decompress independently multiple blocks of data.
///
/// This reduces memory usage compared to calling `decompress` multiple times.
#[derive(Default)]
pub struct Decompressor {
context: DecoderContext,
dict: Vec<u8>,
}
impl Decompressor {
/// Creates a new zstd decompressor.
pub fn new() -> Self {
Decompressor::with_dict(Vec::new())
}
/// Creates a new zstd decompressor, using the given dictionary.
pub fn with_dict(dict: Vec<u8>) -> Self {
Decompressor {
context: DecoderContext::default(),
dict: dict,
}
}
/// Deompress a single block of data to the given destination buffer.
///
/// Returns the number of bytes written, or an error if something happened
/// (for instance if the destination buffer was too small).
pub fn decompress_to_buffer(
&mut self,
source: &[u8],
destination: &mut [u8],
) -> io::Result<usize> {
let code = unsafe {
ll::ZSTD_decompress_usingDict(
self.context.c,
destination.as_mut_ptr(),
destination.len(),
source.as_ptr(),
source.len(),
self.dict.as_ptr(),
self.dict.len(),
)
};
parse_code(code)
}
/// Decompress a block of data, and return the decompressed result in a `Vec<u8>`.
///
/// The decompressed data should be less than `capacity` bytes,
/// or an error will be returned.
pub fn decompress(
&mut self,
data: &[u8],
capacity: usize,
) -> io::Result<Vec<u8>> {
let mut buffer = Vec::with_capacity(capacity);
unsafe {
buffer.set_len(capacity);
let len = try!(self.decompress_to_buffer(data, &mut buffer[..]));
buffer.set_len(len);
}
Ok(buffer)
}
}
|
{
DecoderContext {
c: unsafe { ll::ZSTD_createDCtx() },
}
}
|
identifier_body
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use servo::config::opts;
use std::ptr;
use std::thread;
use std::time::Duration;
pub fn deinit() {
// An unfortunate hack to make sure the linker's dead code stripping doesn't strip our
// `Info.plist`.
unsafe {
ptr::read_volatile(&INFO_PLIST[0]);
}
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count!= 1
|
else {
println!("All threads have shutdown (good).");
}
}
#[link_section = "__TEXT,__info_plist"]
#[no_mangle]
pub static INFO_PLIST: [u8; 619] = *include_bytes!("Info.plist");
#[link(name = "count_threads")]
extern {
fn macos_count_running_threads() -> i32;
}
|
{
println!("{} threads are still running after shutdown (bad).", thread_count);
if opts::get().clean_shutdown {
println!("Waiting until all threads have shutdown");
loop {
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count == 1 {
break;
}
thread::sleep(Duration::from_millis(1000));
println!("{} threads are still running.", thread_count);
}
}
}
|
conditional_block
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use servo::config::opts;
use std::ptr;
use std::thread;
use std::time::Duration;
pub fn deinit() {
// An unfortunate hack to make sure the linker's dead code stripping doesn't strip our
// `Info.plist`.
unsafe {
ptr::read_volatile(&INFO_PLIST[0]);
}
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count!= 1 {
println!("{} threads are still running after shutdown (bad).", thread_count);
if opts::get().clean_shutdown {
println!("Waiting until all threads have shutdown");
loop {
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count == 1 {
break;
}
thread::sleep(Duration::from_millis(1000));
|
}
}
#[link_section = "__TEXT,__info_plist"]
#[no_mangle]
pub static INFO_PLIST: [u8; 619] = *include_bytes!("Info.plist");
#[link(name = "count_threads")]
extern {
fn macos_count_running_threads() -> i32;
}
|
println!("{} threads are still running.", thread_count);
}
}
} else {
println!("All threads have shutdown (good).");
|
random_line_split
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use servo::config::opts;
use std::ptr;
use std::thread;
use std::time::Duration;
pub fn
|
() {
// An unfortunate hack to make sure the linker's dead code stripping doesn't strip our
// `Info.plist`.
unsafe {
ptr::read_volatile(&INFO_PLIST[0]);
}
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count!= 1 {
println!("{} threads are still running after shutdown (bad).", thread_count);
if opts::get().clean_shutdown {
println!("Waiting until all threads have shutdown");
loop {
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count == 1 {
break;
}
thread::sleep(Duration::from_millis(1000));
println!("{} threads are still running.", thread_count);
}
}
} else {
println!("All threads have shutdown (good).");
}
}
#[link_section = "__TEXT,__info_plist"]
#[no_mangle]
pub static INFO_PLIST: [u8; 619] = *include_bytes!("Info.plist");
#[link(name = "count_threads")]
extern {
fn macos_count_running_threads() -> i32;
}
|
deinit
|
identifier_name
|
mod.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
use servo::config::opts;
use std::ptr;
use std::thread;
use std::time::Duration;
pub fn deinit()
|
break;
}
thread::sleep(Duration::from_millis(1000));
println!("{} threads are still running.", thread_count);
}
}
} else {
println!("All threads have shutdown (good).");
}
}
#[link_section = "__TEXT,__info_plist"]
#[no_mangle]
pub static INFO_PLIST: [u8; 619] = *include_bytes!("Info.plist");
#[link(name = "count_threads")]
extern {
fn macos_count_running_threads() -> i32;
}
|
{
// An unfortunate hack to make sure the linker's dead code stripping doesn't strip our
// `Info.plist`.
unsafe {
ptr::read_volatile(&INFO_PLIST[0]);
}
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count != 1 {
println!("{} threads are still running after shutdown (bad).", thread_count);
if opts::get().clean_shutdown {
println!("Waiting until all threads have shutdown");
loop {
let thread_count = unsafe {
macos_count_running_threads()
};
if thread_count == 1 {
|
identifier_body
|
extra.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
use layout::wrapper::LayoutNode;
use script::layout_interface::LayoutChan;
/// Functionality useful for querying the layout-specific data on DOM nodes.
pub trait LayoutAuxMethods {
fn initialize_layout_data(self, chan: LayoutChan);
fn initialize_style_for_subtree(self, chan: LayoutChan);
}
impl<'ln> LayoutAuxMethods for LayoutNode<'ln> {
/// Resets layout data and styles for the node.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn initialize_layout_data(self, chan: LayoutChan) {
let mut layout_data_ref = self.mutate_layout_data();
match *layout_data_ref.get() {
None => {
*layout_data_ref.get() = Some(LayoutDataWrapper {
chan: Some(chan),
data: ~PrivateLayoutData::new(),
});
}
Some(_) => {}
}
}
/// Resets layout data and styles for a Node tree.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn initialize_style_for_subtree(self, chan: LayoutChan) {
for n in self.traverse_preorder() {
n.initialize_layout_data(chan.clone());
}
}
}
|
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Code for managing the layout data in the DOM.
use layout::util::{PrivateLayoutData, LayoutDataAccess, LayoutDataWrapper};
|
random_line_split
|
extra.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Code for managing the layout data in the DOM.
use layout::util::{PrivateLayoutData, LayoutDataAccess, LayoutDataWrapper};
use layout::wrapper::LayoutNode;
use script::layout_interface::LayoutChan;
/// Functionality useful for querying the layout-specific data on DOM nodes.
pub trait LayoutAuxMethods {
fn initialize_layout_data(self, chan: LayoutChan);
fn initialize_style_for_subtree(self, chan: LayoutChan);
}
impl<'ln> LayoutAuxMethods for LayoutNode<'ln> {
/// Resets layout data and styles for the node.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn initialize_layout_data(self, chan: LayoutChan) {
let mut layout_data_ref = self.mutate_layout_data();
match *layout_data_ref.get() {
None => {
*layout_data_ref.get() = Some(LayoutDataWrapper {
chan: Some(chan),
data: ~PrivateLayoutData::new(),
});
}
Some(_) => {}
}
}
/// Resets layout data and styles for a Node tree.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn initialize_style_for_subtree(self, chan: LayoutChan)
|
}
|
{
for n in self.traverse_preorder() {
n.initialize_layout_data(chan.clone());
}
}
|
identifier_body
|
extra.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Code for managing the layout data in the DOM.
use layout::util::{PrivateLayoutData, LayoutDataAccess, LayoutDataWrapper};
use layout::wrapper::LayoutNode;
use script::layout_interface::LayoutChan;
/// Functionality useful for querying the layout-specific data on DOM nodes.
pub trait LayoutAuxMethods {
fn initialize_layout_data(self, chan: LayoutChan);
fn initialize_style_for_subtree(self, chan: LayoutChan);
}
impl<'ln> LayoutAuxMethods for LayoutNode<'ln> {
/// Resets layout data and styles for the node.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn
|
(self, chan: LayoutChan) {
let mut layout_data_ref = self.mutate_layout_data();
match *layout_data_ref.get() {
None => {
*layout_data_ref.get() = Some(LayoutDataWrapper {
chan: Some(chan),
data: ~PrivateLayoutData::new(),
});
}
Some(_) => {}
}
}
/// Resets layout data and styles for a Node tree.
///
/// FIXME(pcwalton): Do this as part of box building instead of in a traversal.
fn initialize_style_for_subtree(self, chan: LayoutChan) {
for n in self.traverse_preorder() {
n.initialize_layout_data(chan.clone());
}
}
}
|
initialize_layout_data
|
identifier_name
|
read_only_memory.rs
|
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
#[test]
fn test_run()
|
let kvm = Kvm::new().expect("new kvm failed");
let mut vm = Vm::new(&kvm, guest_mem).expect("new vm failed");
let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
vcpu_sregs.cs.base = 0;
vcpu_sregs.cs.selector = 0;
vcpu_sregs.es.base = 0x3000;
vcpu_sregs.es.selector = 0;
vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
vcpu_regs.rip = load_addr.offset() as u64;
vcpu_regs.rflags = 2;
vcpu_regs.rax = 0x66;
vcpu_regs.rbx = 0;
vcpu.set_regs(&vcpu_regs).expect("set regs failed");
vm.add_memory_region(
GuestAddress(0),
Box::new(
MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,
false,
)
.expect("failed to register memory");
// Give some read only memory for the test code to read from and force a vcpu exit when it reads
// from it.
let mem_ro = SharedMemory::anon(0x1000).expect("failed to create shared memory");
let mmap_ro = MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping");
mmap_ro
.write_obj(vcpu_regs.rax as u8, 0)
.expect("failed writing data to ro memory");
vm.add_memory_region(
GuestAddress(vcpu_sregs.es.base),
Box::new(
MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping"),
),
true,
false,
)
.expect("failed to register memory");
// Ensure we get exactly 1 exit from attempting to write to read only memory.
let mut exits = 0;
let runnable_vcpu = vcpu.to_runnable(None).unwrap();
loop {
match runnable_vcpu.run().expect("run failed") {
VcpuExit::Hlt => break,
VcpuExit::MmioWrite {
address,
size: 1,
data,
} => {
assert_eq!(address, vcpu_sregs.es.base);
assert_eq!(data[0] as u64, vcpu_regs.rax + 1);
exits += 1;
}
r => panic!("unexpected exit reason: {:?}", r),
}
}
// Check that exactly 1 attempt to write to read only memory was made, and that the memory is
// unchanged after that attempt.
assert_eq!(exits, 1);
assert_eq!(
mmap_ro
.read_obj::<u8>(0)
.expect("failed to read data from ro memory"),
vcpu_regs.rax as u8
);
}
|
{
/*
0000 268A07 mov al,[es:bx]
0003 0401 add al,0x1
0005 268807 mov [es:bx],al
0008 F4 hlt
*/
let code = [0x26, 0x8a, 0x07, 0x04, 0x01, 0x26, 0x88, 0x07, 0xf4];
let mem_size = 0x2000;
let load_addr = GuestAddress(0x1000);
let guest_mem = GuestMemory::new(&[]).unwrap();
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping");
mmap.write_slice(&code[..], load_addr.offset() as usize)
.expect("Writing code to memory failed.");
|
identifier_body
|
read_only_memory.rs
|
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
#[test]
fn test_run() {
/*
0000 268A07 mov al,[es:bx]
0003 0401 add al,0x1
0005 268807 mov [es:bx],al
0008 F4 hlt
*/
let code = [0x26, 0x8a, 0x07, 0x04, 0x01, 0x26, 0x88, 0x07, 0xf4];
let mem_size = 0x2000;
let load_addr = GuestAddress(0x1000);
let guest_mem = GuestMemory::new(&[]).unwrap();
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping");
mmap.write_slice(&code[..], load_addr.offset() as usize)
.expect("Writing code to memory failed.");
let kvm = Kvm::new().expect("new kvm failed");
let mut vm = Vm::new(&kvm, guest_mem).expect("new vm failed");
let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
vcpu_sregs.cs.base = 0;
vcpu_sregs.cs.selector = 0;
vcpu_sregs.es.base = 0x3000;
vcpu_sregs.es.selector = 0;
vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
vcpu_regs.rip = load_addr.offset() as u64;
vcpu_regs.rflags = 2;
vcpu_regs.rax = 0x66;
vcpu_regs.rbx = 0;
vcpu.set_regs(&vcpu_regs).expect("set regs failed");
vm.add_memory_region(
|
GuestAddress(0),
Box::new(
MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,
false,
)
.expect("failed to register memory");
// Give some read only memory for the test code to read from and force a vcpu exit when it reads
// from it.
let mem_ro = SharedMemory::anon(0x1000).expect("failed to create shared memory");
let mmap_ro = MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping");
mmap_ro
.write_obj(vcpu_regs.rax as u8, 0)
.expect("failed writing data to ro memory");
vm.add_memory_region(
GuestAddress(vcpu_sregs.es.base),
Box::new(
MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping"),
),
true,
false,
)
.expect("failed to register memory");
// Ensure we get exactly 1 exit from attempting to write to read only memory.
let mut exits = 0;
let runnable_vcpu = vcpu.to_runnable(None).unwrap();
loop {
match runnable_vcpu.run().expect("run failed") {
VcpuExit::Hlt => break,
VcpuExit::MmioWrite {
address,
size: 1,
data,
} => {
assert_eq!(address, vcpu_sregs.es.base);
assert_eq!(data[0] as u64, vcpu_regs.rax + 1);
exits += 1;
}
r => panic!("unexpected exit reason: {:?}", r),
}
}
// Check that exactly 1 attempt to write to read only memory was made, and that the memory is
// unchanged after that attempt.
assert_eq!(exits, 1);
assert_eq!(
mmap_ro
.read_obj::<u8>(0)
.expect("failed to read data from ro memory"),
vcpu_regs.rax as u8
);
}
|
random_line_split
|
|
read_only_memory.rs
|
// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use base::{MemoryMappingBuilder, SharedMemory};
use kvm::*;
use kvm_sys::kvm_regs;
use vm_memory::{GuestAddress, GuestMemory};
#[test]
fn
|
() {
/*
0000 268A07 mov al,[es:bx]
0003 0401 add al,0x1
0005 268807 mov [es:bx],al
0008 F4 hlt
*/
let code = [0x26, 0x8a, 0x07, 0x04, 0x01, 0x26, 0x88, 0x07, 0xf4];
let mem_size = 0x2000;
let load_addr = GuestAddress(0x1000);
let guest_mem = GuestMemory::new(&[]).unwrap();
let mem = SharedMemory::anon(mem_size).expect("failed to create shared memory");
let mmap = MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping");
mmap.write_slice(&code[..], load_addr.offset() as usize)
.expect("Writing code to memory failed.");
let kvm = Kvm::new().expect("new kvm failed");
let mut vm = Vm::new(&kvm, guest_mem).expect("new vm failed");
let vcpu = Vcpu::new(0, &kvm, &vm).expect("new vcpu failed");
let mut vcpu_sregs = vcpu.get_sregs().expect("get sregs failed");
vcpu_sregs.cs.base = 0;
vcpu_sregs.cs.selector = 0;
vcpu_sregs.es.base = 0x3000;
vcpu_sregs.es.selector = 0;
vcpu.set_sregs(&vcpu_sregs).expect("set sregs failed");
let mut vcpu_regs: kvm_regs = unsafe { std::mem::zeroed() };
vcpu_regs.rip = load_addr.offset() as u64;
vcpu_regs.rflags = 2;
vcpu_regs.rax = 0x66;
vcpu_regs.rbx = 0;
vcpu.set_regs(&vcpu_regs).expect("set regs failed");
vm.add_memory_region(
GuestAddress(0),
Box::new(
MemoryMappingBuilder::new(mem_size as usize)
.from_shared_memory(&mem)
.build()
.expect("failed to create memory mapping"),
),
false,
false,
)
.expect("failed to register memory");
// Give some read only memory for the test code to read from and force a vcpu exit when it reads
// from it.
let mem_ro = SharedMemory::anon(0x1000).expect("failed to create shared memory");
let mmap_ro = MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping");
mmap_ro
.write_obj(vcpu_regs.rax as u8, 0)
.expect("failed writing data to ro memory");
vm.add_memory_region(
GuestAddress(vcpu_sregs.es.base),
Box::new(
MemoryMappingBuilder::new(0x1000)
.from_shared_memory(&mem_ro)
.build()
.expect("failed to create memory mapping"),
),
true,
false,
)
.expect("failed to register memory");
// Ensure we get exactly 1 exit from attempting to write to read only memory.
let mut exits = 0;
let runnable_vcpu = vcpu.to_runnable(None).unwrap();
loop {
match runnable_vcpu.run().expect("run failed") {
VcpuExit::Hlt => break,
VcpuExit::MmioWrite {
address,
size: 1,
data,
} => {
assert_eq!(address, vcpu_sregs.es.base);
assert_eq!(data[0] as u64, vcpu_regs.rax + 1);
exits += 1;
}
r => panic!("unexpected exit reason: {:?}", r),
}
}
// Check that exactly 1 attempt to write to read only memory was made, and that the memory is
// unchanged after that attempt.
assert_eq!(exits, 1);
assert_eq!(
mmap_ro
.read_obj::<u8>(0)
.expect("failed to read data from ro memory"),
vcpu_regs.rax as u8
);
}
|
test_run
|
identifier_name
|
compensate.rs
|
#include <constants.rh>
#include <crctools.rh>
#include <math.rh>
#include <util.rh>
; vim: syntax=fasm
_start:
mov r3, #0x1000 ; Output buffer address
mov [r3], #0x6c6c6548 ; 'lleH'
mov [r3+#4], #0x57202c6f ; 'W,o'
mov [r3+#8], #0x646c726f ; 'dlro'
|
jz $next
call $_error
next:
push #0xDEADBEEF
push #14
push #0x1000
call $_compensate_crc
test r0, r0
jz $verify
call $_error
verify:
push #14
push #0x1000
call $_crc_block
cmp r0, #0xDEADBEEF
jz $finish
call $_error
finish:
mov [#0x1000], #0x000a4b4f
mov [VMADDR_NEWBLOCKPOS], #0x1000 ; Pointer
mov [VMADDR_NEWBLOCKSIZE], #3 ; Size
call $_success
|
mov [r3+#12], #0x00000a21 ; '!\n'
push #14 ; Length
push r3 ; Pointer
call $_crc_block
cmp r0, #0x4b17617b ; Check value
|
random_line_split
|
abort.rs
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{DecodeError, FieldInfo};
use std::fmt::{self, Debug, Display, Formatter};
/// Decodes the ISS value for an Instruction Abort.
pub fn
|
(iss: u64) -> Result<Vec<FieldInfo>, DecodeError> {
let res0a = FieldInfo::get(iss, "RES0", Some("Reserved"), 13, 25).check_res0()?;
let fnv = FieldInfo::get_bit(iss, "FnV", Some("FAR not Valid"), 10).describe_bit(describe_fnv);
let ea = FieldInfo::get_bit(iss, "EA", Some("External abort type"), 9);
let res0b = FieldInfo::get_bit(iss, "RES0", Some("Reserved"), 8).check_res0()?;
let s1ptw = FieldInfo::get_bit(iss, "S1PTW", Some("Stage-1 translation table walk"), 7);
let res0c = FieldInfo::get_bit(iss, "RES0", Some("Reserved"), 6).check_res0()?;
let ifsc = FieldInfo::get(iss, "IFSC", Some("Instruction Fault Status Code"), 0, 6)
.describe(describe_fsc)?;
let set = if ifsc.value == 0b010000 {
FieldInfo::get(iss, "SET", Some("Synchronous Error Type"), 11, 13).describe(describe_set)?
} else {
FieldInfo::get(iss, "RES0", Some("Reserved"), 11, 13)
};
Ok(vec![res0a, set, fnv, ea, res0b, s1ptw, res0c, ifsc])
}
/// Decodes the ISS value for a Data Abort.
pub fn decode_iss_data_abort(iss: u64) -> Result<Vec<FieldInfo>, DecodeError> {
let isv = FieldInfo::get_bit(iss, "ISV", Some("Instruction Syndrome Valid"), 24)
.describe_bit(describe_isv);
let intruction_syndrome_fields = if isv.as_bit() {
// These fields are part of the instruction syndrome, and are only valid if ISV is true.
let sas = FieldInfo::get(iss, "SAS", Some("Syndrome Access Size"), 22, 24);
let sas_value = match sas.value {
0b00 => SyndromeAccessSize::Byte,
0b01 => SyndromeAccessSize::Halfword,
0b10 => SyndromeAccessSize::Word,
0b11 => SyndromeAccessSize::Doubleword,
_ => unreachable!(),
};
let sas = sas.with_description(sas_value.to_string());
let sse = FieldInfo::get_bit(iss, "SSE", Some("Syndrome Sign Extend"), 21);
let srt = FieldInfo::get(iss, "SRT", Some("Syndrome Register Transfer"), 16, 21);
let sf = FieldInfo::get_bit(iss, "SF", Some("Sixty-Four"), 15).describe_bit(describe_sf);
let ar =
FieldInfo::get_bit(iss, "AR", Some("Acquire/Release"), 14).describe_bit(describe_ar);
vec![sas, sse, srt, sf, ar]
} else {
let res0 = FieldInfo::get(iss, "RES0", Some("Reserved"), 14, 24).check_res0()?;
vec![res0]
};
let vncr = FieldInfo::get_bit(iss, "VNCR", None, 13);
let fnv = FieldInfo::get_bit(iss, "FnV", Some("FAR not Valid"), 10).describe_bit(describe_fnv);
let ea = FieldInfo::get_bit(iss, "EA", Some("External abort type"), 9);
let cm = FieldInfo::get_bit(iss, "CM", Some("Cache Maintenance"), 8);
let s1ptw = FieldInfo::get_bit(iss, "S1PTW", Some("Stage-1 translation table walk"), 7);
let wnr = FieldInfo::get_bit(iss, "WnR", Some("Write not Read"), 6).describe_bit(describe_wnr);
let dfsc =
FieldInfo::get(iss, "DFSC", Some("Data Fault Status Code"), 0, 6).describe(describe_fsc)?;
let set = if dfsc.value == 0b010000 {
FieldInfo::get(iss, "SET", Some("Synchronous Error Type"), 11, 13).describe(describe_set)?
} else {
FieldInfo::get(iss, "RES0", Some("Reserved"), 11, 13)
};
let mut fields = vec![isv];
fields.extend(intruction_syndrome_fields);
fields.extend(vec![vncr, set, fnv, ea, cm, s1ptw, wnr, dfsc]);
Ok(fields)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum SyndromeAccessSize {
Byte = 0b00,
Halfword = 0b01,
Word = 0b10,
Doubleword = 0b11,
}
impl Display for SyndromeAccessSize {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let s = match self {
Self::Byte => "byte",
Self::Halfword => "halfword",
Self::Word => "word",
Self::Doubleword => "doubleword",
};
write!(f, "{}", s)
}
}
fn describe_isv(isv: bool) -> &'static str {
if isv {
"Valid instruction syndrome"
} else {
"No valid instruction syndrome"
}
}
fn describe_sf(sf: bool) -> &'static str {
if sf {
"64-bit wide register"
} else {
"32-bit wide register"
}
}
fn describe_ar(ar: bool) -> &'static str {
if ar {
"Acquire/release semantics"
} else {
"No acquire/release semantics"
}
}
fn describe_fnv(fnv: bool) -> &'static str {
if fnv {
"FAR is not valid, it holds an unknown value"
} else {
"FAR is valid"
}
}
fn describe_wnr(wnr: bool) -> &'static str {
if wnr {
"Abort caused by writing to memory"
} else {
"Abort caused by reading from memory"
}
}
fn describe_fsc(fsc: u64) -> Result<&'static str, DecodeError> {
let description = match fsc {
0b000000 => {
"Address size fault, level 0 of translation or translation table base register."
}
0b000001 => "Address size fault, level 1.",
0b000010 => "Address size fault, level 2.",
0b000011 => "Address size fault, level 3.",
0b000100 => "Translation fault, level 0.",
0b000101 => "Translation fault, level 1.",
0b000110 => "Translation fault, level 2.",
0b000111 => "Translation fault, level 3.",
0b001001 => "Access flag fault, level 1.",
0b001010 => "Access flag fault, level 2.",
0b001011 => "Access flag fault, level 3.",
0b001000 => "Access flag fault, level 0.",
0b001100 => "Permission fault, level 0.",
0b001101 => "Permission fault, level 1.",
0b001110 => "Permission fault, level 2.",
0b001111 => "Permission fault, level 3.",
0b010000 => {
"Synchronous External abort, not on translation table walk or hardware update of \
translation table."
}
0b010001 => "Synchronous Tag Check Fault.",
0b010011 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level -1."
}
0b010100 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 0."
}
0b010101 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 1."
}
0b010110 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 2."
}
0b010111 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 3."
}
0b011000 => {
"Synchronous parity or ECC error on memory access, not on translation table walk."
}
0b011011 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level -1."
}
0b011100 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 0."
}
0b011101 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 1."
}
0b011110 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 2."
}
0b011111 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 3."
}
0b100001 => "Alignment fault.",
0b101001 => "Address size fault, level -1.",
0b101011 => "Translation fault, level -1.",
0b110000 => "TLB conflict abort.",
0b110001 => "Unsupported atomic hardware update fault.",
0b110100 => "IMPLEMENTATION DEFINED fault (Lockdown).",
0b110101 => "IMPLEMENTATION DEFINED fault (Unsupported Exclusive or Atomic access).",
_ => return Err(DecodeError::InvalidFsc { fsc }),
};
Ok(description)
}
fn describe_set(set: u64) -> Result<&'static str, DecodeError> {
Ok(match set {
0b00 => "Recoverable state (UER)",
0b10 => "Uncontainable (UC)",
0b11 => "Restartable state (UEO)",
_ => return Err(DecodeError::InvalidSet { set }),
})
}
|
decode_iss_instruction_abort
|
identifier_name
|
abort.rs
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{DecodeError, FieldInfo};
use std::fmt::{self, Debug, Display, Formatter};
/// Decodes the ISS value for an Instruction Abort.
pub fn decode_iss_instruction_abort(iss: u64) -> Result<Vec<FieldInfo>, DecodeError> {
let res0a = FieldInfo::get(iss, "RES0", Some("Reserved"), 13, 25).check_res0()?;
let fnv = FieldInfo::get_bit(iss, "FnV", Some("FAR not Valid"), 10).describe_bit(describe_fnv);
let ea = FieldInfo::get_bit(iss, "EA", Some("External abort type"), 9);
let res0b = FieldInfo::get_bit(iss, "RES0", Some("Reserved"), 8).check_res0()?;
let s1ptw = FieldInfo::get_bit(iss, "S1PTW", Some("Stage-1 translation table walk"), 7);
let res0c = FieldInfo::get_bit(iss, "RES0", Some("Reserved"), 6).check_res0()?;
let ifsc = FieldInfo::get(iss, "IFSC", Some("Instruction Fault Status Code"), 0, 6)
.describe(describe_fsc)?;
let set = if ifsc.value == 0b010000 {
FieldInfo::get(iss, "SET", Some("Synchronous Error Type"), 11, 13).describe(describe_set)?
} else {
FieldInfo::get(iss, "RES0", Some("Reserved"), 11, 13)
};
Ok(vec![res0a, set, fnv, ea, res0b, s1ptw, res0c, ifsc])
}
/// Decodes the ISS value for a Data Abort.
pub fn decode_iss_data_abort(iss: u64) -> Result<Vec<FieldInfo>, DecodeError> {
let isv = FieldInfo::get_bit(iss, "ISV", Some("Instruction Syndrome Valid"), 24)
.describe_bit(describe_isv);
let intruction_syndrome_fields = if isv.as_bit() {
// These fields are part of the instruction syndrome, and are only valid if ISV is true.
let sas = FieldInfo::get(iss, "SAS", Some("Syndrome Access Size"), 22, 24);
let sas_value = match sas.value {
0b00 => SyndromeAccessSize::Byte,
0b01 => SyndromeAccessSize::Halfword,
0b10 => SyndromeAccessSize::Word,
0b11 => SyndromeAccessSize::Doubleword,
_ => unreachable!(),
};
let sas = sas.with_description(sas_value.to_string());
let sse = FieldInfo::get_bit(iss, "SSE", Some("Syndrome Sign Extend"), 21);
let srt = FieldInfo::get(iss, "SRT", Some("Syndrome Register Transfer"), 16, 21);
let sf = FieldInfo::get_bit(iss, "SF", Some("Sixty-Four"), 15).describe_bit(describe_sf);
let ar =
FieldInfo::get_bit(iss, "AR", Some("Acquire/Release"), 14).describe_bit(describe_ar);
vec![sas, sse, srt, sf, ar]
} else {
let res0 = FieldInfo::get(iss, "RES0", Some("Reserved"), 14, 24).check_res0()?;
vec![res0]
};
let vncr = FieldInfo::get_bit(iss, "VNCR", None, 13);
let fnv = FieldInfo::get_bit(iss, "FnV", Some("FAR not Valid"), 10).describe_bit(describe_fnv);
let ea = FieldInfo::get_bit(iss, "EA", Some("External abort type"), 9);
let cm = FieldInfo::get_bit(iss, "CM", Some("Cache Maintenance"), 8);
let s1ptw = FieldInfo::get_bit(iss, "S1PTW", Some("Stage-1 translation table walk"), 7);
let wnr = FieldInfo::get_bit(iss, "WnR", Some("Write not Read"), 6).describe_bit(describe_wnr);
let dfsc =
FieldInfo::get(iss, "DFSC", Some("Data Fault Status Code"), 0, 6).describe(describe_fsc)?;
let set = if dfsc.value == 0b010000 {
FieldInfo::get(iss, "SET", Some("Synchronous Error Type"), 11, 13).describe(describe_set)?
} else {
FieldInfo::get(iss, "RES0", Some("Reserved"), 11, 13)
};
let mut fields = vec![isv];
fields.extend(intruction_syndrome_fields);
fields.extend(vec![vncr, set, fnv, ea, cm, s1ptw, wnr, dfsc]);
Ok(fields)
}
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
enum SyndromeAccessSize {
Byte = 0b00,
Halfword = 0b01,
Word = 0b10,
Doubleword = 0b11,
}
impl Display for SyndromeAccessSize {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let s = match self {
Self::Byte => "byte",
Self::Halfword => "halfword",
Self::Word => "word",
Self::Doubleword => "doubleword",
};
write!(f, "{}", s)
}
}
fn describe_isv(isv: bool) -> &'static str {
if isv {
"Valid instruction syndrome"
} else {
"No valid instruction syndrome"
}
}
fn describe_sf(sf: bool) -> &'static str {
if sf {
"64-bit wide register"
} else {
"32-bit wide register"
}
}
fn describe_ar(ar: bool) -> &'static str {
if ar {
"Acquire/release semantics"
} else {
"No acquire/release semantics"
}
}
fn describe_fnv(fnv: bool) -> &'static str {
if fnv {
"FAR is not valid, it holds an unknown value"
} else {
"FAR is valid"
}
}
fn describe_wnr(wnr: bool) -> &'static str {
if wnr {
"Abort caused by writing to memory"
} else {
"Abort caused by reading from memory"
}
}
fn describe_fsc(fsc: u64) -> Result<&'static str, DecodeError> {
let description = match fsc {
0b000000 => {
"Address size fault, level 0 of translation or translation table base register."
}
0b000001 => "Address size fault, level 1.",
0b000010 => "Address size fault, level 2.",
0b000011 => "Address size fault, level 3.",
0b000100 => "Translation fault, level 0.",
0b000101 => "Translation fault, level 1.",
0b000110 => "Translation fault, level 2.",
0b000111 => "Translation fault, level 3.",
0b001001 => "Access flag fault, level 1.",
0b001010 => "Access flag fault, level 2.",
0b001011 => "Access flag fault, level 3.",
0b001000 => "Access flag fault, level 0.",
0b001100 => "Permission fault, level 0.",
0b001101 => "Permission fault, level 1.",
0b001110 => "Permission fault, level 2.",
0b001111 => "Permission fault, level 3.",
0b010000 => {
"Synchronous External abort, not on translation table walk or hardware update of \
translation table."
}
0b010001 => "Synchronous Tag Check Fault.",
0b010011 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level -1."
}
0b010100 => {
"Synchronous External abort on translation table walk or hardware update of \
|
}
0b010110 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 2."
}
0b010111 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 3."
}
0b011000 => {
"Synchronous parity or ECC error on memory access, not on translation table walk."
}
0b011011 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level -1."
}
0b011100 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 0."
}
0b011101 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 1."
}
0b011110 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 2."
}
0b011111 => {
"Synchronous parity or ECC error on memory access on translation table walk or \
hardware update of translation table, level 3."
}
0b100001 => "Alignment fault.",
0b101001 => "Address size fault, level -1.",
0b101011 => "Translation fault, level -1.",
0b110000 => "TLB conflict abort.",
0b110001 => "Unsupported atomic hardware update fault.",
0b110100 => "IMPLEMENTATION DEFINED fault (Lockdown).",
0b110101 => "IMPLEMENTATION DEFINED fault (Unsupported Exclusive or Atomic access).",
_ => return Err(DecodeError::InvalidFsc { fsc }),
};
Ok(description)
}
fn describe_set(set: u64) -> Result<&'static str, DecodeError> {
Ok(match set {
0b00 => "Recoverable state (UER)",
0b10 => "Uncontainable (UC)",
0b11 => "Restartable state (UEO)",
_ => return Err(DecodeError::InvalidSet { set }),
})
}
|
translation table, level 0."
}
0b010101 => {
"Synchronous External abort on translation table walk or hardware update of \
translation table, level 1."
|
random_line_split
|
lib.rs
|
//! The `pact_consumer` crate provides tools for writing consumer [Pact
//! tests][pact]. It implements the [V3 Pact specification][spec]. You can also
//! use it as a simple HTTP mocking library for Rust.
//!
//! [pact]: https://docs.pact.io/ [spec]:
//! https://github.com/pact-foundation/pact-specification
//!
//! ## What is Pact?
//!
//! [Pact][pact] is a [cross-language standard][spec] for testing the
//! communication between the consumer of a REST API, and the code that provides
//! that API. Test cases are written from the consumer's perspective, and they
//! can then be exported for testing the provider.
//!
//! The big advantages of Pact are:
//!
//! 1. The mocks you write to test the client can also be reused to verify that
//! the server would actually respond the way the client expects. This gives
//! the end-to-end assurance of integration tests (well, almost), but with
//! the speed and convenience of unit tests.
//! 2. Pact has been implemented in many popular languages, so you can test
//! clients and servers in multiple languages.
//!
//! Whenever possible, we try to use vocabulary similar to the Ruby or
//! JavaScript API for basic concepts, and we try to provide the same behavior.
//! But we offer many handy builder methods to make tests cleaner.
//!
//! ## How to use it
//!
//! To use this crate, add it to your `[dev-dependencies]` in your `Cargo.toml`:
//!
//! ```toml
//! [dev-dependencies]
//! pact_consumer = "0.8"
//! ```
//!
//! Once this is done, you can then write the following inside a function marked
//! with `#[tokio::test]`:
//!
//! ```
//! # tokio_test::block_on(async {
//! use pact_consumer::prelude::*;
//!
//! // Define the Pact for the test, specify the names of the consuming
//! // application and the provider application.
//! let pact = PactBuilder::new("Consumer", "Alice Service")
//! // Start a new interaction. We can add as many interactions as we want.
//! .interaction("a retrieve Mallory request", "", |mut i| async move {
//! // Defines a provider state. It is optional.
//! i.given("there is some good mallory");
//! // Define the request, a GET (default) request to '/mallory'.
//! i.request.path("/mallory");
//! // Define the response we want returned. We assume a 200 OK
//! // response by default.
//! i.response
//! .content_type("text/plain")
//! .body("That is some good Mallory.");
|
//! // Return the interaction builder back to the pact framework
//! i
//! })
//! .await
//! .build();
//! # });
//! ```
//!
//! You can then use an HTTP client like `reqwest` to make requests against your
//! server.
//!
//! ```rust
//! # tokio_test::block_on(async {
//! # use pact_models::pact::Pact;
//! # use std::io::Read;
//! # use pact_consumer::prelude::*;
//! # let alice_service = PactBuilder::new("Consumer", "Alice Service")
//! # // Start a new interaction. We can add as many interactions as we want.
//! # .interaction("a retrieve Mallory request", "", |mut i| async move {
//! # // Defines a provider state. It is optional.
//! # i.given("there is some good mallory");
//! # // Define the request, a GET (default) request to '/mallory'.
//! # i.request.path("/mallory");
//! # // Define the response we want returned. We assume a 200 OK
//! # // response by default.
//! # i.response
//! # .content_type("text/plain")
//! # .body("That is some good Mallory.");
//! # // Return the interaction builder back to the pact framework
//! # i
//! # }).await.start_mock_server();
//!
//! // You would use your actual client code here.
//! let mallory_url = alice_service.path("/mallory");
//! let mut response = reqwest::get(mallory_url).await.expect("could not fetch URL")
//! .text().await.expect("Could not read response body");
//! assert_eq!(response, "That is some good Mallory.");
//!
//! // When `alice_service` goes out of scope, your pact will be validated,
//! // and the test will fail if the mock server didn't receive matching
//! // requests.
//! # });
//! ```
//!
//! ## Matching using patterns
//!
//! You can also use patterns like `like!`, `each_like!` or `term!` to allow
//! more general matches, and you can build complex patterns using the
//! `json_pattern!` macro:
//!
//! ```
//! # tokio_test::block_on(async {
//! use pact_consumer::prelude::*;
//! use pact_consumer::*;
//!
//! PactBuilder::new("quotes client", "quotes service")
//! .interaction("add a new quote to the database", "", |mut i| async move {
//! i.request
//! .post()
//! .path("/quotes")
//! .json_utf8()
//! .json_body(json_pattern!({
//! // Allow the client to send any string as a quote.
//! // When testing the server, use "Eureka!".
//! "quote": like!("Eureka!"),
//! // Allow the client to send any string as an author.
//! // When testing the server, use "Archimedes".
//! "by": like!("Archimedes"),
//! // Allow the client to send an array of strings.
//! // When testing the server, send a single-item array
//! // containing the string "greek".
//! "tags": each_like!("greek"),
//! }));
//!
//! i.response
//! .created()
//! // Return a location of "/quotes/12" to the client. When
//! // testing the server, allow it to return any numeric ID.
//! .header("Location", term!("^/quotes/[0-9]+$", "/quotes/12"));
//! i
//! });
//! # });
//! ```
//!
//! The key insight here is this "pact" can be used to test both the client and
//! the server:
//!
//! - When testing the **client**, we allow the request to be anything which
//! matches the patterns—so `"quote"` can be any string, not just `"Eureka!"`.
//! But we respond with the specified values, such as `"/quotes/12"`.
//! - When testing the **server**, we send the specified values, such as
//! `"Eureka!"`. But we allow the server to respond with anything matching the
//! regular expression `^/quotes/[0-9]+$`, because we don't know what database
//! ID it will use.
//!
//! Also, when testing the server, we may need to set up particular database
//! fixtures. This can be done using the string passed to `given` in the
//! examples above.
//!
//! ## Testing using domain objects
//!
//! Normally, it's best to generate your JSON using your actual domain objects.
//! This is easier, and it reduces duplication in your code.
//!
//! ```
//! use pact_consumer::prelude::*;
//! use pact_consumer::{each_like, each_like_helper, json_pattern};
//! use serde::{Deserialize, Serialize};
//!
//! /// Our application's domain object representing a user.
//! #[derive(Deserialize, Serialize)]
//! struct User {
//! /// All users have this field.
//! name: String,
//!
//! /// The server may omit this field when sending JSON, or it may send it
//! /// as `null`.
//! comment: Option<String>,
//! }
//!
//! // Create our example user using our normal application objects.
//! let example = User {
//! name: "J. Smith".to_owned(),
//! comment: None,
//! };
//!
//! # tokio_test::block_on(async move {
//! PactBuilder::new("consumer", "provider")
//! .interaction("get all users", "", |mut i| async move {
//! i.given("a list of users in the database");
//! i.request.path("/users");
//! i.response
//! .json_utf8()
//! .json_body(each_like!(
//! // Here, `strip_null_fields` will remove `comment` from
//! // the generated JSON, allowing our pattern to match
//! // missing comments, null comments, and comments with
//! // strings.
//! strip_null_fields(serde_json::json!(example)),
//! ));
//! i
//! })
//! .await
//! .build();
//! # });
//! ```
//!
//! ## Testing messages
//!
//! Testing message consumers is supported. There are two types: asynchronous messages and synchronous request/response.
//!
//! ### Asynchronous messages
//!
//! Asynchronous messages are you normal type of single shot or fire and forget type messages. They are typically sent to a
//! message queue or topic as a notification or event. With Pact tests, we will be testing that our consumer of the messages
//! works with the messages setup as the expectations in test. This should be the message handler code that processes the
//! actual messages that come off the message queue in production.
//!
//! The generated Pact file from the test run can then be used to verify whatever created the messages adheres to the Pact
//! file.
//!
//! ```rust
//! # tokio_test::block_on(async {
//! use pact_consumer::prelude::*;
//! use pact_consumer::*;
//! use expectest::prelude::*;
//! use serde_json::{Value, from_slice};
//!
//! // Define the Pact for the test (you can setup multiple interactions by chaining the given or message_interaction calls)
//! // For messages we need to use the V4 Pact format.
//! let mut pact_builder = PactBuilder::new_v4("message-consumer", "message-provider"); // Define the message consumer and provider by name
//! pact_builder
//! // Adds an interaction given the message description and type.
//! .message_interaction("Mallory Message", "core/interaction/message", |mut i| async move {
//! // defines a provider state. It is optional.
//! i.given("there is some good mallory".to_string());
//! // Can set the test name (optional)
//! i.test_name("a_message_consumer_side_of_a_pact_goes_a_little_something_like_this");
//! // Set the contents of the message. Here we use a JSON pattern, so that matching rules are applied
//! i.json_body(json_pattern!({
//! "mallory": like!("That is some good Mallory.")
//! }));
//! // Need to return the mutated interaction builder
//! i
//! })
//! .await;
//!
//! // This will return each message configured with the Pact builder. We need to process them
//! // with our message handler (it should be the one used to actually process your messages).
//! for message in pact_builder.messages() {
//! let bytes = message.contents.contents.value().unwrap();
//!
//! // Process the message here as it would if it came off the queue
//! let message: Value = serde_json::from_slice(&bytes).unwrap();
//!
//! // Make some assertions on the processed value
//! expect!(message.as_object().unwrap().get("mallory")).to(be_some().value("That is some good Mallory."));
//! }
//! # });
//! ```
//!
//! ### Synchronous request/response messages
//!
//! Synchronous request/response messages are a form of message interchange where a request message is sent to another service and
//! one or more response messages are returned. Examples of this would be things like Websockets and gRPC.
//!
//! ```rust
//! # use bytes::Bytes;
//! # struct MessageHandler {}
//! # struct MockProvider { pub message: Bytes }
//! # impl MessageHandler { fn process(bytes: Bytes, provider: &MockProvider) -> anyhow::Result<&str> { Ok("That is some good Mallory.") } }
//! # tokio_test::block_on(async {
//! use pact_consumer::prelude::*;
//! use pact_consumer::*;
//! use expectest::prelude::*;
//! use serde_json::{Value, from_slice};
//!
//! // Define the Pact for the test (you can setup multiple interactions by chaining the given or message_interaction calls)
//! // For synchronous messages we also need to use the V4 Pact format.
//! let mut pact_builder = PactBuilder::new_v4("message-consumer", "message-provider"); // Define the message consumer and provider by name
//! pact_builder
//! // Adds an interaction given the message description and type.
//! .synchronous_message_interaction("Mallory Message", "core/interaction/synchronous-message", |mut i| async move {
//! // defines a provider state. It is optional.
//! i.given("there is some good mallory".to_string());
//! // Can set the test name (optional)
//! i.test_name("a_synchronous_message_consumer_side_of_a_pact_goes_a_little_something_like_this");
//! // Set the contents of the request message. Here we use a JSON pattern, so that matching rules are applied.
//! // This is the request message that is going to be forwarded to the provider
//! i.request_json_body(json_pattern!({
//! "requestFor": like!("Some good Mallory, please.")
//! }));
//! // Add a response message we expect the provider to return. You can call this multiple times to add multiple messages.
//! i.response_json_body(json_pattern!({
//! "mallory": like!("That is some good Mallory.")
//! }));
//! // Need to return the mutated interaction builder
//! i
//! })
//! .await;
//!
//! // For our test we want to invoke our message handling code that is going to initialise the request
//! // to the provider with the request message. But we need some mechanism to mock the response
//! // with the resulting response message so we can confirm our message handler works with it.
//! for message in pact_builder.synchronous_messages() {
//! // the request message we must make
//! let request_message_bytes = message.request.contents.value().unwrap();
//! // the response message we expect to receive from the provider
//! let response_message_bytes = message.response.first().unwrap().contents.value().unwrap();
//!
//! // We use a mock here, assuming there is a Trait that controls the response message that our
//! // mock can implement.
//! let mock_provider = MockProvider { message: response_message_bytes };
//! // Invoke our message handler to send the request message from the Pact interaction and then
//! // wait for the response message. In this case it will be the response via the mock provider.
//! let response = MessageHandler::process(request_message_bytes, &mock_provider);
//!
//! // Make some assertions on the processed value
//! expect!(response).to(be_ok().value("That is some good Mallory."));
//! }
//! # });
//! ```
//!
//! ## Using Pact plugins
//!
//! The consumer test builders support using Pact plugins. Plugins are defined in the [Pact plugins project](https://github.com/pact-foundation/pact-plugins).
//! To use plugins requires the use of Pact specification V4 Pacts.
//!
//! To use a plugin, first you need to let the builder know to load the plugin and then configure the interaction based on
//! the requirements for the plugin. Each plugin may have different requirements, so you will have to consult the plugin
//! docs on what is required. The plugins will be loaded from the plugin directory. By default, this is `~/.pact/plugins` or
//! the value of the `PACT_PLUGIN_DIR` environment variable.
//!
//! There are generic functions that take JSON data structures and pass these on to the plugin to
//! setup the interaction. For request/response HTTP interactions, there is the `contents` function on the request and
//! response builders. For message interactions, the function is called `contents_from`.
//!
//! For example, if we use the CSV plugin from the plugins project, our test would look like:
//!
//! ```no_run
//! use expectest::prelude::*;
//! use regex::Regex;
//! use pact_consumer::prelude::*;
//! #[tokio::test]
//! async fn test_csv_client() {
//! // Create a new V4 Pact
//! let csv_service = PactBuilder::new_v4("CsvClient", "CsvServer")
//! // Tell the builder we are using the CSV plugin
//! .using_plugin("csv", None).await
//! // Add the interaction for the CSV request
//! .interaction("request for a CSV report", "core/interaction/http", |mut i| async move {
//! // Path to the request we are going to make
//! i.request.path("/reports/report001.csv");
//! // Response we expect back
//! i.response
//! .ok()
//! // We use the generic "contents" function to send the expected response data to the plugin in JSON format
//! .contents(ContentType::from("text/csv"), json!({
//! "csvHeaders": false,
//! "column:1": "matching(type,'Name')",
//! "column:2": "matching(number,100)",
//! "column:3": "matching(datetime, 'yyyy-MM-dd','2000-01-01')"
//! })).await;
//! i.clone()
//! })
//! .await
//! // Now start the mock server
//! .start_mock_server_async()
//! .await;
//!
//! // Now we can make our actual request for the CSV file and validate the response
//! let client = CsvClient::new(csv_service.url().clone());
//! let data = client.fetch("report001.csv").await.unwrap();
//!
//! let columns: Vec<&str> = data.trim().split(",").collect();
//! expect!(columns.get(0)).to(be_some().value(&"Name"));
//! expect!(columns.get(1)).to(be_some().value(&"100"));
//! let date = columns.get(2).unwrap();
//! let re = Regex::new("\\d{4}-\\d{2}-\\d{2}").unwrap();
//! expect!(re.is_match(date)).to(be_true());
//! }
//! ```
//!
//! ## More Info
//!
//! For more advice on writing good pacts, see [Best Practices][].
//!
//! [Best Practices]: https://docs.pact.io/best_practices/consumer.html
#![warn(missing_docs)]
// Child modules which define macros (must be first because macros are resolved)
// in source inclusion order).
#[macro_use]
pub mod patterns;
#[cfg(test)]
#[macro_use]
mod test_support;
// Other child modules.
pub mod builders;
pub mod mock_server;
pub mod util;
/// A "prelude" or a default list of import types to include. This includes
/// the basic DSL, but it avoids including rarely-used types.
///
/// ```
/// use pact_consumer::prelude::*;
/// ```
pub mod prelude {
pub use crate::builders::{HttpPartBuilder, PactBuilder};
pub use crate::mock_server::{StartMockServer, ValidatingMockServer};
pub use crate::patterns::{EachLike, Like, Term};
pub use crate::patterns::{JsonPattern, Pattern, StringPattern};
pub use crate::patterns::{DateTime};
pub use crate::util::strip_null_fields;
}
/// Consumer version
pub const PACT_CONSUMER_VERSION: Option<&'static str> = option_env!("CARGO_PKG_VERSION");
|
random_line_split
|
|
game.rs
|
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use slackbot::{CommandHandler, Sender};
use rand::{self, Rng};
pub struct GameCommandHandler {
|
GameCommandHandler {
registered_users: user_store
}
}
}
impl CommandHandler for GameCommandHandler {
/// Drafts two random teams from the registered users and announces
/// the match-up in the channel.
///
/// `args[0] == "1v1"` selects one-player teams; anything else (or no
/// arguments) defaults to 2v2. When fewer than `team_size * 2` users
/// are registered, responds with an error message instead.
fn handle(&mut self, sender: &mut Sender, args: &Vec<String>) {
let team_size = if args.len() > 0 && args[0] == "1v1" { 1 } else { 2 };
// Read-only borrow of the shared user registry for the whole draft.
let users = self.registered_users.borrow();
if users.len() < team_size * 2 {
sender.respond_in_channel(format!("Not enough players! Need {0} players to be registered to start a game.", team_size * 2)).unwrap();
} else {
// Collect references to the registered names, shuffle them, then
// take the first two disjoint team-sized windows as the teams.
let mut users = &mut users.iter().collect::<Vec<_>>()[..];
let mut rng = rand::thread_rng();
rng.shuffle(users);
let team1 = &users[..team_size];
let team2 = &users[team_size..team_size * 2];
sender.respond_in_channel(format!("⚽ We have a game! {:?} vs {:?}", team1, team2)).unwrap();
}
}
}
|
registered_users: Rc<RefCell<HashSet<String>>>
}
impl GameCommandHandler {
pub fn new(user_store: Rc<RefCell<HashSet<String>>>) -> Self {
|
random_line_split
|
game.rs
|
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use slackbot::{CommandHandler, Sender};
use rand::{self, Rng};
pub struct GameCommandHandler {
registered_users: Rc<RefCell<HashSet<String>>>
}
impl GameCommandHandler {
pub fn new(user_store: Rc<RefCell<HashSet<String>>>) -> Self {
GameCommandHandler {
registered_users: user_store
}
}
}
impl CommandHandler for GameCommandHandler {
fn handle(&mut self, sender: &mut Sender, args: &Vec<String>) {
let team_size = if args.len() > 0 && args[0] == "1v1" { 1 } else
|
;
let users = self.registered_users.borrow();
if users.len() < team_size * 2 {
sender.respond_in_channel(format!("Not enough players! Need {0} players to be registered to start a game.", team_size * 2)).unwrap();
} else {
let mut users = &mut users.iter().collect::<Vec<_>>()[..];
let mut rng = rand::thread_rng();
rng.shuffle(users);
let team1 = &users[..team_size];
let team2 = &users[team_size..team_size * 2];
sender.respond_in_channel(format!("⚽ We have a game! {:?} vs {:?}", team1, team2)).unwrap();
}
}
}
|
{ 2 }
|
conditional_block
|
game.rs
|
use std::cell::RefCell;
use std::collections::HashSet;
use std::rc::Rc;
use slackbot::{CommandHandler, Sender};
use rand::{self, Rng};
pub struct GameCommandHandler {
registered_users: Rc<RefCell<HashSet<String>>>
}
impl GameCommandHandler {
pub fn new(user_store: Rc<RefCell<HashSet<String>>>) -> Self {
GameCommandHandler {
registered_users: user_store
}
}
}
impl CommandHandler for GameCommandHandler {
fn
|
(&mut self, sender: &mut Sender, args: &Vec<String>) {
let team_size = if args.len() > 0 && args[0] == "1v1" { 1 } else { 2 };
let users = self.registered_users.borrow();
if users.len() < team_size * 2 {
sender.respond_in_channel(format!("Not enough players! Need {0} players to be registered to start a game.", team_size * 2)).unwrap();
} else {
let mut users = &mut users.iter().collect::<Vec<_>>()[..];
let mut rng = rand::thread_rng();
rng.shuffle(users);
let team1 = &users[..team_size];
let team2 = &users[team_size..team_size * 2];
sender.respond_in_channel(format!("⚽ We have a game! {:?} vs {:?}", team1, team2)).unwrap();
}
}
}
|
handle
|
identifier_name
|
hash_join.rs
|
// http://rosettacode.org/wiki/Hash_join
use std::collections::HashMap;
type LeftTable<'a> = Vec<(i64, &'a str)>;
type RightTable<'a> = Vec<(&'a str, &'a str)>;
type ResultTable<'a> = Vec<(i64, &'a str, &'a str)>;

/// Classic two-phase hash join of `s` (age, name) against `r`
/// (name, nemesis), joined on `name`.
///
/// Returns one `(age, name, nemesis)` row per matching pair, in the
/// order of the left table, with each left row's matches in the order
/// they appear in the right table.
fn hash_join<'a>(s: LeftTable<'a>, r: RightTable<'a>) -> ResultTable<'a> {
    // Build phase: index the right table by join key. The entry API
    // avoids the contains_key + get_mut double lookup.
    let mut index: HashMap<&str, Vec<&str>> = HashMap::new();
    for (name, nemesis) in r {
        index.entry(name).or_insert_with(Vec::new).push(nemesis);
    }
    // Probe phase: stream the left table against the index.
    let mut result = Vec::new();
    for (age, name) in s {
        if let Some(nemeses) = index.get(name) {
            for &nemesis in nemeses {
                result.push((age, name, nemesis));
            }
        }
    }
    result
}
#[cfg(not(test))]
pub fn
|
() {
let table1 = vec![(27, "Jonah"),
(18, "Alan"),
(28, "Glory"),
(18, "Popeye"),
(28, "Alan")];
let table2 = vec![("Jonah", "Whales"),
("Jonah", "Spiders"),
("Alan", "Ghosts"),
("Alan", "Zombies"),
("Glory", "Buffy")];
let result = hash_join(table1, table2);
for (age, name, nemesis) in result {
println!("{}, {}, {}", age, name, nemesis);
}
}
#[test]
pub fn test() {
    // Two left rows, but only "hello" has matches in the right table.
    let ages = vec![(0, "hello"), (1, "world")];
    let nemeses = vec![("hello", "rust"), ("hello", "cargo")];
    let joined = hash_join(ages, nemeses);
    assert!(joined == vec![(0, "hello", "rust"), (0, "hello", "cargo")]);
}
|
main
|
identifier_name
|
hash_join.rs
|
// http://rosettacode.org/wiki/Hash_join
use std::collections::HashMap;
type LeftTable<'a> = Vec<(i64, &'a str)>;
type RightTable<'a> = Vec<(&'a str, &'a str)>;
type ResultTable<'a> = Vec<(i64, &'a str, &'a str)>;
fn hash_join<'a>(s: LeftTable<'a>, r: RightTable<'a>) -> ResultTable<'a>
|
}
return result;
}
#[cfg(not(test))]
pub fn main() {
let table1 = vec![(27, "Jonah"),
(18, "Alan"),
(28, "Glory"),
(18, "Popeye"),
(28, "Alan")];
let table2 = vec![("Jonah", "Whales"),
("Jonah", "Spiders"),
("Alan", "Ghosts"),
("Alan", "Zombies"),
("Glory", "Buffy")];
let result = hash_join(table1, table2);
for (age, name, nemesis) in result {
println!("{}, {}, {}", age, name, nemesis);
}
}
#[test]
pub fn test() {
let t1 = vec![(0, "hello"),
(1, "world")];
let t2 = vec![("hello", "rust"),
("hello", "cargo")];
let r = hash_join(t1, t2);
assert!(r == vec![(0, "hello", "rust"), (0, "hello", "cargo")]);
}
|
{
let mut hash_map: HashMap<&str, Vec<&str>> = HashMap::new();
//hash phase
for (name, nemesis) in r {
if hash_map.contains_key(name) {
hash_map.get_mut(name).unwrap().push(nemesis);
} else {
hash_map.insert(name, vec![nemesis]);
}
}
let mut result = vec![];
//join phase
for (age, name) in s {
if let Some(v) = hash_map.get(name) {
for nemesis in v {
result.push((age, name, *nemesis));
}
}
|
identifier_body
|
hash_join.rs
|
// http://rosettacode.org/wiki/Hash_join
use std::collections::HashMap;
type LeftTable<'a> = Vec<(i64, &'a str)>;
type RightTable<'a> = Vec<(&'a str, &'a str)>;
type ResultTable<'a> = Vec<(i64, &'a str, &'a str)>;
fn hash_join<'a>(s: LeftTable<'a>, r: RightTable<'a>) -> ResultTable<'a> {
let mut hash_map: HashMap<&str, Vec<&str>> = HashMap::new();
//hash phase
for (name, nemesis) in r {
if hash_map.contains_key(name) {
hash_map.get_mut(name).unwrap().push(nemesis);
} else {
hash_map.insert(name, vec![nemesis]);
|
let mut result = vec![];
//join phase
for (age, name) in s {
if let Some(v) = hash_map.get(name) {
for nemesis in v {
result.push((age, name, *nemesis));
}
}
}
return result;
}
#[cfg(not(test))]
pub fn main() {
let table1 = vec![(27, "Jonah"),
(18, "Alan"),
(28, "Glory"),
(18, "Popeye"),
(28, "Alan")];
let table2 = vec![("Jonah", "Whales"),
("Jonah", "Spiders"),
("Alan", "Ghosts"),
("Alan", "Zombies"),
("Glory", "Buffy")];
let result = hash_join(table1, table2);
for (age, name, nemesis) in result {
println!("{}, {}, {}", age, name, nemesis);
}
}
#[test]
pub fn test() {
let t1 = vec![(0, "hello"),
(1, "world")];
let t2 = vec![("hello", "rust"),
("hello", "cargo")];
let r = hash_join(t1, t2);
assert!(r == vec![(0, "hello", "rust"), (0, "hello", "cargo")]);
}
|
}
}
|
random_line_split
|
hash_join.rs
|
// http://rosettacode.org/wiki/Hash_join
use std::collections::HashMap;
type LeftTable<'a> = Vec<(i64, &'a str)>;
type RightTable<'a> = Vec<(&'a str, &'a str)>;
type ResultTable<'a> = Vec<(i64, &'a str, &'a str)>;
fn hash_join<'a>(s: LeftTable<'a>, r: RightTable<'a>) -> ResultTable<'a> {
let mut hash_map: HashMap<&str, Vec<&str>> = HashMap::new();
//hash phase
for (name, nemesis) in r {
if hash_map.contains_key(name) {
hash_map.get_mut(name).unwrap().push(nemesis);
} else
|
}
let mut result = vec![];
//join phase
for (age, name) in s {
if let Some(v) = hash_map.get(name) {
for nemesis in v {
result.push((age, name, *nemesis));
}
}
}
return result;
}
#[cfg(not(test))]
pub fn main() {
let table1 = vec![(27, "Jonah"),
(18, "Alan"),
(28, "Glory"),
(18, "Popeye"),
(28, "Alan")];
let table2 = vec![("Jonah", "Whales"),
("Jonah", "Spiders"),
("Alan", "Ghosts"),
("Alan", "Zombies"),
("Glory", "Buffy")];
let result = hash_join(table1, table2);
for (age, name, nemesis) in result {
println!("{}, {}, {}", age, name, nemesis);
}
}
#[test]
pub fn test() {
let t1 = vec![(0, "hello"),
(1, "world")];
let t2 = vec![("hello", "rust"),
("hello", "cargo")];
let r = hash_join(t1, t2);
assert!(r == vec![(0, "hello", "rust"), (0, "hello", "cargo")]);
}
|
{
hash_map.insert(name, vec![nemesis]);
}
|
conditional_block
|
config.rs
|
use alloc::boxed::Box;
use fs::{KScheme, Resource, SliceResource, SliceMutResource};
use network::common::{DNS_ADDR, IP_ADDR, IP_ROUTER_ADDR, IP_SUBNET, MAC_ADDR};
use system::error::{Error, ENOENT, Result};
use system::syscall::{MODE_DIR, MODE_FILE};
/// Network configuration scheme
pub struct
|
;
impl KScheme for NetConfigScheme {
fn scheme(&self) -> &str {
"netcfg"
}
/// Opens a network-configuration entry.
///
/// The part of `url` after the first ':' selects the setting; each
/// named entry is a mutable byte-slice view over the corresponding
/// global address (`DNS_ADDR`, `IP_ADDR`, ...), and an empty path
/// yields a read-only directory listing.
///
/// NOTE(review): the directory listing advertises only "dns", "ip"
/// and "mac", although "ip_router" and "ip_subnet" can also be
/// opened — confirm whether the listing should include them.
fn open(&mut self, url: &str, _: usize) -> Result<Box<Resource>> {
// Everything after the scheme prefix ("netcfg:") names the entry.
match url.splitn(2, ":").nth(1).unwrap_or("") {
"dns" => Ok(Box::new(SliceMutResource::new("netcfg:dns", unsafe { &mut DNS_ADDR.bytes }, MODE_FILE))),
"ip" => Ok(Box::new(SliceMutResource::new("netcfg:ip", unsafe { &mut IP_ADDR.bytes }, MODE_FILE))),
"ip_router" => Ok(Box::new(SliceMutResource::new("netcfg:ip_router", unsafe { &mut IP_ROUTER_ADDR.bytes }, MODE_FILE))),
"ip_subnet" => Ok(Box::new(SliceMutResource::new("netcfg:ip_subnet", unsafe { &mut IP_SUBNET.bytes }, MODE_FILE))),
"mac" => Ok(Box::new(SliceMutResource::new("netcfg:mac", unsafe { &mut MAC_ADDR.bytes }, MODE_FILE))),
"" => Ok(Box::new(SliceResource::new("netcfg:", b"dns\nip\nmac", MODE_DIR))),
_ => Err(Error::new(ENOENT))
}
}
}
|
NetConfigScheme
|
identifier_name
|
config.rs
|
use alloc::boxed::Box;
use fs::{KScheme, Resource, SliceResource, SliceMutResource};
use network::common::{DNS_ADDR, IP_ADDR, IP_ROUTER_ADDR, IP_SUBNET, MAC_ADDR};
use system::error::{Error, ENOENT, Result};
use system::syscall::{MODE_DIR, MODE_FILE};
/// Network configuration scheme
|
"netcfg"
}
fn open(&mut self, url: &str, _: usize) -> Result<Box<Resource>> {
match url.splitn(2, ":").nth(1).unwrap_or("") {
"dns" => Ok(Box::new(SliceMutResource::new("netcfg:dns", unsafe { &mut DNS_ADDR.bytes }, MODE_FILE))),
"ip" => Ok(Box::new(SliceMutResource::new("netcfg:ip", unsafe { &mut IP_ADDR.bytes }, MODE_FILE))),
"ip_router" => Ok(Box::new(SliceMutResource::new("netcfg:ip_router", unsafe { &mut IP_ROUTER_ADDR.bytes }, MODE_FILE))),
"ip_subnet" => Ok(Box::new(SliceMutResource::new("netcfg:ip_subnet", unsafe { &mut IP_SUBNET.bytes }, MODE_FILE))),
"mac" => Ok(Box::new(SliceMutResource::new("netcfg:mac", unsafe { &mut MAC_ADDR.bytes }, MODE_FILE))),
"" => Ok(Box::new(SliceResource::new("netcfg:", b"dns\nip\nmac", MODE_DIR))),
_ => Err(Error::new(ENOENT))
}
}
}
|
pub struct NetConfigScheme;
impl KScheme for NetConfigScheme {
fn scheme(&self) -> &str {
|
random_line_split
|
config.rs
|
use alloc::boxed::Box;
use fs::{KScheme, Resource, SliceResource, SliceMutResource};
use network::common::{DNS_ADDR, IP_ADDR, IP_ROUTER_ADDR, IP_SUBNET, MAC_ADDR};
use system::error::{Error, ENOENT, Result};
use system::syscall::{MODE_DIR, MODE_FILE};
/// Network configuration scheme
pub struct NetConfigScheme;
impl KScheme for NetConfigScheme {
fn scheme(&self) -> &str
|
fn open(&mut self, url: &str, _: usize) -> Result<Box<Resource>> {
match url.splitn(2, ":").nth(1).unwrap_or("") {
"dns" => Ok(Box::new(SliceMutResource::new("netcfg:dns", unsafe { &mut DNS_ADDR.bytes }, MODE_FILE))),
"ip" => Ok(Box::new(SliceMutResource::new("netcfg:ip", unsafe { &mut IP_ADDR.bytes }, MODE_FILE))),
"ip_router" => Ok(Box::new(SliceMutResource::new("netcfg:ip_router", unsafe { &mut IP_ROUTER_ADDR.bytes }, MODE_FILE))),
"ip_subnet" => Ok(Box::new(SliceMutResource::new("netcfg:ip_subnet", unsafe { &mut IP_SUBNET.bytes }, MODE_FILE))),
"mac" => Ok(Box::new(SliceMutResource::new("netcfg:mac", unsafe { &mut MAC_ADDR.bytes }, MODE_FILE))),
"" => Ok(Box::new(SliceResource::new("netcfg:", b"dns\nip\nmac", MODE_DIR))),
_ => Err(Error::new(ENOENT))
}
}
}
|
{
"netcfg"
}
|
identifier_body
|
vga.rs
|
//! VGA helpers
use core::fmt::Write;
use log::*;
use spin::Mutex;
const ROWS: usize = 25;
const COLS: usize = 80;
pub struct VgaWriter {
vmem: *mut u8,
offset: usize,
}
impl VgaWriter {
/// Creates a writer over the text buffer at `vmem` and blanks it.
///
/// # Safety
/// `vmem` must be valid for reads and writes of `ROWS * COLS * 2`
/// bytes for the lifetime of the writer (presumably the mapped VGA
/// text buffer — TODO confirm the caller's mapping).
pub unsafe fn new(vmem: *mut u8) -> VgaWriter {
let mut vga_writer = VgaWriter { vmem, offset: 0 };
// Start from a blank screen with the cursor at the top-left.
vga_writer.clear();
vga_writer
}
/// Blanks every row of the screen and resets the cursor to the
/// top-left cell.
pub fn clear(&mut self) {
    (0..ROWS).for_each(|row| self.clear_line(row));
    self.offset = 0;
}
/// Blanks a single text row by zeroing the character byte of every
/// cell in it (the interleaved attribute bytes are left untouched).
fn clear_line(&mut self, line: usize) {
assert!(line < ROWS);
for i in 0..COLS {
unsafe {
// Each cell is 2 bytes (character, attribute); write only the
// character byte. SAFETY: `i + line * COLS < ROWS * COLS`, so
// the write stays inside the buffer the constructor requires.
*self.vmem.offset(2 * (i + line * COLS) as isize) = 0;
}
}
}
/// Scrolls the screen content up by `lines` rows, clearing the rows
/// freed at the bottom and moving the cursor offset up to match.
fn scroll(&mut self, lines: usize) {
if lines == 0 {
return;
}
// Never scroll by more than a full screen.
let lines = core::cmp::min(lines, ROWS);
if lines == ROWS {
// Scrolling the whole screen away is just a clear.
self.clear();
return;
}
// SAFETY: source and destination both lie inside the
// ROWS * COLS * 2-byte buffer the constructor requires, and
// `ptr::copy` permits the overlapping ranges.
unsafe {
core::ptr::copy(
self.vmem.offset((lines * COLS * 2) as isize),
self.vmem,
(ROWS - lines) * COLS * 2,
);
}
// Blank the rows that scrolled into view at the bottom.
for i in (ROWS - lines)..ROWS {
self.clear_line(i);
}
// Keep the cursor on the same on-screen text, clamped to 0.
self.offset = self.offset.saturating_sub(lines * COLS);
}
}
unsafe impl Send for VgaWriter {}
impl Write for VgaWriter {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
for c in s.chars() {
if self.offset >= ROWS * COLS {
self.scroll(1);
assert!(self.offset < ROWS * COLS);
}
if c == '\n'
|
let b = if c.is_ascii() { c as u8 } else { '?' as u8 };
unsafe {
*self.vmem.offset(2 * self.offset as isize) = b;
}
self.offset += 1;
}
Ok(())
}
}
pub struct VgaLog {
writer: Mutex<VgaWriter>,
}
impl VgaLog {
pub fn new(writer: VgaWriter) -> VgaLog {
VgaLog {
writer: Mutex::new(writer),
}
}
}
impl Log for VgaLog {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
let mut writer = self.writer.lock();
let _ = writeln!(
&mut writer,
"[{}] {}: {}",
level_as_string(record.level()),
record.target(),
record.args()
);
}
fn flush(&self) {
// No-op since we write directly to screen.
}
}
fn level_as_string(level: Level) -> &'static str {
use Level::*;
match level {
Error => "ERROR",
Warn => " WARN",
Info => " INFO",
Debug => "DEBUG",
Trace => "TRACE",
}
}
|
{
self.offset = ((self.offset + COLS) / COLS) * COLS;
continue;
}
|
conditional_block
|
vga.rs
|
//! VGA helpers
use core::fmt::Write;
use log::*;
use spin::Mutex;
const ROWS: usize = 25;
const COLS: usize = 80;
pub struct VgaWriter {
vmem: *mut u8,
offset: usize,
}
impl VgaWriter {
pub unsafe fn new(vmem: *mut u8) -> VgaWriter {
let mut vga_writer = VgaWriter { vmem, offset: 0 };
vga_writer.clear();
vga_writer
}
pub fn clear(&mut self) {
for i in 0..ROWS {
self.clear_line(i);
}
self.offset = 0;
}
fn clear_line(&mut self, line: usize) {
assert!(line < ROWS);
for i in 0..COLS {
unsafe {
*self.vmem.offset(2 * (i + line * COLS) as isize) = 0;
}
}
}
fn scroll(&mut self, lines: usize)
|
self.clear_line(i);
}
self.offset = self.offset.saturating_sub(lines * COLS);
}
}
unsafe impl Send for VgaWriter {}
impl Write for VgaWriter {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
for c in s.chars() {
if self.offset >= ROWS * COLS {
self.scroll(1);
assert!(self.offset < ROWS * COLS);
}
if c == '\n' {
self.offset = ((self.offset + COLS) / COLS) * COLS;
continue;
}
let b = if c.is_ascii() { c as u8 } else { '?' as u8 };
unsafe {
*self.vmem.offset(2 * self.offset as isize) = b;
}
self.offset += 1;
}
Ok(())
}
}
pub struct VgaLog {
writer: Mutex<VgaWriter>,
}
impl VgaLog {
pub fn new(writer: VgaWriter) -> VgaLog {
VgaLog {
writer: Mutex::new(writer),
}
}
}
impl Log for VgaLog {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
let mut writer = self.writer.lock();
let _ = writeln!(
&mut writer,
"[{}] {}: {}",
level_as_string(record.level()),
record.target(),
record.args()
);
}
fn flush(&self) {
// No-op since we write directly to screen.
}
}
fn level_as_string(level: Level) -> &'static str {
use Level::*;
match level {
Error => "ERROR",
Warn => " WARN",
Info => " INFO",
Debug => "DEBUG",
Trace => "TRACE",
}
}
|
{
if lines == 0 {
return;
}
let lines = core::cmp::min(lines, ROWS);
if lines == ROWS {
self.clear();
return;
}
unsafe {
core::ptr::copy(
self.vmem.offset((lines * COLS * 2) as isize),
self.vmem,
(ROWS - lines) * COLS * 2,
);
}
for i in (ROWS - lines)..ROWS {
|
identifier_body
|
vga.rs
|
//! VGA helpers
use core::fmt::Write;
use log::*;
|
const ROWS: usize = 25;
const COLS: usize = 80;
pub struct VgaWriter {
vmem: *mut u8,
offset: usize,
}
impl VgaWriter {
pub unsafe fn new(vmem: *mut u8) -> VgaWriter {
let mut vga_writer = VgaWriter { vmem, offset: 0 };
vga_writer.clear();
vga_writer
}
pub fn clear(&mut self) {
for i in 0..ROWS {
self.clear_line(i);
}
self.offset = 0;
}
fn clear_line(&mut self, line: usize) {
assert!(line < ROWS);
for i in 0..COLS {
unsafe {
*self.vmem.offset(2 * (i + line * COLS) as isize) = 0;
}
}
}
fn scroll(&mut self, lines: usize) {
if lines == 0 {
return;
}
let lines = core::cmp::min(lines, ROWS);
if lines == ROWS {
self.clear();
return;
}
unsafe {
core::ptr::copy(
self.vmem.offset((lines * COLS * 2) as isize),
self.vmem,
(ROWS - lines) * COLS * 2,
);
}
for i in (ROWS - lines)..ROWS {
self.clear_line(i);
}
self.offset = self.offset.saturating_sub(lines * COLS);
}
}
unsafe impl Send for VgaWriter {}
impl Write for VgaWriter {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
for c in s.chars() {
if self.offset >= ROWS * COLS {
self.scroll(1);
assert!(self.offset < ROWS * COLS);
}
if c == '\n' {
self.offset = ((self.offset + COLS) / COLS) * COLS;
continue;
}
let b = if c.is_ascii() { c as u8 } else { '?' as u8 };
unsafe {
*self.vmem.offset(2 * self.offset as isize) = b;
}
self.offset += 1;
}
Ok(())
}
}
pub struct VgaLog {
writer: Mutex<VgaWriter>,
}
impl VgaLog {
pub fn new(writer: VgaWriter) -> VgaLog {
VgaLog {
writer: Mutex::new(writer),
}
}
}
impl Log for VgaLog {
fn enabled(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
let mut writer = self.writer.lock();
let _ = writeln!(
&mut writer,
"[{}] {}: {}",
level_as_string(record.level()),
record.target(),
record.args()
);
}
fn flush(&self) {
// No-op since we write directly to screen.
}
}
fn level_as_string(level: Level) -> &'static str {
use Level::*;
match level {
Error => "ERROR",
Warn => " WARN",
Info => " INFO",
Debug => "DEBUG",
Trace => "TRACE",
}
}
|
use spin::Mutex;
|
random_line_split
|
vga.rs
|
//! VGA helpers
use core::fmt::Write;
use log::*;
use spin::Mutex;
const ROWS: usize = 25;
const COLS: usize = 80;
pub struct VgaWriter {
vmem: *mut u8,
offset: usize,
}
impl VgaWriter {
pub unsafe fn new(vmem: *mut u8) -> VgaWriter {
let mut vga_writer = VgaWriter { vmem, offset: 0 };
vga_writer.clear();
vga_writer
}
pub fn clear(&mut self) {
for i in 0..ROWS {
self.clear_line(i);
}
self.offset = 0;
}
fn clear_line(&mut self, line: usize) {
assert!(line < ROWS);
for i in 0..COLS {
unsafe {
*self.vmem.offset(2 * (i + line * COLS) as isize) = 0;
}
}
}
fn scroll(&mut self, lines: usize) {
if lines == 0 {
return;
}
let lines = core::cmp::min(lines, ROWS);
if lines == ROWS {
self.clear();
return;
}
unsafe {
core::ptr::copy(
self.vmem.offset((lines * COLS * 2) as isize),
self.vmem,
(ROWS - lines) * COLS * 2,
);
}
for i in (ROWS - lines)..ROWS {
self.clear_line(i);
}
self.offset = self.offset.saturating_sub(lines * COLS);
}
}
unsafe impl Send for VgaWriter {}
impl Write for VgaWriter {
fn write_str(&mut self, s: &str) -> core::fmt::Result {
for c in s.chars() {
if self.offset >= ROWS * COLS {
self.scroll(1);
assert!(self.offset < ROWS * COLS);
}
if c == '\n' {
self.offset = ((self.offset + COLS) / COLS) * COLS;
continue;
}
let b = if c.is_ascii() { c as u8 } else { '?' as u8 };
unsafe {
*self.vmem.offset(2 * self.offset as isize) = b;
}
self.offset += 1;
}
Ok(())
}
}
pub struct VgaLog {
writer: Mutex<VgaWriter>,
}
impl VgaLog {
pub fn new(writer: VgaWriter) -> VgaLog {
VgaLog {
writer: Mutex::new(writer),
}
}
}
impl Log for VgaLog {
fn
|
(&self, _: &Metadata) -> bool {
true
}
fn log(&self, record: &Record) {
let mut writer = self.writer.lock();
let _ = writeln!(
&mut writer,
"[{}] {}: {}",
level_as_string(record.level()),
record.target(),
record.args()
);
}
fn flush(&self) {
// No-op since we write directly to screen.
}
}
fn level_as_string(level: Level) -> &'static str {
use Level::*;
match level {
Error => "ERROR",
Warn => " WARN",
Info => " INFO",
Debug => "DEBUG",
Trace => "TRACE",
}
}
|
enabled
|
identifier_name
|
move-in-guard-1.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
pub fn main() {
let x: Box<_> = box 1;
let v = (1, 2);
match v {
(1, _) if take(x) => (),
(_, 2) if take(x) => (), //~ ERROR use of moved value: `x`
_ => (),
}
}
fn take<T>(_: T) -> bool { false }
|
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
move-in-guard-1.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
pub fn main() {
let x: Box<_> = box 1;
let v = (1, 2);
match v {
(1, _) if take(x) => (),
(_, 2) if take(x) => (), //~ ERROR use of moved value: `x`
_ => (),
}
}
fn take<T>(_: T) -> bool
|
{ false }
|
identifier_body
|
|
move-in-guard-1.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(box_syntax)]
pub fn
|
() {
let x: Box<_> = box 1;
let v = (1, 2);
match v {
(1, _) if take(x) => (),
(_, 2) if take(x) => (), //~ ERROR use of moved value: `x`
_ => (),
}
}
fn take<T>(_: T) -> bool { false }
|
main
|
identifier_name
|
builtin-superkinds-self-type.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn foo(self, mut chan: Sender<Self>)
|
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
1193182.foo(tx);
assert!(rx.recv() == 1193182);
}
|
{ }
|
identifier_body
|
builtin-superkinds-self-type.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn foo(self, mut chan: Sender<Self>) { }
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn main() {
let (tx, rx) = channel();
1193182.foo(tx);
assert!(rx.recv() == 1193182);
}
|
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
|
random_line_split
|
builtin-superkinds-self-type.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests (negatively) the ability for the Self type in default methods
// to use capabilities granted by builtin kinds as supertraits.
use std::sync::mpsc::{channel, Sender};
trait Foo : Sync+'static {
fn foo(self, mut chan: Sender<Self>) { }
}
impl <T: Sync> Foo for T { }
//~^ ERROR the parameter type `T` may not live long enough
fn
|
() {
let (tx, rx) = channel();
1193182.foo(tx);
assert!(rx.recv() == 1193182);
}
|
main
|
identifier_name
|
lib.rs
|
This module re-exports everything and includes free functions for all traits methods doing
out-of-place modifications.
* You can import the whole prelude using:
```.ignore
use nalgebra::*;
```
The preferred way to use **nalgebra** is to import types and traits explicitly, and call
free-functions using the `na::` prefix:
```.rust
extern crate nalgebra as na;
use na::{Vec3, Rot3, Rotation};
fn main() {
let a = Vec3::new(1.0f64, 1.0, 1.0);
let mut b = Rot3::new(na::zero());
b.append_rotation_mut(&a);
assert!(na::approx_eq(&na::rotation(&b), &a));
}
```
## Features
**nalgebra** is meant to be a general-purpose, low-dimensional, linear algebra library, with
an optimized set of tools for computer graphics and physics. Those features include:
* Vectors with static sizes: `Vec0`, `Vec1`, `Vec2`, `Vec3`, `Vec4`, `Vec5`, `Vec6`.
* Points with static sizes: `Pnt0`, `Pnt1`, `Pnt2`, `Pnt3`, `Pnt4`, `Pnt5`, `Pnt6`.
* Square matrices with static sizes: `Mat1`, `Mat2`, `Mat3`, `Mat4`, `Mat5`, `Mat6 `.
* Rotation matrices: `Rot2`, `Rot3`, `Rot4`.
* Quaternions: `Quat`, `UnitQuat`.
* Isometries: `Iso2`, `Iso3`, `Iso4`.
* 3D projections for computer graphics: `Persp3`, `PerspMat3`, `Ortho3`, `OrthoMat3`.
* Dynamically sized vector: `DVec`.
* Dynamically sized (square or rectangular) matrix: `DMat`.
* A few methods for data analysis: `Cov`, `Mean`.
* Almost one trait per functionality: useful for generic programming.
* Operator overloading using multidispatch.
## **nalgebra** in use
Here are some projects using **nalgebra**.
Feel free to add your project to this list if you happen to use **nalgebra**!
* [nphysics](https://github.com/sebcrozet/nphysics): a real-time physics engine.
* [ncollide](https://github.com/sebcrozet/ncollide): a collision detection library.
* [kiss3d](https://github.com/sebcrozet/kiss3d): a minimalistic graphics engine.
* [nrays](https://github.com/sebcrozet/nrays): a ray tracer.
*/
#![deny(non_camel_case_types)]
#![deny(unused_parens)]
#![deny(non_upper_case_globals)]
#![deny(unused_qualifications)]
#![deny(unused_results)]
#![warn(missing_docs)]
#![doc(html_root_url = "http://nalgebra.org/doc")]
extern crate rustc_serialize;
extern crate rand;
extern crate num;
#[cfg(feature="arbitrary")]
extern crate quickcheck;
use std::cmp;
use std::ops::{Neg, Mul};
use num::{Zero, One};
pub use traits::{
Absolute,
AbsoluteRotate,
ApproxEq,
Axpy,
Basis,
BaseFloat,
BaseNum,
Bounded,
Cast,
Col,
ColSlice, RowSlice,
Cov,
Cross,
CrossMatrix,
Det,
Diag,
Dim,
Dot,
EigenQR,
Eye,
FloatPnt,
FloatVec,
FromHomogeneous,
Indexable,
Inv,
Iterable,
IterableMut,
Mat,
Mean,
Norm,
NumPnt,
NumVec,
Orig,
Outer,
POrd,
POrdering,
PntAsVec,
Repeat,
Rotate, Rotation, RotationMatrix, RotationWithTranslation, RotationTo,
Row,
Shape,
SquareMat,
ToHomogeneous,
Transform, Transformation,
Translate, Translation,
Transpose,
UniformSphereSample
};
pub use structs::{
Identity,
DMat,
DVec, DVec1, DVec2, DVec3, DVec4, DVec5, DVec6,
Iso2, Iso3, Iso4,
Mat1, Mat2, Mat3, Mat4,
Mat5, Mat6,
Rot2, Rot3, Rot4,
Vec0, Vec1, Vec2, Vec3, Vec4, Vec5, Vec6,
Pnt0, Pnt1, Pnt2, Pnt3, Pnt4, Pnt5, Pnt6,
Persp3, PerspMat3,
Ortho3, OrthoMat3,
Quat, UnitQuat
};
pub use linalg::{
qr,
householder_matrix,
cholesky
};
mod structs;
mod traits;
mod linalg;
mod macros;
// mod lower_triangular;
// mod chol;
/// Change the input value to ensure it is on the range `[min, max]`.
#[inline(always)]
pub fn clamp<T: PartialOrd>(val: T, min: T, max: T) -> T {
if val > min {
if val < max {
val
}
else {
max
}
}
else {
min
}
}
/// Same as `cmp::max`.
#[inline(always)]
pub fn max<T: Ord>(a: T, b: T) -> T {
cmp::max(a, b)
}
/// Same as `cmp::min`.
#[inline(always)]
pub fn min<T: Ord>(a: T, b: T) -> T {
cmp::min(a, b)
}
/// Returns the infimum of `a` and `b`.
#[inline(always)]
pub fn inf<T: POrd>(a: &T, b: &T) -> T {
POrd::inf(a, b)
}
/// Returns the supremum of `a` and `b`.
#[inline(always)]
pub fn sup<T: POrd>(a: &T, b: &T) -> T {
POrd::sup(a, b)
}
/// Compare `a` and `b` using a partial ordering relation.
#[inline(always)]
pub fn partial_cmp<T: POrd>(a: &T, b: &T) -> POrdering {
POrd::partial_cmp(a, b)
}
/// Returns `true` iff `a` and `b` are comparable and `a < b`.
#[inline(always)]
pub fn partial_lt<T: POrd>(a: &T, b: &T) -> bool {
POrd::partial_lt(a, b)
}
/// Returns `true` iff `a` and `b` are comparable and `a <= b`.
#[inline(always)]
pub fn partial_le<T: POrd>(a: &T, b: &T) -> bool {
POrd::partial_le(a, b)
}
/// Returns `true` iff `a` and `b` are comparable and `a > b`.
#[inline(always)]
pub fn partial_gt<T: POrd>(a: &T, b: &T) -> bool {
POrd::partial_gt(a, b)
}
/// Returns `true` iff `a` and `b` are comparable and `a >= b`.
#[inline(always)]
pub fn partial_ge<T: POrd>(a: &T, b: &T) -> bool {
POrd::partial_ge(a, b)
}
/// Return the minimum of `a` and `b` if they are comparable.
#[inline(always)]
pub fn partial_min<'a, T: POrd>(a: &'a T, b: &'a T) -> Option<&'a T> {
POrd::partial_min(a, b)
}
/// Return the maximum of `a` and `b` if they are comparable.
#[inline(always)]
pub fn partial_max<'a, T: POrd>(a: &'a T, b: &'a T) -> Option<&'a T> {
POrd::partial_max(a, b)
}
/// Clamp `value` between `min` and `max`. Returns `None` if `value` is not comparable to
/// `min` or `max`.
#[inline(always)]
pub fn partial_clamp<'a, T: POrd>(value: &'a T, min: &'a T, max: &'a T) -> Option<&'a T> {
POrd::partial_clamp(value, min, max)
}
//
//
// Constructors
//
//
/// Create a special identity object.
///
/// Same as `Identity::new()`.
#[inline(always)]
pub fn identity() -> Identity {
Identity::new()
}
/// Create a zero-valued value.
///
/// This is the same as `std::num::zero()`.
#[inline(always)]
pub fn zero<T: Zero>() -> T {
Zero::zero()
}
/// Tests is a value is iqual to zero.
#[inline(always)]
pub fn is_zero<T: Zero>(val: &T) -> bool {
val.is_zero()
}
/// Create a one-valued value.
///
/// This is the same as `std::num::one()`.
#[inline(always)]
pub fn one<T: One>() -> T {
One::one()
}
//
//
// Geometry
//
//
/// Returns the trivial origin of an affine space.
#[inline(always)]
pub fn orig<P: Orig>() -> P {
Orig::orig()
}
/// Returns the center of two points.
#[inline]
pub fn center<N: BaseFloat, P: FloatPnt<N, V>, V: Copy + Norm<N>>(a: &P, b: &P) -> P {
let _2 = one::<N>() + one();
(*a + *b.as_vec()) / _2
}
/*
* FloatPnt
*/
/// Returns the distance between two points.
#[inline(always)]
pub fn dist<N: BaseFloat, P: FloatPnt<N, V>, V: Norm<N>>(a: &P, b: &P) -> N {
a.dist(b)
}
/// Returns the squared distance between two points.
#[inline(always)]
pub fn sqdist<N: BaseFloat, P: FloatPnt<N, V>, V: Norm<N>>(a: &P, b: &P) -> N {
a.sqdist(b)
}
/*
* Translation<V>
*/
/// Gets the translation applicable by `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Iso3};
///
/// fn main() {
/// let t = Iso3::new(Vec3::new(1.0f64, 1.0, 1.0), na::zero());
/// let trans = na::translation(&t);
///
/// assert!(trans == Vec3::new(1.0, 1.0, 1.0));
/// }
/// ```
#[inline(always)]
pub fn translation<V, M: Translation<V>>(m: &M) -> V {
m.translation()
}
/// Gets the inverse translation applicable by `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Iso3};
///
/// fn main() {
/// let t = Iso3::new(Vec3::new(1.0f64, 1.0, 1.0), na::zero());
/// let itrans = na::inv_translation(&t);
///
/// assert!(itrans == Vec3::new(-1.0, -1.0, -1.0));
/// }
/// ```
#[inline(always)]
pub fn inv_translation<V, M: Translation<V>>(m: &M) -> V {
m.inv_translation()
}
/// Applies the translation `v` to a copy of `m`.
#[inline(always)]
pub fn append_translation<V, M: Translation<V>>(m: &M, v: &V) -> M {
Translation::append_translation(m, v)
}
/*
* Translate<P>
*/
/// Applies a translation to a point.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Pnt3, Vec3, Iso3};
///
/// fn main() {
/// let t = Iso3::new(Vec3::new(1.0f64, 1.0, 1.0), na::zero());
/// let p = Pnt3::new(2.0, 2.0, 2.0);
///
/// let tp = na::translate(&t, &p);
///
/// assert!(tp == Pnt3::new(3.0, 3.0, 3.0))
/// }
/// ```
#[inline(always)]
pub fn translate<P, M: Translate<P>>(m: &M, p: &P) -> P {
m.translate(p)
}
/// Applies an inverse translation to a point.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Pnt3, Vec3, Iso3};
///
/// fn main() {
/// let t = Iso3::new(Vec3::new(1.0f64, 1.0, 1.0), na::zero());
/// let p = Pnt3::new(2.0, 2.0, 2.0);
///
/// let tp = na::inv_translate(&t, &p);
///
/// assert!(na::approx_eq(&tp, &Pnt3::new(1.0, 1.0, 1.0)))
/// }
#[inline(always)]
pub fn in
|
, M: Translate<P>>(m: &M, p: &P) -> P {
m.inv_translate(p)
}
/*
* Rotation<V>
*/
/// Gets the rotation applicable by `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Rot3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(1.0f64, 1.0, 1.0));
///
/// assert!(na::approx_eq(&na::rotation(&t), &Vec3::new(1.0, 1.0, 1.0)));
/// }
/// ```
#[inline(always)]
pub fn rotation<V, M: Rotation<V>>(m: &M) -> V {
m.rotation()
}
/// Gets the inverse rotation applicable by `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Rot3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(1.0f64, 1.0, 1.0));
///
/// assert!(na::approx_eq(&na::inv_rotation(&t), &Vec3::new(-1.0, -1.0, -1.0)));
/// }
/// ```
#[inline(always)]
pub fn inv_rotation<V, M: Rotation<V>>(m: &M) -> V {
m.inv_rotation()
}
// FIXME: this example is a bit shity
/// Applies the rotation `v` to a copy of `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Rot3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(0.0f64, 0.0, 0.0));
/// let v = Vec3::new(1.0, 1.0, 1.0);
/// let rt = na::append_rotation(&t, &v);
///
/// assert!(na::approx_eq(&na::rotation(&rt), &Vec3::new(1.0, 1.0, 1.0)))
/// }
/// ```
#[inline(always)]
pub fn append_rotation<V, M: Rotation<V>>(m: &M, v: &V) -> M {
Rotation::append_rotation(m, v)
}
// FIXME: this example is a bit shity
/// Pre-applies the rotation `v` to a copy of `m`.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{Vec3, Rot3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(0.0f64, 0.0, 0.0));
/// let v = Vec3::new(1.0, 1.0, 1.0);
/// let rt = na::prepend_rotation(&t, &v);
///
/// assert!(na::approx_eq(&na::rotation(&rt), &Vec3::new(1.0, 1.0, 1.0)))
/// }
/// ```
#[inline(always)]
pub fn prepend_rotation<V, M: Rotation<V>>(m: &M, v: &V) -> M {
Rotation::prepend_rotation(m, v)
}
/*
* Rotate<V>
*/
/// Applies a rotation to a vector.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{BaseFloat, Rot3, Vec3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(0.0f64, 0.0, 0.5 * <f64 as BaseFloat>::pi()));
/// let v = Vec3::new(1.0, 0.0, 0.0);
///
/// let tv = na::rotate(&t, &v);
///
/// assert!(na::approx_eq(&tv, &Vec3::new(0.0, 1.0, 0.0)))
/// }
/// ```
#[inline(always)]
pub fn rotate<V, M: Rotate<V>>(m: &M, v: &V) -> V {
m.rotate(v)
}
/// Applies an inverse rotation to a vector.
///
/// ```rust
/// extern crate nalgebra as na;
/// use na::{BaseFloat, Rot3, Vec3};
///
/// fn main() {
/// let t = Rot3::new(Vec3::new(0.0f64, 0.0, 0.5 * <f64 as BaseFloat>::pi()));
/// let v = Vec3::new(1.0, 0.0, 0.0);
///
/// let tv = na::inv_rotate(&t, &v);
///
/// assert!(na::approx_eq(&tv, &Vec3::new(0.0, -1.0, 0.0)))
/// }
/// ```
#[inline(always)]
pub fn inv_rotate<V, M: Rotate<V>>(m: &M, v: &V) -> V {
m.inv_rotate(v)
}
/*
* RotationWithTranslation<LV, AV>
*/
/// Rotates a copy of `m` by `amount` using `center` as the pivot point.
#[inline(always)]
pub fn append_rotation_wrt_point<LV: Neg<Output = LV> + Copy,
AV,
M: RotationWithTranslation<LV, AV>>(
m: &M,
amount: &AV,
center: &LV) -> M {
RotationWithTranslation::append_rotation_wrt_point(m, amount, center)
}
/// Rotates a copy of `m` by `amount` using `m.translation()` as the pivot point.
#[inline(always)]
pub fn append_rotation_wrt_center<LV: Neg<Output = LV> + Copy,
AV,
M: RotationWithTranslation<LV, AV>>(
m: &M,
amount: &AV) -> M {
RotationWithTranslation::append_rotation_wrt_center(m, amount)
}
/*
* RotationTo
*/
/// Computes the angle of the rotation needed to transfom `a` to `b`.
#[inline(always)]
pub fn angle_between<V: RotationTo>(a: &V, b: &V) -> V::AngleType {
a.angle_to(b)
}
/// Computes the rotation needed to transform `a` to `b`.
#[inline(always)]
pub fn rotation_between<V: RotationTo>(a: &V, b: &V) -> V::DeltaRotationType {
a.rotation_to(b)
}
/*
* RotationMatrix<LV, AV, R>
*/
/// Builds a rotation matrix from `r`.
#[inline(always)]
pub fn to_rot_mat<N, LV, AV, R, M>(r: &R) -> M
where R: RotationMatrix<N, LV, AV, Output = M>,
M: SquareMat<N, LV> + Rotation<AV> + Copy,
LV: Mul<M, Output = LV>
{
// FIXME: rust-lang/rust#20413
r.to_rot_mat()
}
/*
* AbsoluteRotate<V>
*/
/// Applies a rotation using the absolute values of its components.
#[inline(always)]
pub fn absolute_rotate<V, M: AbsoluteRotate<V>>(m: &M, v: &V) -> V {
m.absolute_rotate(v)
}
/*
* Transformation<T>
*/
/// Gets the transformation applicable by `m`.
#[inline(always)]
pub fn transformation<T, M: Transformation<T>>(m: &M) -> T {
m.transformation()
}
/// Gets the inverse transformation applicable by `m`.
#[inline(always)]
pub fn inv_transformation<T, M: Transformation<T>>(m: &M) -> T {
m.inv_transformation()
}
/// Gets a transformed copy of `m`.
#[inline(always)]
pub fn append_transformation<T, M: Transformation<T>>(m: &M, t: &T) -> M {
Transformation::append_transformation(m, t)
}
/*
* Transform<V>
*/
/// Applies a transformation to a vector.
#[inline(always)]
pub fn transform<V, M: Transform<V>>(m: &M, v: &V) -> V {
m.transform(v)
}
/// Applies an inverse transformation to a vector.
#[inline(always)]
pub fn inv_transform<V, M: Transform<V>>(m: &M, v: &V) -> V {
m.inv_transform(v)
}
/*
* Dot<N>
*/
/// Computes the dot product of two vectors.
#[inline(always)]
pub fn dot<V: Dot<N>, N>(a: &V, b: &V) -> N {
Dot::dot(a, b)
}
/*
* Norm<N>
*/
/// Computes the L2 norm of a vector.
#[inline(always)]
pub fn norm<V: Norm<N>, N: BaseFloat>(v: &V) -> N {
Norm::norm(v)
}
/// Computes the squared L2 norm of a vector.
#[inline(always)]
pub fn sqnorm<V: Norm<N>, N: BaseFloat>(v: &V) -> N {
Norm::sqnorm(v)
}
/// Gets the normalized version of a vector.
#[inline(always)]
pub fn normalize<V: Norm<N>, N: BaseFloat>(v: &V) -> V {
Norm::normalize(v)
}
/*
* Det<N>
*/
/// Computes the determinant of a square matrix.
#[inline(always)]
pub fn det<M: Det<N>, N>(m: &M) -> N {
Det::det(m)
}
/*
* Cross<V>
*/
/// Computes the cross product of two vectors.
#[inline(always)]
pub fn cross<LV: Cross>(a: &LV, b: &LV) -> LV::CrossProductType {
Cross::cross(a, b)
}
/*
* CrossMatrix<M>
*/
/// Given a vector, computes the matrix which, when multiplied by another vector, computes a cross
/// product.
#[inline(always)]
pub fn cross_matrix<V: CrossMatrix<M>, M>(v: &V) -> M {
CrossMatrix::cross_matrix(v)
}
/*
* ToHomogeneous<U>
*/
/// Converts a matrix or vector to homogeneous coordinates.
#[inline(always)]
pub fn to_homogeneous<M: ToHomogeneous<Res>, Res>(m: &M) -> Res {
ToHomogeneous::to_homogeneous(m)
}
/*
* FromHomogeneous<U>
*/
/// Converts a matrix or vector from homogeneous coordinates.
///
/// w-normalization is appied.
#[inline(always)]
pub fn from_homogeneous<M, Res: FromHomogeneous<M>>(m: &M) -> Res {
FromHomogeneous::from(m)
}
/*
* UniformSphereSample
*/
/// Samples the unit sphere living on the dimension as the samples types.
///
/// The number of sampling point is implementation-specific. It is always uniform.
#[inline(always)]
pub fn sample_sphere<V: UniformSphereSample, F: FnMut(V)>(f: F) {
UniformSphereSample::sample(f)
}
//
//
// Operations
//
//
/*
* AproxEq<N>
*/
/// Tests approximate equality.
#[inline(always)]
pub fn approx_eq<T: ApproxEq<N>, N>(a: &T, b: &T) -> bool {
ApproxEq::approx_eq(a, b)
}
/// Tests approximate equality using a custom epsilon.
#[inline(always)]
pub fn approx_eq_eps<T: ApproxEq<N>, N>(a: &T, b: &T, eps: &N) -> bool {
ApproxEq::approx_eq_eps(a, b, eps)
}
/*
* Absolute<A>
*/
/// Computes a component-wise absolute value.
#[inline(always)]
pub fn abs<M: Absolute<Res>, Res>(m: &M) -> Res {
Absolute::abs(m)
}
/*
* Inv
*/
/// Gets an inverted copy of a matrix.
#[inline(always)]
pub fn inv<M: Inv>(m: &M) -> Option<M> {
Inv::inv(m)
}
/*
* Transpose
*/
/// Gets a transposed copy of a matrix.
#[inline(always)]
pub fn transpose<M: Transpose>(m: &M) -> M {
Transpose::transpose(m)
}
/*
* Outer<M>
*/
/// Computes the outer product of two vectors.
#[inline(always)]
pub fn outer<V: Outer>(a: &V, b: &V) -> V::OuterProductType {
Outer::outer(a, b)
}
/*
* Cov<M>
*/
/// Computes the covariance of a set of observations.
#[inline(always)]
pub fn cov<M: Cov<Res>, Res>(observations: &M) -> Res {
Cov::cov(observations)
}
/*
* Mean<N>
*/
/// Computes the mean of a set of observations.
#[inline(always)]
pub fn mean<N, M: Mean<N>>(observations: &M) -> N {
Mean::mean(observations)
}
/*
* EigenQR<N, V>
*/
/// Computes the eigenvalues and eigenvectors of a square matrix usin the QR algorithm.
#[inline(always)]
pub fn eigen_qr<N, V, M>(m: &M, eps: &N, niter: usize) -> (M, V)
where V: Mul<M, Output = V>,
M: EigenQR<N, V> {
EigenQR::eigen_qr(m, eps, niter)
}
//
//
// Structure
//
//
/*
* Eye
*/
/// Construct the identity matrix for a given dimension
#[inline(always)]
pub fn new_identity<M: Eye>(dim: usize) -> M {
Eye::new_identity(dim)
}
/*
* Repeat
*/
/// Create an object by repeating a value.
///
/// Same as `Identity::new()`.
#[inline(always)]
pub fn repeat<N, T: Repeat<N>>(val: N) -> T {
Repeat::repeat(val)
}
/*
* Basis
*/
/// Computes the canonical basis for a given dimension.
#[inline(always)]
pub fn canonical_basis<V: Basis, F: FnMut(V) -> bool>(f: F) {
Basis::canonical_basis(f)
}
/// Computes the basis of the orthonormal subspace of a given vector.
#[inline(always)]
pub fn orthonormal_subspace_basis<V: Basis, F: FnMut(V) -> bool>(v: &V, f: F) {
Basis::orthonormal_subspace_basis(v, f)
}
/// Gets the (0-based) i-th element of the canonical basis of V.
#[inline]
pub fn canonical_basis_element<V: Basis>(i: usize) -> Option<V> {
Basis::canonical_basis_element(i)
}
/*
* Row<R>
*/
/*
* Col<C>
*/
/*
* Diag<V>
*/
/// Gets the diagonal of a square matrix.
#[inline(always)]
pub fn diag<M: Diag<V>, V>(m: &M) -> V {
m.diag()
}
/*
* Dim
*/
/// Gets the dimension an object lives in.
///
/// Same as `Dim::dim::(None::<V>)`.
#[inline(always)]
pub fn dim<V: Dim>() -> usize {
Dim::dim(None::<V>)
}
/// Gets the indexable range of an object.
#[inline(always)]
pub fn shape<V: Shape<I>, I>(v: &V) -> I {
v.shape()
}
/*
* Cast<T>
*/
/// Converts an object from one type to another.
///
/// For primitive types, this is the same as
|
v_translate<P
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.