| file_name (string, 4–69 chars) | prefix (string, 0–26.7k chars) | suffix (string, 0–24.8k chars) | middle (string, 0–2.12k chars) | fim_type (4 classes) |
|---|---|---|---|---|
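Each row below is one fill-in-the-middle (FIM) sample: a source file split into a prefix, a held-out middle, and a suffix, with the split strategy recorded in `fim_type`. Concatenating prefix + middle + suffix reproduces the original file. A minimal sketch of that reassembly, with short inline stand-in values rather than real rows from the table:

```rust
/// One fill-in-the-middle sample; the fields mirror the columns above.
struct FimSample {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimSample {
    /// The original file is prefix + middle + suffix, in that order.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // Stand-in values for illustration only.
    let sample = FimSample {
        file_name: "guard.rs".into(),
        prefix: "/// A container with a condition.\n".into(),
        middle: "pub struct Guard<T: Clone + Copy> {\n".into(),
        suffix: "    condition: Condition,\n    value: T,\n}\n".into(),
        fim_type: "random_line_split".into(),
    };
    assert!(sample.reassemble().contains("pub struct Guard"));
    println!("{} ({})", sample.file_name, sample.fim_type);
}
```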
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::JSContext;
use js::rust::HandleObject;
use servo_config::prefs;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
|
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => prefs::pref_map().get(name).as_bool().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
/// A condition to expose things.
pub enum Condition {
|
random_line_split
|
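For orientation, the guard.rs row above reassembles (prefix, then middle, then suffix) into Servo's Guard/Condition pattern. Below is a self-contained sketch of that pattern with the Servo-specific pieces (JSContext, HandleObject, the preference map) replaced by a plain function pointer; it illustrates the shape of the sample, not Servo's actual API:

```rust
/// Simplified from the guard.rs sample: a value exposed only when its
/// condition holds. The real code also checks a JS context and a
/// preference map; here Condition::Func is just a function pointer.
pub enum Condition {
    Func(fn() -> bool),
    Satisfied,
}

pub struct Guard<T: Copy> {
    condition: Condition,
    value: T,
}

impl<T: Copy> Guard<T> {
    pub const fn new(condition: Condition, value: T) -> Self {
        Guard { condition, value }
    }

    /// Expose the value if the condition is satisfied.
    pub fn expose(&self) -> Option<T> {
        let ok = match self.condition {
            Condition::Func(f) => f(),
            Condition::Satisfied => true,
        };
        if ok { Some(self.value) } else { None }
    }
}

fn main() {
    let always = Guard::new(Condition::Satisfied, 1u32);
    let never = Guard::new(Condition::Func(|| false), 2u32);
    assert_eq!(always.expose(), Some(1));
    assert_eq!(never.expose(), None);
}
```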
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::{
self, HTMLTableSectionElementMethods,
};
use crate::dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use crate::dom::bindings::error::{ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmltablerowelement::HTMLTableRowElement;
use crate::dom::node::{window_from_node, Node};
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::attr::AttrValue;
#[dom_struct]
pub struct
|
{
htmlelement: HTMLElement,
}
impl HTMLTableSectionElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLTableSectionElement> {
Node::reflect_node(
Box::new(HTMLTableSectionElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLTableSectionElementBinding::Wrap,
)
}
}
#[derive(JSTraceable)]
struct RowsFilter;
impl CollectionFilter for RowsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
elem.is::<HTMLTableRowElement>() && elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
impl HTMLTableSectionElementMethods for HTMLTableSectionElement {
// https://html.spec.whatwg.org/multipage/#dom-tbody-rows
fn Rows(&self) -> DomRoot<HTMLCollection> {
HTMLCollection::create(&window_from_node(self), self.upcast(), Box::new(RowsFilter))
}
// https://html.spec.whatwg.org/multipage/#dom-tbody-insertrow
fn InsertRow(&self, index: i32) -> Fallible<DomRoot<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Rows(),
|| HTMLTableRowElement::new(local_name!("tr"), None, &node.owner_doc()),
)
}
// https://html.spec.whatwg.org/multipage/#dom-tbody-deleterow
fn DeleteRow(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(index, || self.Rows(), |n| n.is::<HTMLTableRowElement>())
}
}
pub trait HTMLTableSectionElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableSectionElementLayoutHelpers for LayoutDom<HTMLTableSectionElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableSectionElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(local_name, value),
}
}
}
|
HTMLTableSectionElement
|
identifier_name
|
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::{
self, HTMLTableSectionElementMethods,
};
use crate::dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use crate::dom::bindings::error::{ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmltablerowelement::HTMLTableRowElement;
use crate::dom::node::{window_from_node, Node};
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::attr::AttrValue;
#[dom_struct]
pub struct HTMLTableSectionElement {
htmlelement: HTMLElement,
}
impl HTMLTableSectionElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLTableSectionElement> {
Node::reflect_node(
Box::new(HTMLTableSectionElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLTableSectionElementBinding::Wrap,
)
}
}
#[derive(JSTraceable)]
struct RowsFilter;
impl CollectionFilter for RowsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
elem.is::<HTMLTableRowElement>() && elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
impl HTMLTableSectionElementMethods for HTMLTableSectionElement {
// https://html.spec.whatwg.org/multipage/#dom-tbody-rows
fn Rows(&self) -> DomRoot<HTMLCollection>
|
// https://html.spec.whatwg.org/multipage/#dom-tbody-insertrow
fn InsertRow(&self, index: i32) -> Fallible<DomRoot<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Rows(),
|| HTMLTableRowElement::new(local_name!("tr"), None, &node.owner_doc()),
)
}
// https://html.spec.whatwg.org/multipage/#dom-tbody-deleterow
fn DeleteRow(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(index, || self.Rows(), |n| n.is::<HTMLTableRowElement>())
}
}
pub trait HTMLTableSectionElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableSectionElementLayoutHelpers for LayoutDom<HTMLTableSectionElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableSectionElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(local_name, value),
}
}
}
|
{
HTMLCollection::create(&window_from_node(self), self.upcast(), Box::new(RowsFilter))
}
|
identifier_body
|
htmltablesectionelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::dom::bindings::codegen::Bindings::HTMLTableSectionElementBinding::{
self, HTMLTableSectionElementMethods,
};
use crate::dom::bindings::codegen::Bindings::NodeBinding::NodeMethods;
use crate::dom::bindings::error::{ErrorResult, Fallible};
use crate::dom::bindings::inheritance::Castable;
use crate::dom::bindings::root::{DomRoot, LayoutDom, RootedReference};
use crate::dom::bindings::str::DOMString;
use crate::dom::document::Document;
use crate::dom::element::{Element, RawLayoutElementHelpers};
use crate::dom::htmlcollection::{CollectionFilter, HTMLCollection};
use crate::dom::htmlelement::HTMLElement;
use crate::dom::htmltablerowelement::HTMLTableRowElement;
use crate::dom::node::{window_from_node, Node};
use crate::dom::virtualmethods::VirtualMethods;
use cssparser::RGBA;
use dom_struct::dom_struct;
use html5ever::{LocalName, Prefix};
use style::attr::AttrValue;
#[dom_struct]
pub struct HTMLTableSectionElement {
|
htmlelement: HTMLElement,
}
impl HTMLTableSectionElement {
fn new_inherited(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> HTMLTableSectionElement {
HTMLTableSectionElement {
htmlelement: HTMLElement::new_inherited(local_name, prefix, document),
}
}
#[allow(unrooted_must_root)]
pub fn new(
local_name: LocalName,
prefix: Option<Prefix>,
document: &Document,
) -> DomRoot<HTMLTableSectionElement> {
Node::reflect_node(
Box::new(HTMLTableSectionElement::new_inherited(
local_name, prefix, document,
)),
document,
HTMLTableSectionElementBinding::Wrap,
)
}
}
#[derive(JSTraceable)]
struct RowsFilter;
impl CollectionFilter for RowsFilter {
fn filter(&self, elem: &Element, root: &Node) -> bool {
elem.is::<HTMLTableRowElement>() && elem.upcast::<Node>().GetParentNode().r() == Some(root)
}
}
impl HTMLTableSectionElementMethods for HTMLTableSectionElement {
// https://html.spec.whatwg.org/multipage/#dom-tbody-rows
fn Rows(&self) -> DomRoot<HTMLCollection> {
HTMLCollection::create(&window_from_node(self), self.upcast(), Box::new(RowsFilter))
}
// https://html.spec.whatwg.org/multipage/#dom-tbody-insertrow
fn InsertRow(&self, index: i32) -> Fallible<DomRoot<HTMLElement>> {
let node = self.upcast::<Node>();
node.insert_cell_or_row(
index,
|| self.Rows(),
|| HTMLTableRowElement::new(local_name!("tr"), None, &node.owner_doc()),
)
}
// https://html.spec.whatwg.org/multipage/#dom-tbody-deleterow
fn DeleteRow(&self, index: i32) -> ErrorResult {
let node = self.upcast::<Node>();
node.delete_cell_or_row(index, || self.Rows(), |n| n.is::<HTMLTableRowElement>())
}
}
pub trait HTMLTableSectionElementLayoutHelpers {
fn get_background_color(&self) -> Option<RGBA>;
}
#[allow(unsafe_code)]
impl HTMLTableSectionElementLayoutHelpers for LayoutDom<HTMLTableSectionElement> {
fn get_background_color(&self) -> Option<RGBA> {
unsafe {
(&*self.upcast::<Element>().unsafe_get())
.get_attr_for_layout(&ns!(), &local_name!("bgcolor"))
.and_then(AttrValue::as_color)
.cloned()
}
}
}
impl VirtualMethods for HTMLTableSectionElement {
fn super_type(&self) -> Option<&dyn VirtualMethods> {
Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods)
}
fn parse_plain_attribute(&self, local_name: &LocalName, value: DOMString) -> AttrValue {
match *local_name {
local_name!("bgcolor") => AttrValue::from_legacy_color(value.into()),
_ => self
.super_type()
.unwrap()
.parse_plain_attribute(local_name, value),
}
}
}
|
random_line_split
|
|
lib.rs
|
#![doc(
html_logo_url = "https://cdn.rawgit.com/urschrei/polyline-ffi/master/line.svg",
html_root_url = "https://docs.rs/polyline-ffi/"
)]
//! This module exposes functions for accessing the Polyline encoding and decoding functions via FFI
//!
//!
//! ## A Note on Coordinate Order
//! This crate uses `Coordinate` and `LineString` types from the `geo-types` crate, which encodes coordinates in `(x, y)` order. The Polyline algorithm and first-party documentation assume the _opposite_ coordinate order. It is thus advisable to pay careful attention to the order of the coordinates you use for encoding and decoding.
use polyline::{decode_polyline, encode_coordinates};
use std::ffi::{CStr, CString};
use std::slice;
use std::{f64, ptr};
use geo_types::{CoordFloat, LineString};
use libc::c_char;
// we only want to allow 5 or 6, but we need the previous values for the cast to work
#[allow(dead_code)]
enum Precision {
Zero,
One,
Two,
Three,
Four,
Five,
Six,
}
// We currently only allow 5 or 6
fn get_precision(input: u32) -> Option<u32> {
match input {
5 => Some(Precision::Five as u32),
6 => Some(Precision::Six as u32),
_ => None,
}
}
/// A C-compatible `struct` originating **outside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct ExternalArray {
pub data: *const libc::c_void,
pub len: libc::size_t,
}
/// A C-compatible `struct` originating **inside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct InternalArray {
pub data: *mut libc::c_void,
pub len: libc::size_t,
}
impl Drop for InternalArray {
fn drop(&mut self) {
if self.data.is_null() {
return;
}
let _ = unsafe {
// we originated this data, so pointer-to-slice -> box -> vec
let p = ptr::slice_from_raw_parts_mut(self.data as *mut [f64; 2], self.len);
drop(Box::from_raw(p));
};
}
}
// Build an InternalArray from a LineString, so it can be leaked across the FFI boundary
impl<T> From<LineString<T>> for InternalArray
where
T: CoordFloat,
{
fn from(sl: LineString<T>) -> Self {
let v: Vec<[T; 2]> = sl.0.iter().map(|p| [p.x, p.y]).collect();
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an InternalArray
impl From<InternalArray> for LineString<f64> {
fn from(arr: InternalArray) -> Self {
// we originated this data, so pointer-to-slice -> box -> vec
unsafe {
let p = ptr::slice_from_raw_parts_mut(arr.data as *mut [f64; 2], arr.len);
let v = Box::from_raw(p).to_vec();
v.into()
}
}
}
// Build an InternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for InternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build an ExternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for ExternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
ExternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an ExternalArray
impl From<ExternalArray> for LineString<f64> {
fn from(arr: ExternalArray) -> Self {
// we need to take ownership of this data, so slice -> vec
unsafe {
let v = slice::from_raw_parts(arr.data as *mut [f64; 2], arr.len).to_vec();
v.into()
}
}
}
// Decode a Polyline into an InternalArray
fn arr_from_string(incoming: &str, precision: u32) -> InternalArray {
let result: InternalArray = if get_precision(precision).is_some() {
match decode_polyline(incoming, precision) {
Ok(res) => res.into(),
// should be easy to check for
Err(_) => vec![[f64::NAN, f64::NAN]].into(),
}
} else {
// bad precision parameter
vec![[f64::NAN, f64::NAN]].into()
};
result
}
// Encode an Array into a Polyline
fn string_from_arr(incoming: ExternalArray, precision: u32) -> String {
let inc: LineString<_> = incoming.into();
if get_precision(precision).is_some() {
match encode_coordinates(Into::<LineString<_>>::into(inc), precision) {
Ok(res) => res,
// we don't need to adapt the error
Err(res) => res,
}
} else {
"Bad precision parameter supplied".to_string()
}
}
/// Convert a Polyline into an array of coordinates
///
/// Callers must pass two arguments:
///
/// - a pointer to `NUL`-terminated characters (`char*`)
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return an [Array](struct.Array.html) whose `data` field is `[[NaN, NaN]]`, and whose `len` field is `1`.
///
/// Implementations calling this function **must** call [`drop_float_array`](fn.drop_float_array.html)
/// with the returned [Array](struct.Array.html), in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn decode_polyline_ffi(pl: *const c_char, precision: u32) -> InternalArray {
let s = CStr::from_ptr(pl).to_str();
if let Ok(unwrapped) = s {
arr_from_string(unwrapped, precision)
} else {
vec![[f64::NAN, f64::NAN]].into()
}
}
/// Convert an array of coordinates into a Polyline
///
/// Callers must pass two arguments:
///
/// - a [Struct](struct.Array.html) with two fields:
/// - `data`, a void pointer to an array of floating-point lat, lon coordinates: `[[1.0, 2.0]]`
/// - `len`, the length of the array being passed. Its type must be `size_t`: `1`
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return one of the following:
///
/// - a `char*` beginning with "Longitude error:" if invalid longitudes are passed
/// - a `char*` beginning with "Latitude error:" if invalid latitudes are passed
///
/// Implementations calling this function **must** call [`drop_cstring`](fn.drop_cstring.html)
/// with the returned `c_char` pointer, in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn encode_coordinates_ffi(coords: ExternalArray, precision: u32) -> *mut c_char {
let s: String = string_from_arr(coords, precision);
match CString::new(s) {
Ok(res) => res.into_raw(),
// It's arguably better to fail noisily, but this is robust
Err(_) => CString::new("Couldn't decode Polyline".to_string())
.unwrap()
.into_raw(),
}
}
|
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn drop_float_array(_: InternalArray) {}
/// Free `CString` memory which Rust has allocated across the FFI boundary
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn drop_cstring(p: *mut c_char) {
drop(CString::from_raw(p));
}
#[cfg(test)]
mod tests {
use super::*;
use std::ptr;
#[test]
fn test_drop_empty_float_array() {
let original: LineString<_> = vec![[2.0, 1.0], [4.0, 3.0]].into();
// move into an Array, and leak it
let mut arr: InternalArray = original.into();
// zero Array contents
arr.data = ptr::null_mut();
drop_float_array(arr);
}
#[test]
fn test_coordinate_conversion() {
let input = vec![[2.0, 1.0], [4.0, 3.0]];
let output = "_ibE_seK_seK_seK";
let input_arr: ExternalArray = input.into();
let transformed: String = super::string_from_arr(input_arr, 5);
assert_eq!(transformed, output);
}
#[test]
fn test_string_conversion() {
let input = "_ibE_seK_seK_seK";
let output = vec![[2.0, 1.0], [4.0, 3.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
#[should_panic]
fn test_bad_string_conversion() {
let input = "_p~iF~ps|U_u🗑lLnnqC_mqNvxq`@";
let output = vec![[1.0, 2.0], [3.0, 4.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
fn test_long_vec() {
use std::clone::Clone;
let arr = include!("../test_fixtures/berlin.rs");
let s = include!("../test_fixtures/berlin_decoded.rs");
for _ in 0..9999 {
let a = arr.clone();
let s_ = s.clone();
let n = 5;
let input_ls: ExternalArray = a.into();
let transformed: String = super::string_from_arr(input_ls, n);
assert_eq!(transformed, s_);
}
}
}
|
/// Free Array memory which Rust has allocated across the FFI boundary
///
/// # Safety
|
random_line_split
|
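The doc comments in the lib.rs sample above spell out an ownership contract: every `InternalArray` returned by `decode_polyline_ffi` must be handed back through `drop_float_array`, and every `char*` from `encode_coordinates_ffi` through `drop_cstring`. A minimal Rust-side sketch of that pairing; the `polyline_ffi` crate name is taken from the docs.rs URL in the sample, and the sketch illustrates the contract rather than reproducing the crate's own tests:

```rust
// Assumes the crate from the sample is a dependency.
use polyline_ffi::{
    decode_polyline_ffi, drop_cstring, drop_float_array, encode_coordinates_ffi, ExternalArray,
};
use std::ffi::{CStr, CString};

fn decode_then_free() {
    // Decode a Google-precision (5) polyline; the returned InternalArray
    // owns Rust-allocated memory, so it must go back through drop_float_array.
    let pl = CString::new("_ibE_seK_seK_seK").unwrap();
    let decoded = unsafe { decode_polyline_ffi(pl.as_ptr(), 5) };
    println!("decoded {} coordinate pairs", decoded.len);
    drop_float_array(decoded); // required: frees the Rust allocation
}

fn encode_then_free() {
    // Encode two (x, y) pairs; the returned *mut c_char must go back
    // through drop_cstring.
    let coords: ExternalArray = vec![[2.0, 1.0], [4.0, 3.0]].into();
    let encoded = encode_coordinates_ffi(coords, 5);
    let as_str = unsafe { CStr::from_ptr(encoded) }.to_string_lossy().into_owned();
    println!("encoded: {}", as_str);
    unsafe { drop_cstring(encoded) }; // required: frees the CString
}

fn main() {
    decode_then_free();
    encode_then_free();
}
```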
lib.rs
|
#![doc(
html_logo_url = "https://cdn.rawgit.com/urschrei/polyline-ffi/master/line.svg",
html_root_url = "https://docs.rs/polyline-ffi/"
)]
//! This module exposes functions for accessing the Polyline encoding and decoding functions via FFI
//!
//!
//! ## A Note on Coordinate Order
//! This crate uses `Coordinate` and `LineString` types from the `geo-types` crate, which encodes coordinates in `(x, y)` order. The Polyline algorithm and first-party documentation assume the _opposite_ coordinate order. It is thus advisable to pay careful attention to the order of the coordinates you use for encoding and decoding.
use polyline::{decode_polyline, encode_coordinates};
use std::ffi::{CStr, CString};
use std::slice;
use std::{f64, ptr};
use geo_types::{CoordFloat, LineString};
use libc::c_char;
// we only want to allow 5 or 6, but we need the previous values for the cast to work
#[allow(dead_code)]
enum Precision {
Zero,
One,
Two,
Three,
Four,
Five,
Six,
}
// We currently only allow 5 or 6
fn get_precision(input: u32) -> Option<u32> {
match input {
5 => Some(Precision::Five as u32),
6 => Some(Precision::Six as u32),
_ => None,
}
}
/// A C-compatible `struct` originating **outside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct ExternalArray {
pub data: *const libc::c_void,
pub len: libc::size_t,
}
/// A C-compatible `struct` originating **inside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct InternalArray {
pub data: *mut libc::c_void,
pub len: libc::size_t,
}
impl Drop for InternalArray {
fn drop(&mut self) {
if self.data.is_null() {
return;
}
let _ = unsafe {
// we originated this data, so pointer-to-slice -> box -> vec
let p = ptr::slice_from_raw_parts_mut(self.data as *mut [f64; 2], self.len);
drop(Box::from_raw(p));
};
}
}
// Build an InternalArray from a LineString, so it can be leaked across the FFI boundary
impl<T> From<LineString<T>> for InternalArray
where
T: CoordFloat,
{
fn from(sl: LineString<T>) -> Self {
let v: Vec<[T; 2]> = sl.0.iter().map(|p| [p.x, p.y]).collect();
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an InternalArray
impl From<InternalArray> for LineString<f64> {
fn from(arr: InternalArray) -> Self {
// we originated this data, so pointer-to-slice -> box -> vec
unsafe {
let p = ptr::slice_from_raw_parts_mut(arr.data as *mut [f64; 2], arr.len);
let v = Box::from_raw(p).to_vec();
v.into()
}
}
}
// Build an InternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for InternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build an ExternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for ExternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
ExternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an ExternalArray
impl From<ExternalArray> for LineString<f64> {
fn from(arr: ExternalArray) -> Self {
// we need to take ownership of this data, so slice -> vec
unsafe {
let v = slice::from_raw_parts(arr.data as *mut [f64; 2], arr.len).to_vec();
v.into()
}
}
}
// Decode a Polyline into an InternalArray
fn arr_from_string(incoming: &str, precision: u32) -> InternalArray {
let result: InternalArray = if get_precision(precision).is_some() {
match decode_polyline(incoming, precision) {
Ok(res) => res.into(),
// should be easy to check for
Err(_) => vec![[f64::NAN, f64::NAN]].into(),
}
} else {
// bad precision parameter
vec![[f64::NAN, f64::NAN]].into()
};
result
}
// Encode an Array into a Polyline
fn string_from_arr(incoming: ExternalArray, precision: u32) -> String {
let inc: LineString<_> = incoming.into();
if get_precision(precision).is_some() {
match encode_coordinates(Into::<LineString<_>>::into(inc), precision) {
Ok(res) => res,
// we don't need to adapt the error
Err(res) => res,
}
} else {
"Bad precision parameter supplied".to_string()
}
}
/// Convert a Polyline into an array of coordinates
///
/// Callers must pass two arguments:
///
/// - a pointer to `NUL`-terminated characters (`char*`)
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return an [Array](struct.Array.html) whose `data` field is `[[NaN, NaN]]`, and whose `len` field is `1`.
///
/// Implementations calling this function **must** call [`drop_float_array`](fn.drop_float_array.html)
/// with the returned [Array](struct.Array.html), in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn decode_polyline_ffi(pl: *const c_char, precision: u32) -> InternalArray {
let s = CStr::from_ptr(pl).to_str();
if let Ok(unwrapped) = s {
arr_from_string(unwrapped, precision)
} else {
vec![[f64::NAN, f64::NAN]].into()
}
}
/// Convert an array of coordinates into a Polyline
///
/// Callers must pass two arguments:
///
/// - a [Struct](struct.Array.html) with two fields:
/// - `data`, a void pointer to an array of floating-point lat, lon coordinates: `[[1.0, 2.0]]`
/// - `len`, the length of the array being passed. Its type must be `size_t`: `1`
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return one of the following:
///
/// - a `char*` beginning with "Longitude error:" if invalid longitudes are passed
/// - a `char*` beginning with "Latitude error:" if invalid latitudes are passed
///
/// Implementations calling this function **must** call [`drop_cstring`](fn.drop_cstring.html)
/// with the returned `c_char` pointer, in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn encode_coordinates_ffi(coords: ExternalArray, precision: u32) -> *mut c_char {
let s: String = string_from_arr(coords, precision);
match CString::new(s) {
Ok(res) => res.into_raw(),
// It's arguably better to fail noisily, but this is robust
Err(_) => CString::new("Couldn't decode Polyline".to_string())
.unwrap()
.into_raw(),
}
}
/// Free Array memory which Rust has allocated across the FFI boundary
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn drop_float_array(_: InternalArray) {}
/// Free `CString` memory which Rust has allocated across the FFI boundary
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn drop_cstring(p: *mut c_char) {
drop(CString::from_raw(p));
}
#[cfg(test)]
mod tests {
use super::*;
use std::ptr;
#[test]
fn test_drop_empty_float_array() {
let original: LineString<_> = vec![[2.0, 1.0], [4.0, 3.0]].into();
// move into an Array, and leak it
let mut arr: InternalArray = original.into();
// zero Array contents
arr.data = ptr::null_mut();
drop_float_array(arr);
}
#[test]
fn test_coordinate_conversion() {
let input = vec![[2.0, 1.0], [4.0, 3.0]];
let output = "_ibE_seK_seK_seK";
let input_arr: ExternalArray = input.into();
let transformed: String = super::string_from_arr(input_arr, 5);
assert_eq!(transformed, output);
}
#[test]
fn test_string_conversion() {
let input = "_ibE_seK_seK_seK";
let output = vec![[2.0, 1.0], [4.0, 3.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
#[should_panic]
fn test_bad_string_conversion() {
let input = "_p~iF~ps|U_u🗑lLnnqC_mqNvxq`@";
let output = vec![[1.0, 2.0], [3.0, 4.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
fn tes
|
{
use std::clone::Clone;
let arr = include!("../test_fixtures/berlin.rs");
let s = include!("../test_fixtures/berlin_decoded.rs");
for _ in 0..9999 {
let a = arr.clone();
let s_ = s.clone();
let n = 5;
let input_ls: ExternalArray = a.into();
let transformed: String = super::string_from_arr(input_ls, n);
assert_eq!(transformed, s_);
}
}
}
|
t_long_vec()
|
identifier_name
|
lib.rs
|
#![doc(
html_logo_url = "https://cdn.rawgit.com/urschrei/polyline-ffi/master/line.svg",
html_root_url = "https://docs.rs/polyline-ffi/"
)]
//! This module exposes functions for accessing the Polyline encoding and decoding functions via FFI
//!
//!
//! ## A Note on Coordinate Order
//! This crate uses `Coordinate` and `LineString` types from the `geo-types` crate, which encodes coordinates in `(x, y)` order. The Polyline algorithm and first-party documentation assume the _opposite_ coordinate order. It is thus advisable to pay careful attention to the order of the coordinates you use for encoding and decoding.
use polyline::{decode_polyline, encode_coordinates};
use std::ffi::{CStr, CString};
use std::slice;
use std::{f64, ptr};
use geo_types::{CoordFloat, LineString};
use libc::c_char;
// we only want to allow 5 or 6, but we need the previous values for the cast to work
#[allow(dead_code)]
enum Precision {
Zero,
One,
Two,
Three,
Four,
Five,
Six,
}
// We currently only allow 5 or 6
fn get_precision(input: u32) -> Option<u32> {
match input {
5 => Some(Precision::Five as u32),
6 => Some(Precision::Six as u32),
_ => None,
}
}
/// A C-compatible `struct` originating **outside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct ExternalArray {
pub data: *const libc::c_void,
pub len: libc::size_t,
}
/// A C-compatible `struct` originating **inside** Rust
/// used for passing arrays across the FFI boundary
#[repr(C)]
pub struct InternalArray {
pub data: *mut libc::c_void,
pub len: libc::size_t,
}
impl Drop for InternalArray {
fn drop(&mut self) {
if self.data.is_null() {
return;
}
let _ = unsafe {
// we originated this data, so pointer-to-slice -> box -> vec
let p = ptr::slice_from_raw_parts_mut(self.data as *mut [f64; 2], self.len);
drop(Box::from_raw(p));
};
}
}
// Build an InternalArray from a LineString, so it can be leaked across the FFI boundary
impl<T> From<LineString<T>> for InternalArray
where
T: CoordFloat,
{
fn from(sl: LineString<T>) -> Self {
let v: Vec<[T; 2]> = sl.0.iter().map(|p| [p.x, p.y]).collect();
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an InternalArray
impl From<InternalArray> for LineString<f64> {
fn from(arr: InternalArray) -> Self {
// we originated this data, so pointer-to-slice -> box -> vec
unsafe {
let p = ptr::slice_from_raw_parts_mut(arr.data as *mut [f64; 2], arr.len);
let v = Box::from_raw(p).to_vec();
v.into()
}
}
}
// Build an InternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for InternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
InternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build an ExternalArray from a Vec of coordinate pairs, so it can be leaked across the FFI boundary
impl From<Vec<[f64; 2]>> for ExternalArray {
fn from(v: Vec<[f64; 2]>) -> Self {
let boxed = v.into_boxed_slice();
let blen = boxed.len();
let rawp = Box::into_raw(boxed);
ExternalArray {
data: rawp as *mut libc::c_void,
len: blen as libc::size_t,
}
}
}
// Build a LineString from an ExternalArray
impl From<ExternalArray> for LineString<f64> {
fn from(arr: ExternalArray) -> Self {
// we need to take ownership of this data, so slice -> vec
unsafe {
let v = slice::from_raw_parts(arr.data as *mut [f64; 2], arr.len).to_vec();
v.into()
}
}
}
// Decode a Polyline into an InternalArray
fn arr_from_string(incoming: &str, precision: u32) -> InternalArray {
let result: InternalArray = if get_precision(precision).is_some() {
match decode_polyline(incoming, precision) {
Ok(res) => res.into(),
// should be easy to check for
Err(_) => vec![[f64::NAN, f64::NAN]].into(),
}
} else {
// bad precision parameter
vec![[f64::NAN, f64::NAN]].into()
};
result
}
// Encode an Array into a Polyline
fn string_from_arr(incoming: ExternalArray, precision: u32) -> String {
let inc: LineString<_> = incoming.into();
if get_precision(precision).is_some() {
match encode_coordinates(Into::<LineString<_>>::into(inc), precision) {
Ok(res) => res,
// we don't need to adapt the error
Err(res) => res,
}
} else {
"Bad precision parameter supplied".to_string()
}
}
/// Convert a Polyline into an array of coordinates
///
/// Callers must pass two arguments:
///
/// - a pointer to `NUL`-terminated characters (`char*`)
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return an [Array](struct.Array.html) whose `data` field is `[[NaN, NaN]]`, and whose `len` field is `1`.
///
/// Implementations calling this function **must** call [`drop_float_array`](fn.drop_float_array.html)
/// with the returned [Array](struct.Array.html), in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn decode_polyline_ffi(pl: *const c_char, precision: u32) -> InternalArray {
let s = CStr::from_ptr(pl).to_str();
if let Ok(unwrapped) = s {
arr_from_string(unwrapped, precision)
} else {
vec![[f64::NAN, f64::NAN]].into()
}
}
/// Convert an array of coordinates into a Polyline
///
/// Callers must pass two arguments:
///
/// - a [Struct](struct.Array.html) with two fields:
/// - `data`, a void pointer to an array of floating-point lat, lon coordinates: `[[1.0, 2.0]]`
/// - `len`, the length of the array being passed. Its type must be `size_t`: `1`
/// - an unsigned 32-bit `int` for precision (5 for Google Polylines, 6 for
/// OSRM and Valhalla Polylines)
///
/// A decoding failure will return one of the following:
///
/// - a `char*` beginning with "Longitude error:" if invalid longitudes are passed
/// - a `char*` beginning with "Latitude error:" if invalid latitudes are passed
///
/// Implementations calling this function **must** call [`drop_cstring`](fn.drop_cstring.html)
/// with the returned `c_char` pointer, in order to free the memory it allocates.
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn encode_coordinates_ffi(coords: ExternalArray, precision: u32) -> *mut c_char {
let s: String = string_from_arr(coords, precision);
match CString::new(s) {
Ok(res) => res.into_raw(),
// It's arguably better to fail noisily, but this is robust
Err(_) => CString::new("Couldn't decode Polyline".to_string())
.unwrap()
.into_raw(),
}
}
/// Free Array memory which Rust has allocated across the FFI boundary
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub extern "C" fn drop_float_array(_: InternalArray) {}
/// Free `CString` memory which Rust has allocated across the FFI boundary
///
/// # Safety
///
/// This function is unsafe because it accesses a raw pointer which could contain arbitrary data
#[no_mangle]
pub unsafe extern "C" fn drop_cstring(p: *mut c_char)
|
#[cfg(test)]
mod tests {
use super::*;
use std::ptr;
#[test]
fn test_drop_empty_float_array() {
let original: LineString<_> = vec![[2.0, 1.0], [4.0, 3.0]].into();
// move into an Array, and leak it
let mut arr: InternalArray = original.into();
// zero Array contents
arr.data = ptr::null_mut();
drop_float_array(arr);
}
#[test]
fn test_coordinate_conversion() {
let input = vec![[2.0, 1.0], [4.0, 3.0]];
let output = "_ibE_seK_seK_seK";
let input_arr: ExternalArray = input.into();
let transformed: String = super::string_from_arr(input_arr, 5);
assert_eq!(transformed, output);
}
#[test]
fn test_string_conversion() {
let input = "_ibE_seK_seK_seK";
let output = vec![[2.0, 1.0], [4.0, 3.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
#[should_panic]
fn test_bad_string_conversion() {
let input = "_p~iF~ps|U_u🗑lLnnqC_mqNvxq`@";
let output = vec![[1.0, 2.0], [3.0, 4.0]];
// String to Array
let transformed: InternalArray = super::arr_from_string(input, 5);
// Array to LS via slice, as we want to take ownership of a copy for testing purposes
let v = unsafe {
slice::from_raw_parts(transformed.data as *mut [f64; 2], transformed.len).to_vec()
};
let ls: LineString<_> = v.into();
assert_eq!(ls, output.into());
}
#[test]
fn test_long_vec() {
use std::clone::Clone;
let arr = include!("../test_fixtures/berlin.rs");
let s = include!("../test_fixtures/berlin_decoded.rs");
for _ in 0..9999 {
let a = arr.clone();
let s_ = s.clone();
let n = 5;
let input_ls: ExternalArray = a.into();
let transformed: String = super::string_from_arr(input_ls, n);
assert_eq!(transformed, s_);
}
}
}
|
{
drop(CString::from_raw(p));
}
|
identifier_body
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use option::{Some, None};
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use vec::{OwnedVector, MutableVector, ImmutableVector};
use path::GenericPath;
use rt::sched::Scheduler;
use unstable::{run_in_bare_thread};
use rt::thread::Thread;
use rt::task::Task;
use rt::uv::uvio::UvEventLoop;
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::comm::oneshot;
use result::{Result, Ok, Err};
pub fn new_test_uv_sched() -> Scheduler {
let queue = WorkQueue::new();
let queues = ~[queue.clone()];
let mut sched = Scheduler::new(~UvEventLoop::new(),
queue,
queues,
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn run_in_newsched_task(f: ~fn()) {
let f = Cell::new(f);
do run_in_bare_thread {
run_in_newsched_task_core(f.take());
}
}
pub fn run_in_newsched_task_core(f: ~fn()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_uv_sched();
let exit_handle = Cell::new(sched.make_handle());
let on_exit: ~fn(bool) = |exit_status| {
exit_handle.take().send(Shutdown);
rtassert!(exit_status);
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
#[fixed_stack_segment]; #[inline(never)];
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use sys::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int,..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
// Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
unsafe { darwin_fd_limit::raise_fd_limit() }
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
// see comment in other function (raising fd limits)
prepare_for_lots_of_tests();
let f = Cell::new(f);
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).unwrap(),
None => {
if util::limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
// Using more threads than cores in test code
// to force the OS to preempt them frequently.
// Assuming that this helps stress-test concurrent types.
util::num_cpus() * 2
}
}
};
let sleepers = SleeperList::new();
let mut handles = ~[];
let mut scheds = ~[];
let mut work_queues = ~[];
for _ in range(0u, nthreads) {
let work_queue = WorkQueue::new();
work_queues.push(work_queue);
}
for i in range(0u, nthreads) {
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_,
work_queues[i].clone(),
work_queues.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_status| {
let mut handles = handles.take();
// Tell schedulers to exit
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
rtassert!(exit_status);
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
main_task.death.on_exit = Some(on_exit);
let mut threads = ~[];
let main_task = Cell::new(main_task);
let main_thread = {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(main_task.take());
}
};
threads.push(main_thread);
while !scheds.is_empty() {
let mut sched = scheds.pop();
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("bootstrapping non-primary scheduler");
};
let bootstrap_task_cell = Cell::new(bootstrap_task);
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(bootstrap_task_cell.take());
};
threads.push(thread);
}
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
}
}
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
Scheduler::run_task(Task::build_child(None, f));
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
Scheduler::run_task_later(Task::build_child(None, f));
}
pub fn spawntask_random(f: ~fn()) {
use rand::{Rand, rng};
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
if run_now {
spawntask(f)
} else {
spawntask_later(f)
}
}
pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
let (port, chan) = oneshot();
let chan = Cell::new(chan);
let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
let mut new_task = Task::build_root(None, f);
new_task.death.on_exit = Some(on_exit);
Scheduler::run_task(new_task);
let exit_status = port.recv();
if exit_status { Ok(()) } else { Err(()) }
}
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn
|
(f: ~fn()) -> Thread {
let f = Cell::new(f);
let thread = do Thread::start {
run_in_newsched_task_core(f.take());
};
return thread;
}
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
do run_in_bare_thread {
let mut sched = ~new_test_uv_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
cleanup_task(task);
}
}
/// Use to clean up tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
task.destroyed = true;
}
/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
unsafe {
return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
}
extern {
fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
use os;
use str::StrSlice;
use vec::ImmutableVector;
let base = 9600u;
let range = 1000;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;
use from_str::from_str;
match getenv("RUST_RT_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1
}
}
|
spawntask_thread
|
identifier_name
|
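The darwin_fd_limit module in the test.rs sample above explains why the soft RLIMIT_NOFILE limit has to be raised for multithreaded scheduler tests on Mac OS X. That sample is pre-1.0 Rust; below is a sketch of the same strategy in modern Rust with the `libc` crate (Unix-only, and simplified: it raises the soft limit to the hard limit instead of consulting the kern.maxfilesperproc sysctl), offered as an illustration rather than a port of the sample:

```rust
use std::io;

/// Raise the soft RLIMIT_NOFILE limit as far as the hard limit allows.
fn raise_fd_limit() -> io::Result<u64> {
    unsafe {
        // Fetch the current resource limits.
        let mut rlim = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
        if libc::getrlimit(libc::RLIMIT_NOFILE, &mut rlim) != 0 {
            return Err(io::Error::last_os_error());
        }
        // The original code bumps the soft limit to
        // min(kern.maxfilesperproc, hard limit); here we simply use the
        // hard limit.
        rlim.rlim_cur = rlim.rlim_max;
        if libc::setrlimit(libc::RLIMIT_NOFILE, &rlim) != 0 {
            return Err(io::Error::last_os_error());
        }
        Ok(rlim.rlim_cur as u64)
    }
}

fn main() {
    match raise_fd_limit() {
        Ok(n) => println!("soft fd limit raised to {}", n),
        Err(e) => eprintln!("could not raise fd limit: {}", e),
    }
}
```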
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use option::{Some, None};
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use vec::{OwnedVector, MutableVector, ImmutableVector};
use path::GenericPath;
use rt::sched::Scheduler;
use unstable::{run_in_bare_thread};
use rt::thread::Thread;
use rt::task::Task;
use rt::uv::uvio::UvEventLoop;
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::comm::oneshot;
use result::{Result, Ok, Err};
pub fn new_test_uv_sched() -> Scheduler {
let queue = WorkQueue::new();
let queues = ~[queue.clone()];
let mut sched = Scheduler::new(~UvEventLoop::new(),
queue,
queues,
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn run_in_newsched_task(f: ~fn()) {
let f = Cell::new(f);
do run_in_bare_thread {
run_in_newsched_task_core(f.take());
}
}
pub fn run_in_newsched_task_core(f: ~fn()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_uv_sched();
let exit_handle = Cell::new(sched.make_handle());
let on_exit: ~fn(bool) = |exit_status| {
exit_handle.take().send(Shutdown);
rtassert!(exit_status);
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
#[fixed_stack_segment]; #[inline(never)];
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use sys::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int,..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit()
|
}
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
// Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
unsafe { darwin_fd_limit::raise_fd_limit() }
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
// see comment in other function (raising fd limits)
prepare_for_lots_of_tests();
let f = Cell::new(f);
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).unwrap(),
None => {
if util::limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
// Using more threads than cores in test code
// to force the OS to preempt them frequently.
// Assuming that this helps stress-test concurrent types.
util::num_cpus() * 2
}
}
};
let sleepers = SleeperList::new();
let mut handles = ~[];
let mut scheds = ~[];
let mut work_queues = ~[];
for _ in range(0u, nthreads) {
let work_queue = WorkQueue::new();
work_queues.push(work_queue);
}
for i in range(0u, nthreads) {
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_,
work_queues[i].clone(),
work_queues.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_status| {
let mut handles = handles.take();
// Tell schedulers to exit
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
rtassert!(exit_status);
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
main_task.death.on_exit = Some(on_exit);
let mut threads = ~[];
let main_task = Cell::new(main_task);
let main_thread = {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(main_task.take());
}
};
threads.push(main_thread);
while !scheds.is_empty() {
let mut sched = scheds.pop();
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("bootstrapping non-primary scheduler");
};
let bootstrap_task_cell = Cell::new(bootstrap_task);
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(bootstrap_task_cell.take());
};
threads.push(thread);
}
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
}
}
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
Scheduler::run_task(Task::build_child(None, f));
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
Scheduler::run_task_later(Task::build_child(None, f));
}
pub fn spawntask_random(f: ~fn()) {
use rand::{Rand, rng};
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
if run_now {
spawntask(f)
} else {
spawntask_later(f)
}
}
pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
let (port, chan) = oneshot();
let chan = Cell::new(chan);
let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
let mut new_task = Task::build_root(None, f);
new_task.death.on_exit = Some(on_exit);
Scheduler::run_task(new_task);
let exit_status = port.recv();
if exit_status { Ok(()) } else { Err(()) }
}
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {
let f = Cell::new(f);
let thread = do Thread::start {
run_in_newsched_task_core(f.take());
};
return thread;
}
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
do run_in_bare_thread {
let mut sched = ~new_test_uv_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
cleanup_task(task);
}
}
/// Used to clean up tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
task.destroyed = true;
}
/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
unsafe {
return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
}
extern {
fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
use os;
use str::StrSlice;
use vec::ImmutableVector;
let base = 9600u;
let range = 1000;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;
use from_str::from_str;
match getenv("RUST_RT_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1
}
}
|
{}
|
identifier_body
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use option::{Some, None};
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use vec::{OwnedVector, MutableVector, ImmutableVector};
use path::GenericPath;
use rt::sched::Scheduler;
use unstable::{run_in_bare_thread};
use rt::thread::Thread;
use rt::task::Task;
use rt::uv::uvio::UvEventLoop;
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::comm::oneshot;
use result::{Result, Ok, Err};
pub fn new_test_uv_sched() -> Scheduler {
let queue = WorkQueue::new();
let queues = ~[queue.clone()];
let mut sched = Scheduler::new(~UvEventLoop::new(),
queue,
queues,
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn run_in_newsched_task(f: ~fn()) {
let f = Cell::new(f);
do run_in_bare_thread {
run_in_newsched_task_core(f.take());
}
}
pub fn run_in_newsched_task_core(f: ~fn()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_uv_sched();
let exit_handle = Cell::new(sched.make_handle());
let on_exit: ~fn(bool) = |exit_status| {
exit_handle.take().send(Shutdown);
rtassert!(exit_status);
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
#[fixed_stack_segment]; #[inline(never)];
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use sys::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
// Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
unsafe { darwin_fd_limit::raise_fd_limit() }
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
// see comment in other function (raising fd limits)
prepare_for_lots_of_tests();
let f = Cell::new(f);
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).unwrap(),
None => {
if util::limit_thread_creation_due_to_osx_and_valgrind() {
1
} else
|
}
};
let sleepers = SleeperList::new();
let mut handles = ~[];
let mut scheds = ~[];
let mut work_queues = ~[];
for _ in range(0u, nthreads) {
let work_queue = WorkQueue::new();
work_queues.push(work_queue);
}
for i in range(0u, nthreads) {
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_,
work_queues[i].clone(),
work_queues.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_status| {
let mut handles = handles.take();
// Tell schedulers to exit
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
rtassert!(exit_status);
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
main_task.death.on_exit = Some(on_exit);
let mut threads = ~[];
let main_task = Cell::new(main_task);
let main_thread = {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(main_task.take());
}
};
threads.push(main_thread);
while !scheds.is_empty() {
let mut sched = scheds.pop();
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("bootstrapping non-primary scheduler");
};
let bootstrap_task_cell = Cell::new(bootstrap_task);
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(bootstrap_task_cell.take());
};
threads.push(thread);
}
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
}
}
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
Scheduler::run_task(Task::build_child(None, f));
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
Scheduler::run_task_later(Task::build_child(None, f));
}
pub fn spawntask_random(f: ~fn()) {
use rand::{Rand, rng};
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
if run_now {
spawntask(f)
} else {
spawntask_later(f)
}
}
pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
let (port, chan) = oneshot();
let chan = Cell::new(chan);
let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
let mut new_task = Task::build_root(None, f);
new_task.death.on_exit = Some(on_exit);
Scheduler::run_task(new_task);
let exit_status = port.recv();
if exit_status { Ok(()) } else { Err(()) }
}
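// Usage sketch (illustrative, not from the original file): spawntask_try
// reports whether the spawned task exited cleanly, so a test can observe a
// deliberately failing task without aborting the whole runner, roughly:
//
//     assert!(spawntask_try(|| ()).is_ok());
//     assert!(spawntask_try(|| fail!("boom")).is_err());
//
// (Closure literals shown for brevity; the argument is an owned ~fn().)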
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {
let f = Cell::new(f);
let thread = do Thread::start {
run_in_newsched_task_core(f.take());
};
return thread;
}
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
do run_in_bare_thread {
let mut sched = ~new_test_uv_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
cleanup_task(task);
}
}
/// Used to clean up tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
task.destroyed = true;
}
/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
unsafe {
return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
}
extern {
fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
use os;
use str::StrSlice;
use vec::ImmutableVector;
let base = 9600u;
let range = 1000;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
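// Worked example (illustrative, not part of the original source): a bot whose
// working directory contains "64-opt" matches the third entry above, so
// base_port() returns 9600 + 1000 * 3 = 12600 and that build's tests hand out
// ports upward from 12600, while a "32-opt" build starts from 10600. Each
// workspace therefore draws from its own 1000-port range and concurrent builds
// cannot collide.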
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;
use from_str::from_str;
match getenv("RUST_RT_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1
}
}
|
{
// Using more threads than cores in test code
// to force the OS to preempt them frequently.
// Assuming that this helps stress test concurrent types.
util::num_cpus() * 2
}
|
conditional_block
|
test.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use libc;
use option::{Some, None};
use cell::Cell;
use clone::Clone;
use container::Container;
use iter::{Iterator, range};
use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
use vec::{OwnedVector, MutableVector, ImmutableVector};
use path::GenericPath;
use rt::sched::Scheduler;
use unstable::{run_in_bare_thread};
use rt::thread::Thread;
use rt::task::Task;
use rt::uv::uvio::UvEventLoop;
use rt::work_queue::WorkQueue;
use rt::sleeper_list::SleeperList;
use rt::comm::oneshot;
use result::{Result, Ok, Err};
pub fn new_test_uv_sched() -> Scheduler {
let queue = WorkQueue::new();
let queues = ~[queue.clone()];
let mut sched = Scheduler::new(~UvEventLoop::new(),
queue,
queues,
SleeperList::new());
// Don't wait for the Shutdown message
sched.no_sleep = true;
return sched;
}
pub fn run_in_newsched_task(f: ~fn()) {
let f = Cell::new(f);
do run_in_bare_thread {
run_in_newsched_task_core(f.take());
}
}
pub fn run_in_newsched_task_core(f: ~fn()) {
use rt::sched::Shutdown;
let mut sched = ~new_test_uv_sched();
let exit_handle = Cell::new(sched.make_handle());
let on_exit: ~fn(bool) = |exit_status| {
exit_handle.take().send(Shutdown);
rtassert!(exit_status);
};
let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
task.death.on_exit = Some(on_exit);
sched.bootstrap(task);
}
#[cfg(target_os="macos")]
#[allow(non_camel_case_types)]
mod darwin_fd_limit {
/*!
* darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
* rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
* for our multithreaded scheduler testing, depending on the number of cores available.
*
* This fixes issue #7772.
*/
use libc;
type rlim_t = libc::uint64_t;
struct rlimit {
rlim_cur: rlim_t,
rlim_max: rlim_t
}
#[nolink]
extern {
// name probably doesn't need to be mut, but the C function doesn't specify const
fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
}
static CTL_KERN: libc::c_int = 1;
static KERN_MAXFILESPERPROC: libc::c_int = 29;
static RLIMIT_NOFILE: libc::c_int = 8;
pub unsafe fn raise_fd_limit() {
#[fixed_stack_segment]; #[inline(never)];
// The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
// sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
use sys::size_of_val;
use os::last_os_error;
// Fetch the kern.maxfilesperproc value
let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
let mut maxfiles: libc::c_int = 0;
let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
to_mut_unsafe_ptr(&mut size),
mut_null(), 0) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling sysctl: {}", err);
return;
}
// Fetch the current resource limits
let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling getrlimit: {}", err);
return;
}
// Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
// Set our newly-increased resource limit
if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
let err = last_os_error();
error2!("raise_fd_limit: error calling setrlimit: {}", err);
return;
}
}
}
#[cfg(not(target_os="macos"))]
mod darwin_fd_limit {
pub unsafe fn raise_fd_limit() {}
}
#[doc(hidden)]
pub fn prepare_for_lots_of_tests() {
// Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
unsafe { darwin_fd_limit::raise_fd_limit() }
}
/// Create more than one scheduler and run a function in a task
/// in one of the schedulers. The schedulers will stay alive
/// until the function `f` returns.
pub fn run_in_mt_newsched_task(f: ~fn()) {
use os;
use from_str::FromStr;
use rt::sched::Shutdown;
use rt::util;
// see comment in other function (raising fd limits)
prepare_for_lots_of_tests();
let f = Cell::new(f);
do run_in_bare_thread {
let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
Some(nstr) => FromStr::from_str(nstr).unwrap(),
None => {
|
// to force the OS to preempt them frequently.
// Assuming that this helps stress test concurrent types.
util::num_cpus() * 2
}
}
};
let sleepers = SleeperList::new();
let mut handles = ~[];
let mut scheds = ~[];
let mut work_queues = ~[];
for _ in range(0u, nthreads) {
let work_queue = WorkQueue::new();
work_queues.push(work_queue);
}
for i in range(0u, nthreads) {
let loop_ = ~UvEventLoop::new();
let mut sched = ~Scheduler::new(loop_,
work_queues[i].clone(),
work_queues.clone(),
sleepers.clone());
let handle = sched.make_handle();
handles.push(handle);
scheds.push(sched);
}
let handles = Cell::new(handles);
let on_exit: ~fn(bool) = |exit_status| {
let mut handles = handles.take();
// Tell schedulers to exit
for handle in handles.mut_iter() {
handle.send(Shutdown);
}
rtassert!(exit_status);
};
let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
main_task.death.on_exit = Some(on_exit);
let mut threads = ~[];
let main_task = Cell::new(main_task);
let main_thread = {
let sched = scheds.pop();
let sched_cell = Cell::new(sched);
do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(main_task.take());
}
};
threads.push(main_thread);
while !scheds.is_empty() {
let mut sched = scheds.pop();
let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
rtdebug!("bootstrapping non-primary scheduler");
};
let bootstrap_task_cell = Cell::new(bootstrap_task);
let sched_cell = Cell::new(sched);
let thread = do Thread::start {
let sched = sched_cell.take();
sched.bootstrap(bootstrap_task_cell.take());
};
threads.push(thread);
}
// Wait for schedulers
for thread in threads.move_iter() {
thread.join();
}
}
}
/// Test tasks will abort on failure instead of unwinding
pub fn spawntask(f: ~fn()) {
Scheduler::run_task(Task::build_child(None, f));
}
/// Create a new task and run it right now. Aborts on failure
pub fn spawntask_later(f: ~fn()) {
Scheduler::run_task_later(Task::build_child(None, f));
}
pub fn spawntask_random(f: ~fn()) {
use rand::{Rand, rng};
let mut rng = rng();
let run_now: bool = Rand::rand(&mut rng);
if run_now {
spawntask(f)
} else {
spawntask_later(f)
}
}
pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
let (port, chan) = oneshot();
let chan = Cell::new(chan);
let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
let mut new_task = Task::build_root(None, f);
new_task.death.on_exit = Some(on_exit);
Scheduler::run_task(new_task);
let exit_status = port.recv();
if exit_status { Ok(()) } else { Err(()) }
}
/// Spawn a new task in a new scheduler and return a thread handle.
pub fn spawntask_thread(f: ~fn()) -> Thread {
let f = Cell::new(f);
let thread = do Thread::start {
run_in_newsched_task_core(f.take());
};
return thread;
}
/// Get a ~Task for testing purposes other than actually scheduling it.
pub fn with_test_task(blk: ~fn(~Task) -> ~Task) {
do run_in_bare_thread {
let mut sched = ~new_test_uv_sched();
let task = blk(~Task::new_root(&mut sched.stack_pool, None, ||{}));
cleanup_task(task);
}
}
/// Used to clean up tasks created for testing but not "run".
pub fn cleanup_task(mut task: ~Task) {
task.destroyed = true;
}
/// Get a port number, starting at 9600, for use in tests
#[fixed_stack_segment] #[inline(never)]
pub fn next_test_port() -> u16 {
unsafe {
return rust_dbg_next_port(base_port() as libc::uintptr_t) as u16;
}
extern {
fn rust_dbg_next_port(base: libc::uintptr_t) -> libc::uintptr_t;
}
}
/// Get a unique IPv4 localhost:port pair starting at 9600
pub fn next_test_ip4() -> SocketAddr {
SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
}
/// Get a unique IPv6 localhost:port pair starting at 9600
pub fn next_test_ip6() -> SocketAddr {
SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
}
/*
XXX: Welcome to MegaHack City.
The bots run multiple builds at the same time, and these builds
all want to use ports. This function figures out which workspace
it is running in and assigns a port range based on it.
*/
fn base_port() -> uint {
use os;
use str::StrSlice;
use vec::ImmutableVector;
let base = 9600u;
let range = 1000;
let bases = [
("32-opt", base + range * 1),
("32-noopt", base + range * 2),
("64-opt", base + range * 3),
("64-noopt", base + range * 4),
("64-opt-vg", base + range * 5),
("all-opt", base + range * 6),
("snap3", base + range * 7),
("dist", base + range * 8)
];
// FIXME (#9639): This needs to handle non-utf8 paths
let path = os::getcwd();
let path_s = path.as_str().unwrap();
let mut final_base = base;
for &(dir, base) in bases.iter() {
if path_s.contains(dir) {
final_base = base;
break;
}
}
return final_base;
}
/// Get a constant that represents the number of times to repeat
/// stress tests. Default 1.
pub fn stress_factor() -> uint {
use os::getenv;
use from_str::from_str;
match getenv("RUST_RT_STRESS") {
Some(val) => from_str::<uint>(val).unwrap(),
None => 1
}
}
|
if util::limit_thread_creation_due_to_osx_and_valgrind() {
1
} else {
// Using more threads than cores in test code
|
random_line_split
|
trace.rs
|
//! The Trace trait must be implemented by every type that can be GC managed.
use heap::TraceStack;
/// Trace trait. Every type that can be managed by the GC must implement this trait.
/// This trait is unsafe in that incorrectly implementing it can cause Undefined Behavior.
pub unsafe trait Trace {
/// If the type can contain GC managed pointers, this must return true
fn traversible(&self) -> bool
|
/// If the type can contain GC managed pointers, this must visit each pointer.
///
/// This function must be thread-safe!
///
/// It must read a snapshot of the data structure it is implemented for.
unsafe fn trace(&self, _stack: &mut TraceStack) {}
}
unsafe impl Trace for usize {}
unsafe impl Trace for isize {}
unsafe impl Trace for i8 {}
unsafe impl Trace for u8 {}
unsafe impl Trace for i16 {}
unsafe impl Trace for u16 {}
unsafe impl Trace for i32 {}
unsafe impl Trace for u32 {}
unsafe impl Trace for i64 {}
unsafe impl Trace for u64 {}
unsafe impl Trace for f32 {}
unsafe impl Trace for f64 {}
unsafe impl<'a> Trace for &'a str {}
unsafe impl Trace for String {}
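// Hypothetical sketch (not part of the original crate): a plain value type with
// no GC-managed pointers can rely on the trait defaults, i.e. `traversible()`
// returning false and the empty `trace()`, exactly like the primitive impls above.
struct Point {
    x: f64,
    y: f64,
}
unsafe impl Trace for Point {}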
|
{
false
}
|
identifier_body
|
trace.rs
|
//! The Trace trait must be implemented by every type that can be GC managed.
use heap::TraceStack;
/// Trace trait. Every type that can be managed by the GC must implement this trait.
/// This trait is unsafe in that incorrectly implementing it can cause Undefined Behavior.
pub unsafe trait Trace {
|
/// If the type can contain GC managed pointers, this must visit each pointer.
///
/// This function must be thread-safe!
///
/// It must read a snapshot of the data structure it is implemented for.
unsafe fn trace(&self, _stack: &mut TraceStack) {}
}
unsafe impl Trace for usize {}
unsafe impl Trace for isize {}
unsafe impl Trace for i8 {}
unsafe impl Trace for u8 {}
unsafe impl Trace for i16 {}
unsafe impl Trace for u16 {}
unsafe impl Trace for i32 {}
unsafe impl Trace for u32 {}
unsafe impl Trace for i64 {}
unsafe impl Trace for u64 {}
unsafe impl Trace for f32 {}
unsafe impl Trace for f64 {}
unsafe impl<'a> Trace for &'a str {}
unsafe impl Trace for String {}
|
/// If the type can contain GC managed pointers, this must return true
fn traversible(&self) -> bool {
false
}
|
random_line_split
|
trace.rs
|
//! The Trace trait must be implemented by every type that can be GC managed.
use heap::TraceStack;
/// Trace trait. Every type that can be managed by the GC must implement this trait.
/// This trait is unsafe in that incorrectly implementing it can cause Undefined Behavior.
pub unsafe trait Trace {
/// If the type can contain GC managed pointers, this must return true
fn traversible(&self) -> bool {
false
}
/// If the type can contain GC managed pointers, this must visit each pointer.
///
/// This function must be thread-safe!
///
/// It must read a snapshot of the data structure it is implemented for.
unsafe fn
|
(&self, _stack: &mut TraceStack) {}
}
unsafe impl Trace for usize {}
unsafe impl Trace for isize {}
unsafe impl Trace for i8 {}
unsafe impl Trace for u8 {}
unsafe impl Trace for i16 {}
unsafe impl Trace for u16 {}
unsafe impl Trace for i32 {}
unsafe impl Trace for u32 {}
unsafe impl Trace for i64 {}
unsafe impl Trace for u64 {}
unsafe impl Trace for f32 {}
unsafe impl Trace for f64 {}
unsafe impl<'a> Trace for &'a str {}
unsafe impl Trace for String {}
|
trace
|
identifier_name
|
fixed_interval.rs
|
use std::iter::Iterator;
use std::time::Duration;
/// A retry strategy driven by a fixed interval.
#[derive(Debug, Clone)]
pub struct FixedInterval {
duration: Duration,
}
impl FixedInterval {
/// Constructs a new fixed interval strategy.
pub fn new(duration: Duration) -> FixedInterval {
FixedInterval { duration: duration }
}
/// Constructs a new fixed interval strategy,
/// given a duration in milliseconds.
pub fn from_millis(millis: u64) -> FixedInterval {
FixedInterval {
duration: Duration::from_millis(millis),
}
}
}
impl Iterator for FixedInterval {
type Item = Duration;
fn next(&mut self) -> Option<Duration>
|
}
#[test]
fn returns_some_fixed() {
let mut s = FixedInterval::new(Duration::from_millis(123));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
}
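// Usage sketch (illustrative, not from the original module): because
// `FixedInterval` is an endless `Iterator<Item = Duration>`, callers typically
// bound it with `take` and sleep between attempts.
fn retry_up_to_three_times(mut attempt: impl FnMut() -> bool) -> bool {
    for delay in FixedInterval::from_millis(50).take(3) {
        if attempt() {
            return true;
        }
        // Wait the fixed 50ms interval before the next attempt.
        std::thread::sleep(delay);
    }
    false
}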
|
{
Some(self.duration)
}
|
identifier_body
|
fixed_interval.rs
|
use std::iter::Iterator;
use std::time::Duration;
/// A retry strategy driven by a fixed interval.
#[derive(Debug, Clone)]
pub struct FixedInterval {
duration: Duration,
}
impl FixedInterval {
/// Constructs a new fixed interval strategy.
pub fn
|
(duration: Duration) -> FixedInterval {
FixedInterval { duration: duration }
}
/// Constructs a new fixed interval strategy,
/// given a duration in milliseconds.
pub fn from_millis(millis: u64) -> FixedInterval {
FixedInterval {
duration: Duration::from_millis(millis),
}
}
}
impl Iterator for FixedInterval {
type Item = Duration;
fn next(&mut self) -> Option<Duration> {
Some(self.duration)
}
}
#[test]
fn returns_some_fixed() {
let mut s = FixedInterval::new(Duration::from_millis(123));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
}
|
new
|
identifier_name
|
fixed_interval.rs
|
use std::iter::Iterator;
use std::time::Duration;
/// A retry strategy driven by a fixed interval.
#[derive(Debug, Clone)]
pub struct FixedInterval {
duration: Duration,
}
impl FixedInterval {
/// Constructs a new fixed interval strategy.
pub fn new(duration: Duration) -> FixedInterval {
FixedInterval { duration: duration }
}
/// Constructs a new fixed interval strategy,
/// given a duration in milliseconds.
pub fn from_millis(millis: u64) -> FixedInterval {
FixedInterval {
duration: Duration::from_millis(millis),
}
}
}
impl Iterator for FixedInterval {
type Item = Duration;
|
#[test]
fn returns_some_fixed() {
let mut s = FixedInterval::new(Duration::from_millis(123));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
assert_eq!(s.next(), Some(Duration::from_millis(123)));
}
|
fn next(&mut self) -> Option<Duration> {
Some(self.duration)
}
}
|
random_line_split
|
traits.rs
|
// * This file is part of the uutils coreutils package.
// *
// * (c) 2020 Alex Lyon <[email protected]>
|
// * (c) 2020 nicoo <[email protected]>
// *
// * For the full copyright and license information, please view the LICENSE file
// * that was distributed with this source code.
pub(crate) use num_traits::{
identities::{One, Zero},
ops::overflowing::OverflowingAdd,
};
use num_traits::{
int::PrimInt,
ops::wrapping::{WrappingMul, WrappingNeg, WrappingSub},
};
use std::fmt::{Debug, Display};
pub(crate) trait Int:
Display + Debug + PrimInt + OverflowingAdd + WrappingNeg + WrappingSub + WrappingMul
{
fn as_u64(&self) -> u64;
fn from_u64(n: u64) -> Self;
#[cfg(debug_assertions)]
fn as_u128(&self) -> u128;
}
pub(crate) trait DoubleInt: Int {
/// An integer type with twice the width of `Self`.
/// In particular, multiplications (of `Int` values) can be performed in
/// `Self::DoubleWidth` without possibility of overflow.
type DoubleWidth: Int;
fn as_double_width(self) -> Self::DoubleWidth;
fn from_double_width(n: Self::DoubleWidth) -> Self;
}
macro_rules! int {
( $x:ty ) => {
impl Int for $x {
fn as_u64(&self) -> u64 {
*self as u64
}
fn from_u64(n: u64) -> Self {
n as _
}
#[cfg(debug_assertions)]
fn as_u128(&self) -> u128 {
*self as u128
}
}
};
}
macro_rules! double_int {
( $x:ty, $y:ty ) => {
int!($x);
impl DoubleInt for $x {
type DoubleWidth = $y;
fn as_double_width(self) -> $y {
self as _
}
fn from_double_width(n: $y) -> Self {
n as _
}
}
};
}
double_int!(u32, u64);
double_int!(u64, u128);
int!(u128);
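// Hypothetical sketch (not part of the original module): the point of
// `DoubleWidth` is that a product of two `Int` values always fits in it, so a
// modular multiplication can widen, multiply, reduce, and only then narrow back.
#[allow(dead_code)]
fn mul_mod<T: DoubleInt>(a: T, b: T, m: T) -> T {
    let product = a.as_double_width() * b.as_double_width();
    T::from_double_width(product % m.as_double_width())
}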
/// Helper macro for instantiating tests over u32 and u64
#[cfg(test)]
#[macro_export]
macro_rules! parametrized_check {
( $f:ident ) => {
paste::item! {
#[test]
fn [< $f _ u32 >]() {
$f::<u32>()
}
#[test]
fn [< $f _ u64 >]() {
$f::<u64>()
}
}
};
}
|
random_line_split
|
|
graph.rs
|
format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
}
);
}
}
basic_blocks.push(bb);
let term = data.terminator();
match term.kind {
TerminatorKind::Return { .. }
| TerminatorKind::Abort
| TerminatorKind::Yield { .. }
| TerminatorKind::SwitchInt { .. } => {
// The `bb` has more than one _outgoing_ edge, or exits the function. Save the
// current sequence of `basic_blocks` gathered to this point, as a new
// `BasicCoverageBlockData`.
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(" because term.kind = {:?}", term.kind);
// Note that this condition is based on `TerminatorKind`, even though it
// theoretically boils down to `successors().len() != 1`; that is, either zero
// (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
// since the BCB CFG ignores things like unwind branches (which exist in the
// `Terminator`s `successors()` list) checking the number of successors won't
// work.
}
// The following `TerminatorKind`s are either not expected outside an unwind branch,
// or they should not (under normal circumstances) branch. Coverage graphs are
// simplified by assuring coverage results are accurate for program executions that
// don't panic.
//
// Programs that panic and unwind may record slightly inaccurate coverage results
// for a coverage region containing the `Terminator` that began the panic. This
// is as intended. (See Issue #78544 for a possible future option to support
// coverage in test programs that panic.)
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::DropAndReplace { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Assert { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::InlineAsm { .. } => {}
}
}
if !basic_blocks.is_empty() {
// process any remaining basic_blocks into a final `BasicCoverageBlockData`
Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
debug!(" because the end of the MIR CFG was reached while traversing");
}
(bcbs, bb_to_bcb)
}
fn add_basic_coverage_block(
bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
basic_blocks: Vec<BasicBlock>,
) {
let bcb = BasicCoverageBlock::from_usize(bcbs.len());
for &bb in basic_blocks.iter() {
bb_to_bcb[bb] = Some(bcb);
}
let bcb_data = BasicCoverageBlockData::from(basic_blocks);
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
bcbs.push(bcb_data);
}
#[inline(always)]
pub fn iter_enumerated(
&self,
) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
self.bcbs.iter_enumerated()
}
#[inline(always)]
pub fn iter_enumerated_mut(
&mut self,
) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
self.bcbs.iter_enumerated_mut()
}
#[inline(always)]
pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
}
#[inline(always)]
pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
}
#[inline(always)]
pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
self.dominators.as_ref().unwrap()
}
}
impl Index<BasicCoverageBlock> for CoverageGraph {
type Output = BasicCoverageBlockData;
#[inline]
fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
&self.bcbs[index]
}
}
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
#[inline]
fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
&mut self.bcbs[index]
}
}
impl graph::DirectedGraph for CoverageGraph {
type Node = BasicCoverageBlock;
}
impl graph::WithNumNodes for CoverageGraph {
#[inline]
fn num_nodes(&self) -> usize {
self.bcbs.len()
}
}
impl graph::WithStartNode for CoverageGraph {
#[inline]
fn start_node(&self) -> Self::Node {
self.bcb_from_bb(mir::START_BLOCK)
.expect("mir::START_BLOCK should be in a BasicCoverageBlock")
}
}
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
impl graph::WithSuccessors for CoverageGraph {
#[inline]
fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
self.successors[node].iter().cloned()
}
}
impl graph::GraphPredecessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
self.predecessors[node].iter().copied()
}
}
rustc_index::newtype_index! {
/// A node in the [control-flow graph][CFG] of CoverageGraph.
pub(super) struct BasicCoverageBlock {
DEBUG_FORMAT = "bcb{}",
const START_BCB = 0,
}
}
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
/// that is injected by the Rust compiler but has no physical source code to count. This also
/// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
/// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
/// of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
/// a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
pub basic_blocks: Vec<BasicBlock>,
pub counter_kind: Option<CoverageKind>,
edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
impl BasicCoverageBlockData {
pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
}
#[inline(always)]
pub fn leader_bb(&self) -> BasicBlock {
self.basic_blocks[0]
}
#[inline(always)]
pub fn last_bb(&self) -> BasicBlock {
*self.basic_blocks.last().unwrap()
}
#[inline(always)]
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
pub fn set_counter(
&mut self,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
debug_assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
let operand = counter_kind.as_operand_id();
if let Some(replaced) = self.counter_kind.replace(counter_kind) {
Error::from_string(format!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{:?} already had counter {:?}",
self, replaced,
))
} else {
Ok(operand)
}
}
#[inline(always)]
pub fn counter(&self) -> Option<&CoverageKind> {
self.counter_kind.as_ref()
}
#[inline(always)]
pub fn take_counter(&mut self) -> Option<CoverageKind> {
self.counter_kind.take()
}
pub fn set_edge_counter_from(
&mut self,
from_bcb: BasicCoverageBlock,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
if level_enabled!(tracing::Level::DEBUG) {
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
return Error::from_string(format!(
"attempt to add an incoming edge counter from {:?} when the target BCB already \
has a `Counter`",
from_bcb
));
}
}
let operand = counter_kind.as_operand_id();
if let Some(replaced) =
self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
{
Error::from_string(format!(
"attempt to set an edge counter more than once; from_bcb: \
{:?} already had counter {:?}",
from_bcb, replaced,
))
} else {
Ok(operand)
}
}
#[inline]
pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
edge_from_bcbs.get(&from_bcb)
} else {
None
}
}
#[inline]
pub fn take_edge_counters(
&mut self,
) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
}
pub fn id(&self) -> String {
format!(
"@{}",
self.basic_blocks
.iter()
.map(|bb| bb.index().to_string())
.collect::<Vec<_>>()
.join(ID_SEPARATOR)
)
}
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
pub edge_from_bcb: Option<BasicCoverageBlock>,
pub target_bcb: BasicCoverageBlock,
}
impl BcbBranch {
pub fn from_to(
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
basic_coverage_blocks: &CoverageGraph,
) -> Self {
let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
Some(from_bcb)
} else {
None
};
Self { edge_from_bcb, target_bcb: to_bcb }
}
pub fn counter<'a>(
&self,
basic_coverage_blocks: &'a CoverageGraph,
) -> Option<&'a CoverageKind> {
if let Some(from_bcb) = self.edge_from_bcb {
basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
} else {
basic_coverage_blocks[self.target_bcb].counter()
}
}
pub fn is_only_path_to_target(&self) -> bool {
self.edge_from_bcb.is_none()
}
}
impl std::fmt::Debug for BcbBranch {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(from_bcb) = self.edge_from_bcb {
write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
} else {
write!(fmt, "{:?}", self.target_bcb)
}
}
}
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
body: &'tcx &'a mir::Body<'tcx>,
term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
let mut successors = term_kind.successors();
Box::new(
match &term_kind {
// SwitchInt successors are never unwind, and all of them should be traversed.
TerminatorKind::SwitchInt { .. } => successors,
// For all other kinds, return only the first successor, if any, and ignore unwinds.
// NOTE: `chain(&[])` is required to coerce the `option::iter` (from
// `next().into_iter()`) into the `mir::Successors` aliased type.
_ => successors.next().into_iter().chain(&[]),
}
.filter(move |&&successor| {
body[successor].terminator().kind != TerminatorKind::Unreachable
}),
)
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
/// From one or more backedges returning to a loop header.
pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
/// worklist, to be traversed, of CoverageGraph in the loop with the given loop
/// backedges, such that the loop is the inner-most loop containing these
/// CoverageGraph nodes
pub worklist: Vec<BasicCoverageBlock>,
}
pub(super) struct TraverseCoverageGraphWithLoops {
pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
pub context_stack: Vec<TraversalContext>,
visited: BitSet<BasicCoverageBlock>,
}
impl TraverseCoverageGraphWithLoops {
pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
let start_bcb = basic_coverage_blocks.start_node();
let backedges = find_loop_backedges(basic_coverage_blocks);
let context_stack =
vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
// `context_stack` starts with a `TraversalContext` for the main function context (beginning
// with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
// of the stack as loops are entered, and popped off of the stack when a loop's worklist is
// exhausted.
let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
Self { backedges, context_stack, visited }
}
pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
debug!(
"TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
self.context_stack.iter().rev().collect::<Vec<_>>()
);
while let Some(next_bcb) = {
// Strip contexts with empty worklists from the top of the stack
while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
self.context_stack.pop();
}
// Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
} {
if !self.visited.insert(next_bcb) {
debug!("Already visited: {:?}", next_bcb);
continue;
}
debug!("Visiting {:?}", next_bcb);
if self.backedges[next_bcb].len() > 0 {
debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
self.context_stack.push(TraversalContext {
loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
worklist: Vec::new(),
});
}
self.extend_worklist(basic_coverage_blocks, next_bcb);
return Some(next_bcb);
}
None
}
pub fn extend_worklist(
&mut self,
basic_coverage_blocks: &CoverageGraph,
bcb: BasicCoverageBlock,
) {
let successors = &basic_coverage_blocks.successors[bcb];
debug!("{:?} has {} successors:", bcb, successors.len());
for &successor in successors {
if successor == bcb {
debug!(
"{:?} has itself as its own successor. (Note, the compiled code will \
generate an infinite loop.)",
bcb
);
// Don't re-add this successor to the worklist. We are already processing it.
break;
}
for context in self.context_stack.iter_mut().rev() {
// Add successors of the current BCB to the appropriate context. Successors that
// stay within a loop are added to the BCBs context worklist. Successors that
// exit the loop (they are not dominated by the loop header) must be reachable
// from other BCBs outside the loop, and they will be added to a different
// worklist.
//
// Branching blocks (with more than one successor) must be processed before
// blocks with only one successor, to prevent unnecessarily complicating
// `Expression`s by creating a Counter in a `BasicCoverageBlock` that the
// branching block would have given an `Expression` (or vice versa).
let (some_successor_to_add, some_loop_header) =
if let Some((_, loop_header)) = context.loop_backedges {
if basic_coverage_blocks.is_dominated_by(successor, loop_header)
|
{
(Some(successor), Some(loop_header))
}
|
conditional_block
|
|
graph.rs
|
's `successors()`. Coverage spans must map to actual source code,
// so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
// intentionally omits unwind paths.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
let mut basic_blocks = Vec::new();
for (bb, data) in mir_cfg_without_unwind {
if let Some(last) = basic_blocks.last() {
let predecessors = &mir_body.predecessors()[bb];
if predecessors.len() > 1 || !predecessors.contains(last) {
// The `bb` has more than one _incoming_ edge, and should start its own
// `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
// include `bb`; it contains a sequence of one or more sequential basic_blocks
// with no intermediate branches in or out. Save these as a new
// `BasicCoverageBlockData` before starting the new one.)
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(
" because {}",
if predecessors.len() > 1 {
"predecessors.len() > 1".to_owned()
} else {
format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
}
);
}
}
basic_blocks.push(bb);
let term = data.terminator();
match term.kind {
TerminatorKind::Return { .. }
| TerminatorKind::Abort
| TerminatorKind::Yield { .. }
| TerminatorKind::SwitchInt { .. } => {
// The `bb` has more than one _outgoing_ edge, or exits the function. Save the
// current sequence of `basic_blocks` gathered to this point, as a new
// `BasicCoverageBlockData`.
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(" because term.kind = {:?}", term.kind);
// Note that this condition is based on `TerminatorKind`, even though it
// theoretically boils down to `successors().len() != 1`; that is, either zero
// (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
// since the BCB CFG ignores things like unwind branches (which exist in the
// `Terminator`s `successors()` list) checking the number of successors won't
// work.
}
// The following `TerminatorKind`s are either not expected outside an unwind branch,
// or they should not (under normal circumstances) branch. Coverage graphs are
// simplified by assuring coverage results are accurate for program executions that
// don't panic.
//
// Programs that panic and unwind may record slightly inaccurate coverage results
// for a coverage region containing the `Terminator` that began the panic. This
// is as intended. (See Issue #78544 for a possible future option to support
// coverage in test programs that panic.)
TerminatorKind::Goto { .. }
| TerminatorKind::Resume
| TerminatorKind::Unreachable
| TerminatorKind::Drop { .. }
| TerminatorKind::DropAndReplace { .. }
| TerminatorKind::Call { .. }
| TerminatorKind::GeneratorDrop
| TerminatorKind::Assert { .. }
| TerminatorKind::FalseEdge { .. }
| TerminatorKind::FalseUnwind { .. }
| TerminatorKind::InlineAsm { .. } => {}
}
}
if !basic_blocks.is_empty() {
// process any remaining basic_blocks into a final `BasicCoverageBlockData`
Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
debug!(" because the end of the MIR CFG was reached while traversing");
}
(bcbs, bb_to_bcb)
}
fn add_basic_coverage_block(
bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
basic_blocks: Vec<BasicBlock>,
)
|
#[inline(always)]
pub fn iter_enumerated(
&self,
) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
self.bcbs.iter_enumerated()
}
#[inline(always)]
pub fn iter_enumerated_mut(
&mut self,
) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
self.bcbs.iter_enumerated_mut()
}
#[inline(always)]
pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
}
#[inline(always)]
pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
}
#[inline(always)]
pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
self.dominators.as_ref().unwrap()
}
}
impl Index<BasicCoverageBlock> for CoverageGraph {
type Output = BasicCoverageBlockData;
#[inline]
fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
&self.bcbs[index]
}
}
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
#[inline]
fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
&mut self.bcbs[index]
}
}
impl graph::DirectedGraph for CoverageGraph {
type Node = BasicCoverageBlock;
}
impl graph::WithNumNodes for CoverageGraph {
#[inline]
fn num_nodes(&self) -> usize {
self.bcbs.len()
}
}
impl graph::WithStartNode for CoverageGraph {
#[inline]
fn start_node(&self) -> Self::Node {
self.bcb_from_bb(mir::START_BLOCK)
.expect("mir::START_BLOCK should be in a BasicCoverageBlock")
}
}
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
impl graph::WithSuccessors for CoverageGraph {
#[inline]
fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
self.successors[node].iter().cloned()
}
}
impl graph::GraphPredecessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
self.predecessors[node].iter().copied()
}
}
rustc_index::newtype_index! {
/// A node in the [control-flow graph][CFG] of CoverageGraph.
pub(super) struct BasicCoverageBlock {
DEBUG_FORMAT = "bcb{}",
const START_BCB = 0,
}
}
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
/// that is injected by the Rust compiler but has no physical source code to count. This also
/// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
/// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
/// of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
/// a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
pub basic_blocks: Vec<BasicBlock>,
pub counter_kind: Option<CoverageKind>,
edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
impl BasicCoverageBlockData {
pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
}
#[inline(always)]
pub fn leader_bb(&self) -> BasicBlock {
self.basic_blocks[0]
}
#[inline(always)]
pub fn last_bb(&self) -> BasicBlock {
*self.basic_blocks.last().unwrap()
}
#[inline(always)]
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
pub fn set_counter(
&mut self,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
debug_assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
let operand = counter_kind.as_operand_id();
if let Some(replaced) = self.counter_kind.replace(counter_kind) {
Error::from_string(format!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{:?} already had counter {:?}",
self, replaced,
))
} else {
Ok(operand)
}
}
#[inline(always)]
pub fn counter(&self) -> Option<&CoverageKind> {
self.counter_kind.as_ref()
}
#[inline(always)]
pub fn take_counter(&mut self) -> Option<CoverageKind> {
self.counter_kind.take()
}
pub fn set_edge_counter_from(
&mut self,
from_bcb: BasicCoverageBlock,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
if level_enabled!(tracing::Level::DEBUG) {
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
            if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
return Error::from_string(format!(
"attempt to add an incoming edge counter from {:?} when the target BCB already \
has a `Counter`",
from_bcb
));
}
}
let operand = counter_kind.as_operand_id();
if let Some(replaced) =
self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
{
Error::from_string(format!(
"attempt to set an edge counter more than once; from_bcb: \
{:?} already had counter {:?}",
from_bcb, replaced,
))
} else {
Ok(operand)
}
}
#[inline]
pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
edge_from_bcbs.get(&from_bcb)
} else {
None
}
}
#[inline]
pub fn take_edge_counters(
&mut self,
) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
}
pub fn id(&self) -> String {
format!(
"@{}",
self.basic_blocks
.iter()
.map(|bb| bb.index().to_string())
.collect::<Vec<_>>()
.join(ID_SEPARATOR)
)
}
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
pub edge_from_bcb: Option<BasicCoverageBlock>,
pub target_bcb: BasicCoverageBlock,
}
impl BcbBranch {
pub fn from_to(
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
basic_coverage_blocks: &CoverageGraph,
) -> Self {
let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
Some(from_bcb)
} else {
None
};
Self { edge_from_bcb, target_bcb: to_bcb }
}
pub fn counter<'a>(
&self,
basic_coverage_blocks: &'a CoverageGraph,
) -> Option<&'a CoverageKind> {
if let Some(from_bcb) = self.edge_from_bcb {
basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
} else {
basic_coverage_blocks[self.target_bcb].counter()
}
}
pub fn is_only_path_to_target(&self) -> bool {
self.edge_from_bcb.is_none()
}
}
impl std::fmt::Debug for BcbBranch {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(from_bcb) = self.edge_from_bcb {
write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
} else {
write!(fmt, "{:?}", self.target_bcb)
}
}
}
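// Editor's illustrative sketch (the variable names are hypothetical, not from the
// original source): for a branching BCB `from_bcb` with two successor BCBs, where
// `unique_bcb` has a single incoming edge and `shared_bcb` has several,
// `BcbBranch::from_to` distinguishes the two cases:
//
//     let only_path = BcbBranch::from_to(from_bcb, unique_bcb, &basic_coverage_blocks);
//     assert!(only_path.is_only_path_to_target());  // edge_from_bcb == None
//     let via_edge = BcbBranch::from_to(from_bcb, shared_bcb, &basic_coverage_blocks);
//     assert!(!via_edge.is_only_path_to_target());  // edge_from_bcb == Some(from_bcb)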
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
body: &'tcx &'a mir::Body<'tcx>,
term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
let mut successors = term_kind.successors();
Box::new(
match &term_kind {
// SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { .. } => successors,
// For all other kinds, return only the first successor, if any, and ignore unwinds.
// NOTE: `chain(&[])` is required to coerce the `option::iter` (from
// `next().into_iter()`) into the `mir::Successors` aliased type.
_ => successors.next().into_iter().chain(&[]),
}
.filter(move |&&successor| {
            body[successor].terminator().kind != TerminatorKind::Unreachable
}),
)
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
/// From one or more backedges returning to a loop header.
pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
    /// Worklist, to be traversed, of CoverageGraph nodes in the loop with the given loop
    /// backedges, such that the loop is the inner-most loop containing these nodes.
pub worklist: Vec<BasicCoverageBlock>,
}
pub(super) struct TraverseCoverageGraphWithLoops {
pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
pub context_stack: Vec<TraversalContext>,
visited: BitSet<BasicCoverageBlock>,
}
impl TraverseCoverageGraphWithLoops {
pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
let start_bcb = basic_coverage_blocks.start_node();
let backedges = find_loop_backedges(basic_coverage_blocks);
let context_stack =
vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
// `context_stack` starts with a `TraversalContext` for the main function context (beginning
// with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
// of the stack as loops are entered, and popped off of the stack when a loop's worklist is
// exhausted.
let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
Self { backedges, context_stack, visited }
}
pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
debug!(
"TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
self.context_stack.iter().rev().collect::<Vec<_>>()
);
while let Some(next_bcb) = {
// Strip contexts with empty worklists from the top of the stack
while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
self.context_stack.pop();
}
// Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
} {
            if !self.visited.insert(next_bcb) {
debug!("Already visited: {:?}", next_bcb);
continue;
}
debug!("Visiting {:?}", next_bcb);
if self.backedges[next_bcb].len() > 0 {
debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
self.context_stack.push(TraversalContext {
loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
worklist: Vec::new(),
});
}
self.extend_worklist(basic_coverage_blocks, next_bcb);
return Some(next_bcb);
}
None
}
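    // Editor's illustrative sketch (assumed driver loop, not part of the original
    // source): callers typically drain the traversal like an iterator, so every BCB
    // inside a loop body is visited before any BCB that follows the loop:
    //
    //     let mut traversal = TraverseCoverageGraphWithLoops::new(&basic_coverage_blocks);
    //     while let Some(bcb) = traversal.next(&basic_coverage_blocks) {
    //         // per-BCB work (e.g., assigning counters) happens here
    //     }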
pub fn extend_worklist(
&mut self,
basic_coverage_blocks: &CoverageGraph,
bcb: BasicCoverageBlock,
) {
|
{
let bcb = BasicCoverageBlock::from_usize(bcbs.len());
for &bb in basic_blocks.iter() {
bb_to_bcb[bb] = Some(bcb);
}
let bcb_data = BasicCoverageBlockData::from(basic_blocks);
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
bcbs.push(bcb_data);
}
|
identifier_body
|
graph.rs
|
terminator's `successors()`. Coverage spans must map to actual source code,
// so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
// intentionally omits unwind paths.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
let mut basic_blocks = Vec::new();
for (bb, data) in mir_cfg_without_unwind {
if let Some(last) = basic_blocks.last() {
let predecessors = &mir_body.predecessors()[bb];
                if predecessors.len() > 1 || !predecessors.contains(last) {
// The `bb` has more than one _incoming_ edge, and should start its own
// `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
// include `bb`; it contains a sequence of one or more sequential basic_blocks
// with no intermediate branches in or out. Save these as a new
// `BasicCoverageBlockData` before starting the new one.)
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(
" because {}",
if predecessors.len() > 1 {
"predecessors.len() > 1".to_owned()
} else {
format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
}
);
}
}
basic_blocks.push(bb);
let term = data.terminator();
match term.kind {
                TerminatorKind::Return { .. }
                | TerminatorKind::Abort
                | TerminatorKind::Yield { .. }
                | TerminatorKind::SwitchInt { .. } => {
// The `bb` has more than one _outgoing_ edge, or exits the function. Save the
// current sequence of `basic_blocks` gathered to this point, as a new
// `BasicCoverageBlockData`.
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(" because term.kind = {:?}", term.kind);
// Note that this condition is based on `TerminatorKind`, even though it
                    // theoretically boils down to `successors().len() != 1`; that is, either zero
// (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
// since the BCB CFG ignores things like unwind branches (which exist in the
// `Terminator`s `successors()` list) checking the number of successors won't
// work.
}
// The following `TerminatorKind`s are either not expected outside an unwind branch,
// or they should not (under normal circumstances) branch. Coverage graphs are
// simplified by assuring coverage results are accurate for program executions that
// don't panic.
//
// Programs that panic and unwind may record slightly inaccurate coverage results
// for a coverage region containing the `Terminator` that began the panic. This
// is as intended. (See Issue #78544 for a possible future option to support
// coverage in test programs that panic.)
                TerminatorKind::Goto { .. }
                | TerminatorKind::Resume
                | TerminatorKind::Unreachable
                | TerminatorKind::Drop { .. }
                | TerminatorKind::DropAndReplace { .. }
                | TerminatorKind::Call { .. }
                | TerminatorKind::GeneratorDrop
                | TerminatorKind::Assert { .. }
                | TerminatorKind::FalseEdge { .. }
                | TerminatorKind::FalseUnwind { .. }
                | TerminatorKind::InlineAsm { .. } => {}
}
}
        if !basic_blocks.is_empty() {
// process any remaining basic_blocks into a final `BasicCoverageBlockData`
Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
debug!(" because the end of the MIR CFG was reached while traversing");
}
(bcbs, bb_to_bcb)
}
fn add_basic_coverage_block(
bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
basic_blocks: Vec<BasicBlock>,
) {
let bcb = BasicCoverageBlock::from_usize(bcbs.len());
for &bb in basic_blocks.iter() {
bb_to_bcb[bb] = Some(bcb);
}
let bcb_data = BasicCoverageBlockData::from(basic_blocks);
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
bcbs.push(bcb_data);
}
#[inline(always)]
pub fn iter_enumerated(
&self,
) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
self.bcbs.iter_enumerated()
}
#[inline(always)]
pub fn iter_enumerated_mut(
&mut self,
) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
self.bcbs.iter_enumerated_mut()
}
#[inline(always)]
pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
}
#[inline(always)]
pub fn is_dominated_by(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
}
#[inline(always)]
pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
self.dominators.as_ref().unwrap()
}
}
impl Index<BasicCoverageBlock> for CoverageGraph {
type Output = BasicCoverageBlockData;
#[inline]
fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
&self.bcbs[index]
}
}
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
#[inline]
fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
&mut self.bcbs[index]
}
}
impl graph::DirectedGraph for CoverageGraph {
type Node = BasicCoverageBlock;
}
impl graph::WithNumNodes for CoverageGraph {
#[inline]
fn num_nodes(&self) -> usize {
self.bcbs.len()
}
}
impl graph::WithStartNode for CoverageGraph {
#[inline]
fn start_node(&self) -> Self::Node {
self.bcb_from_bb(mir::START_BLOCK)
.expect("mir::START_BLOCK should be in a BasicCoverageBlock")
}
}
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
impl graph::WithSuccessors for CoverageGraph {
#[inline]
fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
self.successors[node].iter().cloned()
}
}
impl graph::GraphPredecessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
self.predecessors[node].iter().copied()
}
}
rustc_index::newtype_index! {
/// A node in the [control-flow graph][CFG] of CoverageGraph.
pub(super) struct BasicCoverageBlock {
DEBUG_FORMAT = "bcb{}",
const START_BCB = 0,
}
}
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
/// that is injected by the Rust compiler but has no physical source code to count. This also
/// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
/// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
/// of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
/// a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
|
impl BasicCoverageBlockData {
pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
}
#[inline(always)]
pub fn leader_bb(&self) -> BasicBlock {
self.basic_blocks[0]
}
#[inline(always)]
pub fn last_bb(&self) -> BasicBlock {
*self.basic_blocks.last().unwrap()
}
#[inline(always)]
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
pub fn set_counter(
&mut self,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
debug_assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
let operand = counter_kind.as_operand_id();
if let Some(replaced) = self.counter_kind.replace(counter_kind) {
Error::from_string(format!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{:?} already had counter {:?}",
self, replaced,
))
} else {
Ok(operand)
}
}
#[inline(always)]
pub fn counter(&self) -> Option<&CoverageKind> {
self.counter_kind.as_ref()
}
#[inline(always)]
pub fn take_counter(&mut self) -> Option<CoverageKind> {
self.counter_kind.take()
}
pub fn set_edge_counter_from(
&mut self,
from_bcb: BasicCoverageBlock,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
if level_enabled!(tracing::Level::DEBUG) {
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
            if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
return Error::from_string(format!(
"attempt to add an incoming edge counter from {:?} when the target BCB already \
has a `Counter`",
from_bcb
));
}
}
let operand = counter_kind.as_operand_id();
if let Some(replaced) =
self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
{
Error::from_string(format!(
"attempt to set an edge counter more than once; from_bcb: \
{:?} already had counter {:?}",
from_bcb, replaced,
))
} else {
Ok(operand)
}
}
#[inline]
pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
edge_from_bcbs.get(&from_bcb)
} else {
None
}
}
#[inline]
pub fn take_edge_counters(
&mut self,
) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
}
pub fn id(&self) -> String {
format!(
"@{}",
self.basic_blocks
.iter()
.map(|bb| bb.index().to_string())
.collect::<Vec<_>>()
.join(ID_SEPARATOR)
)
}
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
pub edge_from_bcb: Option<BasicCoverageBlock>,
pub target_bcb: BasicCoverageBlock,
}
impl BcbBranch {
pub fn from_to(
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
basic_coverage_blocks: &CoverageGraph,
) -> Self {
let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
Some(from_bcb)
} else {
None
};
Self { edge_from_bcb, target_bcb: to_bcb }
}
pub fn counter<'a>(
&self,
basic_coverage_blocks: &'a CoverageGraph,
) -> Option<&'a CoverageKind> {
if let Some(from_bcb) = self.edge_from_bcb {
basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
} else {
basic_coverage_blocks[self.target_bcb].counter()
}
}
pub fn is_only_path_to_target(&self) -> bool {
self.edge_from_bcb.is_none()
}
}
impl std::fmt::Debug for BcbBranch {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(from_bcb) = self.edge_from_bcb {
write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
} else {
write!(fmt, "{:?}", self.target_bcb)
}
}
}
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
body: &'tcx &'a mir::Body<'tcx>,
term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
let mut successors = term_kind.successors();
Box::new(
match &term_kind {
// SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { .. } => successors,
// For all other kinds, return only the first successor, if any, and ignore unwinds.
// NOTE: `chain(&[])` is required to coerce the `option::iter` (from
// `next().into_iter()`) into the `mir::Successors` aliased type.
_ => successors.next().into_iter().chain(&[]),
}
.filter(move |&&successor| {
            body[successor].terminator().kind != TerminatorKind::Unreachable
}),
)
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
/// From one or more backedges returning to a loop header.
pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
    /// Worklist, to be traversed, of CoverageGraph nodes in the loop with the given loop
    /// backedges, such that the loop is the inner-most loop containing these nodes.
pub worklist: Vec<BasicCoverageBlock>,
}
pub(super) struct TraverseCoverageGraphWithLoops {
pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
pub context_stack: Vec<TraversalContext>,
visited: BitSet<BasicCoverageBlock>,
}
impl TraverseCoverageGraphWithLoops {
pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
let start_bcb = basic_coverage_blocks.start_node();
let backedges = find_loop_backedges(basic_coverage_blocks);
let context_stack =
vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
// `context_stack` starts with a `TraversalContext` for the main function context (beginning
// with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
// of the stack as loops are entered, and popped off of the stack when a loop's worklist is
// exhausted.
let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
Self { backedges, context_stack, visited }
}
pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
debug!(
"TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
self.context_stack.iter().rev().collect::<Vec<_>>()
);
while let Some(next_bcb) = {
// Strip contexts with empty worklists from the top of the stack
while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
self.context_stack.pop();
}
// Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
} {
            if !self.visited.insert(next_bcb) {
debug!("Already visited: {:?}", next_bcb);
continue;
}
debug!("Visiting {:?}", next_bcb);
if self.backedges[next_bcb].len() > 0 {
debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
self.context_stack.push(TraversalContext {
loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
worklist: Vec::new(),
});
}
self.extend_worklist(basic_coverage_blocks, next_bcb);
return Some(next_bcb);
}
None
}
pub fn extend_worklist(
&mut self,
basic_coverage_blocks: &CoverageGraph,
bcb: BasicCoverageBlock,
) {
|
pub basic_blocks: Vec<BasicBlock>,
pub counter_kind: Option<CoverageKind>,
edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
|
random_line_split
|
graph.rs
|
's `successors()`. Coverage spans must map to actual source code,
// so compiler generated blocks and paths can be ignored. To that end, the CFG traversal
// intentionally omits unwind paths.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
let mir_cfg_without_unwind = ShortCircuitPreorder::new(&mir_body, bcb_filtered_successors);
let mut basic_blocks = Vec::new();
for (bb, data) in mir_cfg_without_unwind {
if let Some(last) = basic_blocks.last() {
let predecessors = &mir_body.predecessors()[bb];
                if predecessors.len() > 1 || !predecessors.contains(last) {
// The `bb` has more than one _incoming_ edge, and should start its own
// `BasicCoverageBlockData`. (Note, the `basic_blocks` vector does not yet
// include `bb`; it contains a sequence of one or more sequential basic_blocks
// with no intermediate branches in or out. Save these as a new
// `BasicCoverageBlockData` before starting the new one.)
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(
" because {}",
if predecessors.len() > 1 {
"predecessors.len() > 1".to_owned()
} else {
format!("bb {} is not in precessors: {:?}", bb.index(), predecessors)
}
);
}
}
basic_blocks.push(bb);
let term = data.terminator();
match term.kind {
                TerminatorKind::Return { .. }
                | TerminatorKind::Abort
                | TerminatorKind::Yield { .. }
                | TerminatorKind::SwitchInt { .. } => {
// The `bb` has more than one _outgoing_ edge, or exits the function. Save the
// current sequence of `basic_blocks` gathered to this point, as a new
// `BasicCoverageBlockData`.
Self::add_basic_coverage_block(
&mut bcbs,
&mut bb_to_bcb,
basic_blocks.split_off(0),
);
debug!(" because term.kind = {:?}", term.kind);
// Note that this condition is based on `TerminatorKind`, even though it
                    // theoretically boils down to `successors().len() != 1`; that is, either zero
// (e.g., `Return`, `Abort`) or multiple successors (e.g., `SwitchInt`), but
// since the BCB CFG ignores things like unwind branches (which exist in the
// `Terminator`s `successors()` list) checking the number of successors won't
// work.
}
// The following `TerminatorKind`s are either not expected outside an unwind branch,
// or they should not (under normal circumstances) branch. Coverage graphs are
// simplified by assuring coverage results are accurate for program executions that
// don't panic.
//
// Programs that panic and unwind may record slightly inaccurate coverage results
// for a coverage region containing the `Terminator` that began the panic. This
// is as intended. (See Issue #78544 for a possible future option to support
// coverage in test programs that panic.)
                TerminatorKind::Goto { .. }
                | TerminatorKind::Resume
                | TerminatorKind::Unreachable
                | TerminatorKind::Drop { .. }
                | TerminatorKind::DropAndReplace { .. }
                | TerminatorKind::Call { .. }
                | TerminatorKind::GeneratorDrop
                | TerminatorKind::Assert { .. }
                | TerminatorKind::FalseEdge { .. }
                | TerminatorKind::FalseUnwind { .. }
                | TerminatorKind::InlineAsm { .. } => {}
}
}
        if !basic_blocks.is_empty() {
// process any remaining basic_blocks into a final `BasicCoverageBlockData`
Self::add_basic_coverage_block(&mut bcbs, &mut bb_to_bcb, basic_blocks.split_off(0));
debug!(" because the end of the MIR CFG was reached while traversing");
}
(bcbs, bb_to_bcb)
}
fn add_basic_coverage_block(
bcbs: &mut IndexVec<BasicCoverageBlock, BasicCoverageBlockData>,
bb_to_bcb: &mut IndexVec<BasicBlock, Option<BasicCoverageBlock>>,
basic_blocks: Vec<BasicBlock>,
) {
let bcb = BasicCoverageBlock::from_usize(bcbs.len());
for &bb in basic_blocks.iter() {
bb_to_bcb[bb] = Some(bcb);
}
let bcb_data = BasicCoverageBlockData::from(basic_blocks);
debug!("adding bcb{}: {:?}", bcb.index(), bcb_data);
bcbs.push(bcb_data);
}
#[inline(always)]
pub fn iter_enumerated(
&self,
) -> impl Iterator<Item = (BasicCoverageBlock, &BasicCoverageBlockData)> {
self.bcbs.iter_enumerated()
}
#[inline(always)]
pub fn iter_enumerated_mut(
&mut self,
) -> impl Iterator<Item = (BasicCoverageBlock, &mut BasicCoverageBlockData)> {
self.bcbs.iter_enumerated_mut()
}
#[inline(always)]
pub fn bcb_from_bb(&self, bb: BasicBlock) -> Option<BasicCoverageBlock> {
if bb.index() < self.bb_to_bcb.len() { self.bb_to_bcb[bb] } else { None }
}
#[inline(always)]
pub fn
|
(&self, node: BasicCoverageBlock, dom: BasicCoverageBlock) -> bool {
self.dominators.as_ref().unwrap().is_dominated_by(node, dom)
}
#[inline(always)]
pub fn dominators(&self) -> &Dominators<BasicCoverageBlock> {
self.dominators.as_ref().unwrap()
}
}
impl Index<BasicCoverageBlock> for CoverageGraph {
type Output = BasicCoverageBlockData;
#[inline]
fn index(&self, index: BasicCoverageBlock) -> &BasicCoverageBlockData {
&self.bcbs[index]
}
}
impl IndexMut<BasicCoverageBlock> for CoverageGraph {
#[inline]
fn index_mut(&mut self, index: BasicCoverageBlock) -> &mut BasicCoverageBlockData {
&mut self.bcbs[index]
}
}
impl graph::DirectedGraph for CoverageGraph {
type Node = BasicCoverageBlock;
}
impl graph::WithNumNodes for CoverageGraph {
#[inline]
fn num_nodes(&self) -> usize {
self.bcbs.len()
}
}
impl graph::WithStartNode for CoverageGraph {
#[inline]
fn start_node(&self) -> Self::Node {
self.bcb_from_bb(mir::START_BLOCK)
.expect("mir::START_BLOCK should be in a BasicCoverageBlock")
}
}
type BcbSuccessors<'graph> = std::slice::Iter<'graph, BasicCoverageBlock>;
impl<'graph> graph::GraphSuccessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Cloned<BcbSuccessors<'graph>>;
}
impl graph::WithSuccessors for CoverageGraph {
#[inline]
fn successors(&self, node: Self::Node) -> <Self as GraphSuccessors<'_>>::Iter {
self.successors[node].iter().cloned()
}
}
impl graph::GraphPredecessors<'graph> for CoverageGraph {
type Item = BasicCoverageBlock;
type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicCoverageBlock>>;
}
impl graph::WithPredecessors for CoverageGraph {
#[inline]
fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
self.predecessors[node].iter().copied()
}
}
rustc_index::newtype_index! {
/// A node in the [control-flow graph][CFG] of CoverageGraph.
pub(super) struct BasicCoverageBlock {
DEBUG_FORMAT = "bcb{}",
const START_BCB = 0,
}
}
/// `BasicCoverageBlockData` holds the data indexed by a `BasicCoverageBlock`.
///
/// A `BasicCoverageBlock` (BCB) represents the maximal-length sequence of MIR `BasicBlock`s without
/// conditional branches, and form a new, simplified, coverage-specific Control Flow Graph, without
/// altering the original MIR CFG.
///
/// Note that running the MIR `SimplifyCfg` transform is not sufficient (and therefore not
/// necessary). The BCB-based CFG is a more aggressive simplification. For example:
///
/// * The BCB CFG ignores (trims) branches not relevant to coverage, such as unwind-related code,
/// that is injected by the Rust compiler but has no physical source code to count. This also
/// means a BasicBlock with a `Call` terminator can be merged into its primary successor target
/// block, in the same BCB. (But, note: Issue #78544: "MIR InstrumentCoverage: Improve coverage
/// of `#[should_panic]` tests and `catch_unwind()` handlers")
/// * Some BasicBlock terminators support Rust-specific concerns--like borrow-checking--that are
/// not relevant to coverage analysis. `FalseUnwind`, for example, can be treated the same as
/// a `Goto`, and merged with its successor into the same BCB.
///
/// Each BCB with at least one computed `CoverageSpan` will have no more than one `Counter`.
/// In some cases, a BCB's execution count can be computed by `Expression`. Additional
/// disjoint `CoverageSpan`s in a BCB can also be counted by `Expression` (by adding `ZERO`
/// to the BCB's primary counter or expression).
///
/// The BCB CFG is critical to simplifying the coverage analysis by ensuring graph path-based
/// queries (`is_dominated_by()`, `predecessors`, `successors`, etc.) have branch (control flow)
/// significance.
#[derive(Debug, Clone)]
pub(super) struct BasicCoverageBlockData {
pub basic_blocks: Vec<BasicBlock>,
pub counter_kind: Option<CoverageKind>,
edge_from_bcbs: Option<FxHashMap<BasicCoverageBlock, CoverageKind>>,
}
impl BasicCoverageBlockData {
pub fn from(basic_blocks: Vec<BasicBlock>) -> Self {
assert!(basic_blocks.len() > 0);
Self { basic_blocks, counter_kind: None, edge_from_bcbs: None }
}
#[inline(always)]
pub fn leader_bb(&self) -> BasicBlock {
self.basic_blocks[0]
}
#[inline(always)]
pub fn last_bb(&self) -> BasicBlock {
*self.basic_blocks.last().unwrap()
}
#[inline(always)]
pub fn terminator<'a, 'tcx>(&self, mir_body: &'a mir::Body<'tcx>) -> &'a Terminator<'tcx> {
&mir_body[self.last_bb()].terminator()
}
pub fn set_counter(
&mut self,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
debug_assert!(
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
self.edge_from_bcbs.is_none() || counter_kind.is_expression(),
"attempt to add a `Counter` to a BCB target with existing incoming edge counters"
);
let operand = counter_kind.as_operand_id();
if let Some(replaced) = self.counter_kind.replace(counter_kind) {
Error::from_string(format!(
"attempt to set a BasicCoverageBlock coverage counter more than once; \
{:?} already had counter {:?}",
self, replaced,
))
} else {
Ok(operand)
}
}
#[inline(always)]
pub fn counter(&self) -> Option<&CoverageKind> {
self.counter_kind.as_ref()
}
#[inline(always)]
pub fn take_counter(&mut self) -> Option<CoverageKind> {
self.counter_kind.take()
}
pub fn set_edge_counter_from(
&mut self,
from_bcb: BasicCoverageBlock,
counter_kind: CoverageKind,
) -> Result<ExpressionOperandId, Error> {
if level_enabled!(tracing::Level::DEBUG) {
// If the BCB has an edge counter (to be injected into a new `BasicBlock`), it can also
// have an expression (to be injected into an existing `BasicBlock` represented by this
// `BasicCoverageBlock`).
            if !self.counter_kind.as_ref().map_or(true, |c| c.is_expression()) {
return Error::from_string(format!(
"attempt to add an incoming edge counter from {:?} when the target BCB already \
has a `Counter`",
from_bcb
));
}
}
let operand = counter_kind.as_operand_id();
if let Some(replaced) =
self.edge_from_bcbs.get_or_insert_default().insert(from_bcb, counter_kind)
{
Error::from_string(format!(
"attempt to set an edge counter more than once; from_bcb: \
{:?} already had counter {:?}",
from_bcb, replaced,
))
} else {
Ok(operand)
}
}
#[inline]
pub fn edge_counter_from(&self, from_bcb: BasicCoverageBlock) -> Option<&CoverageKind> {
if let Some(edge_from_bcbs) = &self.edge_from_bcbs {
edge_from_bcbs.get(&from_bcb)
} else {
None
}
}
#[inline]
pub fn take_edge_counters(
&mut self,
) -> Option<impl Iterator<Item = (BasicCoverageBlock, CoverageKind)>> {
self.edge_from_bcbs.take().map_or(None, |m| Some(m.into_iter()))
}
pub fn id(&self) -> String {
format!(
"@{}",
self.basic_blocks
.iter()
.map(|bb| bb.index().to_string())
.collect::<Vec<_>>()
.join(ID_SEPARATOR)
)
}
}
/// Represents a successor from a branching BasicCoverageBlock (such as the arms of a `SwitchInt`)
/// as either the successor BCB itself, if it has only one incoming edge, or the successor _plus_
/// the specific branching BCB, representing the edge between the two. The latter case
/// distinguishes this incoming edge from other incoming edges to the same `target_bcb`.
#[derive(Clone, Copy, PartialEq, Eq)]
pub(super) struct BcbBranch {
pub edge_from_bcb: Option<BasicCoverageBlock>,
pub target_bcb: BasicCoverageBlock,
}
impl BcbBranch {
pub fn from_to(
from_bcb: BasicCoverageBlock,
to_bcb: BasicCoverageBlock,
basic_coverage_blocks: &CoverageGraph,
) -> Self {
let edge_from_bcb = if basic_coverage_blocks.predecessors[to_bcb].len() > 1 {
Some(from_bcb)
} else {
None
};
Self { edge_from_bcb, target_bcb: to_bcb }
}
pub fn counter<'a>(
&self,
basic_coverage_blocks: &'a CoverageGraph,
) -> Option<&'a CoverageKind> {
if let Some(from_bcb) = self.edge_from_bcb {
basic_coverage_blocks[self.target_bcb].edge_counter_from(from_bcb)
} else {
basic_coverage_blocks[self.target_bcb].counter()
}
}
pub fn is_only_path_to_target(&self) -> bool {
self.edge_from_bcb.is_none()
}
}
impl std::fmt::Debug for BcbBranch {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(from_bcb) = self.edge_from_bcb {
write!(fmt, "{:?}->{:?}", from_bcb, self.target_bcb)
} else {
write!(fmt, "{:?}", self.target_bcb)
}
}
}
// Returns the `Terminator`s non-unwind successors.
// FIXME(#78544): MIR InstrumentCoverage: Improve coverage of `#[should_panic]` tests and
// `catch_unwind()` handlers.
fn bcb_filtered_successors<'a, 'tcx>(
body: &'tcx &'a mir::Body<'tcx>,
term_kind: &'tcx TerminatorKind<'tcx>,
) -> Box<dyn Iterator<Item = &'a BasicBlock> + 'a> {
let mut successors = term_kind.successors();
Box::new(
match &term_kind {
// SwitchInt successors are never unwind, and all of them should be traversed.
            TerminatorKind::SwitchInt { .. } => successors,
// For all other kinds, return only the first successor, if any, and ignore unwinds.
// NOTE: `chain(&[])` is required to coerce the `option::iter` (from
// `next().into_iter()`) into the `mir::Successors` aliased type.
_ => successors.next().into_iter().chain(&[]),
}
.filter(move |&&successor| {
            body[successor].terminator().kind != TerminatorKind::Unreachable
}),
)
}
/// Maintains separate worklists for each loop in the BasicCoverageBlock CFG, plus one for the
/// CoverageGraph outside all loops. This supports traversing the BCB CFG in a way that
/// ensures a loop is completely traversed before processing Blocks after the end of the loop.
#[derive(Debug)]
pub(super) struct TraversalContext {
/// From one or more backedges returning to a loop header.
pub loop_backedges: Option<(Vec<BasicCoverageBlock>, BasicCoverageBlock)>,
    /// Worklist, to be traversed, of CoverageGraph nodes in the loop with the given loop
    /// backedges, such that the loop is the inner-most loop containing these nodes.
pub worklist: Vec<BasicCoverageBlock>,
}
pub(super) struct TraverseCoverageGraphWithLoops {
pub backedges: IndexVec<BasicCoverageBlock, Vec<BasicCoverageBlock>>,
pub context_stack: Vec<TraversalContext>,
visited: BitSet<BasicCoverageBlock>,
}
impl TraverseCoverageGraphWithLoops {
pub fn new(basic_coverage_blocks: &CoverageGraph) -> Self {
let start_bcb = basic_coverage_blocks.start_node();
let backedges = find_loop_backedges(basic_coverage_blocks);
let context_stack =
vec![TraversalContext { loop_backedges: None, worklist: vec![start_bcb] }];
// `context_stack` starts with a `TraversalContext` for the main function context (beginning
// with the `start` BasicCoverageBlock of the function). New worklists are pushed to the top
// of the stack as loops are entered, and popped off of the stack when a loop's worklist is
// exhausted.
let visited = BitSet::new_empty(basic_coverage_blocks.num_nodes());
Self { backedges, context_stack, visited }
}
pub fn next(&mut self, basic_coverage_blocks: &CoverageGraph) -> Option<BasicCoverageBlock> {
debug!(
"TraverseCoverageGraphWithLoops::next - context_stack: {:?}",
self.context_stack.iter().rev().collect::<Vec<_>>()
);
while let Some(next_bcb) = {
// Strip contexts with empty worklists from the top of the stack
while self.context_stack.last().map_or(false, |context| context.worklist.is_empty()) {
self.context_stack.pop();
}
// Pop the next bcb off of the current context_stack. If none, all BCBs were visited.
self.context_stack.last_mut().map_or(None, |context| context.worklist.pop())
} {
            if !self.visited.insert(next_bcb) {
debug!("Already visited: {:?}", next_bcb);
continue;
}
debug!("Visiting {:?}", next_bcb);
if self.backedges[next_bcb].len() > 0 {
debug!("{:?} is a loop header! Start a new TraversalContext...", next_bcb);
self.context_stack.push(TraversalContext {
loop_backedges: Some((self.backedges[next_bcb].clone(), next_bcb)),
worklist: Vec::new(),
});
}
self.extend_worklist(basic_coverage_blocks, next_bcb);
return Some(next_bcb);
}
None
}
pub fn extend_worklist(
&mut self,
basic_coverage_blocks: &CoverageGraph,
bcb: BasicCoverageBlock,
) {
|
is_dominated_by
|
identifier_name
|
encoder.rs
|
extern crate libc;
use std::io::Write;
use std::io::Result;
use std::cmp;
use std::ptr;
use super::liblz4::*;
use self::libc::size_t;
struct EncoderContext {
c: LZ4FCompressionContext,
}
#[derive(Clone)]
pub struct EncoderBuilder {
block_size: BlockSize,
block_mode: BlockMode,
checksum: ContentChecksum,
// 0 == default (fast mode); values above 16 count as 16; values below 0 count as 0
level: u32,
// 1 == always flush (reduce need for tmp buffer)
auto_flush: bool,
}
pub struct Encoder<W> {
c: EncoderContext,
w: W,
limit: usize,
buffer: Vec<u8>
}
impl EncoderBuilder {
pub fn new() -> Self {
EncoderBuilder {
block_size: BlockSize::Default,
block_mode: BlockMode::Linked,
checksum: ContentChecksum::ChecksumEnabled,
level: 0,
auto_flush: false,
}
}
pub fn block_size(&mut self, block_size: BlockSize) -> &mut Self {
self.block_size = block_size;
self
}
|
}
pub fn checksum(&mut self, checksum: ContentChecksum) -> &mut Self {
self.checksum = checksum;
self
}
pub fn level(&mut self, level: u32) -> &mut Self {
self.level = level;
self
}
pub fn auto_flush(&mut self, auto_flush: bool) -> &mut Self {
self.auto_flush = auto_flush;
self
}
pub fn build<W: Write>(&self, w: W) -> Result<Encoder<W>> {
let block_size = self.block_size.get_size();
let preferences = LZ4FPreferences
{
frame_info: LZ4FFrameInfo
{
block_size_id: self.block_size.clone(),
block_mode: self.block_mode.clone(),
content_checksum_flag: self.checksum.clone(),
reserved: [0; 5],
},
compression_level: self.level,
auto_flush: match self.auto_flush {
false => 0,
true => 1,
},
reserved: [0; 4],
};
let mut encoder = Encoder {
w: w,
c: try! (EncoderContext::new()),
limit: block_size,
buffer: Vec::with_capacity(try! (check_error(unsafe {LZ4F_compressBound(block_size as size_t, &preferences)})))
};
try! (encoder.write_header(&preferences));
Ok (encoder)
}
}
impl<W: Write> Encoder<W> {
fn write_header(&mut self, preferences: &LZ4FPreferences) -> Result<()>
{
unsafe {
let len = try! (check_error(LZ4F_compressBegin(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, preferences)));
self.buffer.set_len(len);
}
self.w.write_all(&self.buffer)
}
fn write_end(&mut self) -> Result<()> {
unsafe {
let len = try! (check_error(LZ4F_compressEnd(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
self.buffer.set_len(len);
};
self.w.write_all(&self.buffer)
}
/// This function is used to flag that this session of compression is done
/// with. The stream is finished up (final bytes are written), and then the
/// wrapped writer is returned.
pub fn finish(mut self) -> (W, Result<()>) {
let result = self.write_end();
(self.w, result)
}
}
impl<W: Write> Write for Encoder<W> {
fn write(&mut self, buffer: &[u8]) -> Result<usize> {
let mut offset = 0;
while offset < buffer.len()
{
let size = cmp::min(buffer.len() - offset, self.limit);
unsafe {
let len = try! (check_error(LZ4F_compressUpdate(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, buffer[offset..].as_ptr(), size as size_t, ptr::null())));
self.buffer.set_len(len);
try! (self.w.write_all(&self.buffer));
}
offset += size;
}
Ok(buffer.len())
}
fn flush(&mut self) -> Result<()> {
loop
{
unsafe {
let len = try! (check_error(LZ4F_flush(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
if len == 0
{
break;
}
self.buffer.set_len(len);
};
try! (self.w.write_all(&self.buffer));
}
self.w.flush()
}
}
impl EncoderContext {
fn new() -> Result<EncoderContext>
{
let mut context: LZ4FCompressionContext = ptr::null_mut();
try! (check_error(unsafe {
LZ4F_createCompressionContext(&mut context, LZ4F_VERSION)
}));
Ok(EncoderContext {
c: context
})
}
}
impl Drop for EncoderContext {
fn drop(&mut self) {
unsafe
{
LZ4F_freeCompressionContext(self.c)
};
}
}
#[cfg(test)]
mod test {
use std::io::Write;
use super::EncoderBuilder;
#[test]
fn test_encoder_smoke() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
encoder.write(b"Some ").unwrap();
encoder.write(b"data").unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
#[test]
fn test_encoder_random() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
let mut buffer = Vec::new();
let mut rnd: u32 = 42;
for _ in 0..1024 * 1024 {
buffer.push((rnd & 0xFF) as u8);
rnd = ((1664525 as u64) * (rnd as u64) + (1013904223 as u64)) as u32;
}
encoder.write(&buffer).unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
}
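// Editor's illustrative sketch (assumed usage, mirroring the tests above; the
// `BlockSize::Max256KB` variant name is an assumption about these `liblz4` bindings):
//
//     use std::io::Write;
//     let mut encoder = EncoderBuilder::new()
//         .block_size(BlockSize::Max256KB)
//         .checksum(ContentChecksum::ChecksumEnabled)
//         .level(4)
//         .build(Vec::new()).unwrap();
//     encoder.write(b"payload").unwrap();
//     let (compressed, result) = encoder.finish();
//     result.unwrap();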
|
pub fn block_mode(&mut self, block_mode: BlockMode) -> &mut Self {
self.block_mode = block_mode;
self
|
random_line_split
|
encoder.rs
|
extern crate libc;
use std::io::Write;
use std::io::Result;
use std::cmp;
use std::ptr;
use super::liblz4::*;
use self::libc::size_t;
struct EncoderContext {
c: LZ4FCompressionContext,
}
#[derive(Clone)]
pub struct EncoderBuilder {
block_size: BlockSize,
block_mode: BlockMode,
checksum: ContentChecksum,
// 0 == default (fast mode); values above 16 count as 16; values below 0 count as 0
level: u32,
// 1 == always flush (reduce need for tmp buffer)
auto_flush: bool,
}
pub struct Encoder<W> {
c: EncoderContext,
w: W,
limit: usize,
buffer: Vec<u8>
}
impl EncoderBuilder {
pub fn new() -> Self {
EncoderBuilder {
block_size: BlockSize::Default,
block_mode: BlockMode::Linked,
checksum: ContentChecksum::ChecksumEnabled,
level: 0,
auto_flush: false,
}
}
pub fn block_size(&mut self, block_size: BlockSize) -> &mut Self {
self.block_size = block_size;
self
}
pub fn block_mode(&mut self, block_mode: BlockMode) -> &mut Self {
self.block_mode = block_mode;
self
}
pub fn checksum(&mut self, checksum: ContentChecksum) -> &mut Self {
self.checksum = checksum;
self
}
pub fn level(&mut self, level: u32) -> &mut Self {
self.level = level;
self
}
pub fn auto_flush(&mut self, auto_flush: bool) -> &mut Self {
self.auto_flush = auto_flush;
self
}
pub fn build<W: Write>(&self, w: W) -> Result<Encoder<W>>
|
c: try! (EncoderContext::new()),
limit: block_size,
buffer: Vec::with_capacity(try! (check_error(unsafe {LZ4F_compressBound(block_size as size_t, &preferences)})))
};
try! (encoder.write_header(&preferences));
Ok (encoder)
}
}
impl<W: Write> Encoder<W> {
fn write_header(&mut self, preferences: &LZ4FPreferences) -> Result<()>
{
unsafe {
let len = try! (check_error(LZ4F_compressBegin(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, preferences)));
self.buffer.set_len(len);
}
self.w.write_all(&self.buffer)
}
fn write_end(&mut self) -> Result<()> {
unsafe {
let len = try! (check_error(LZ4F_compressEnd(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
self.buffer.set_len(len);
};
self.w.write_all(&self.buffer)
}
/// This function is used to flag that this session of compression is done
/// with. The stream is finished up (final bytes are written), and then the
/// wrapped writer is returned.
pub fn finish(mut self) -> (W, Result<()>) {
let result = self.write_end();
(self.w, result)
}
}
impl<W: Write> Write for Encoder<W> {
fn write(&mut self, buffer: &[u8]) -> Result<usize> {
let mut offset = 0;
while offset < buffer.len()
{
let size = cmp::min(buffer.len() - offset, self.limit);
unsafe {
let len = try! (check_error(LZ4F_compressUpdate(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, buffer[offset..].as_ptr(), size as size_t, ptr::null())));
self.buffer.set_len(len);
try! (self.w.write_all(&self.buffer));
}
offset += size;
}
Ok(buffer.len())
}
fn flush(&mut self) -> Result<()> {
loop
{
unsafe {
let len = try! (check_error(LZ4F_flush(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
if len == 0
{
break;
}
self.buffer.set_len(len);
};
try! (self.w.write_all(&self.buffer));
}
self.w.flush()
}
}
impl EncoderContext {
fn new() -> Result<EncoderContext>
{
let mut context: LZ4FCompressionContext = ptr::null_mut();
try! (check_error(unsafe {
LZ4F_createCompressionContext(&mut context, LZ4F_VERSION)
}));
Ok(EncoderContext {
c: context
})
}
}
impl Drop for EncoderContext {
fn drop(&mut self) {
unsafe
{
LZ4F_freeCompressionContext(self.c)
};
}
}
#[cfg(test)]
mod test {
use std::io::Write;
use super::EncoderBuilder;
#[test]
fn test_encoder_smoke() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
encoder.write(b"Some ").unwrap();
encoder.write(b"data").unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
#[test]
fn test_encoder_random() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
let mut buffer = Vec::new();
let mut rnd: u32 = 42;
for _ in 0..1024 * 1024 {
buffer.push((rnd & 0xFF) as u8);
rnd = ((1664525 as u64) * (rnd as u64) + (1013904223 as u64)) as u32;
}
encoder.write(&buffer).unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
}
|
{
let block_size = self.block_size.get_size();
let preferences = LZ4FPreferences
{
frame_info: LZ4FFrameInfo
{
block_size_id: self.block_size.clone(),
block_mode: self.block_mode.clone(),
content_checksum_flag: self.checksum.clone(),
reserved: [0; 5],
},
compression_level: self.level,
auto_flush: match self.auto_flush {
false => 0,
true => 1,
},
reserved: [0; 4],
};
let mut encoder = Encoder {
w: w,
|
identifier_body
|
encoder.rs
|
extern crate libc;
use std::io::Write;
use std::io::Result;
use std::cmp;
use std::ptr;
use super::liblz4::*;
use self::libc::size_t;
struct
|
{
c: LZ4FCompressionContext,
}
#[derive(Clone)]
pub struct EncoderBuilder {
block_size: BlockSize,
block_mode: BlockMode,
checksum: ContentChecksum,
// 0 == default (fast mode); values above 16 count as 16; values below 0 count as 0
level: u32,
// 1 == always flush (reduce need for tmp buffer)
auto_flush: bool,
}
pub struct Encoder<W> {
c: EncoderContext,
w: W,
limit: usize,
buffer: Vec<u8>
}
impl EncoderBuilder {
pub fn new() -> Self {
EncoderBuilder {
block_size: BlockSize::Default,
block_mode: BlockMode::Linked,
checksum: ContentChecksum::ChecksumEnabled,
level: 0,
auto_flush: false,
}
}
pub fn block_size(&mut self, block_size: BlockSize) -> &mut Self {
self.block_size = block_size;
self
}
pub fn block_mode(&mut self, block_mode: BlockMode) -> &mut Self {
self.block_mode = block_mode;
self
}
pub fn checksum(&mut self, checksum: ContentChecksum) -> &mut Self {
self.checksum = checksum;
self
}
pub fn level(&mut self, level: u32) -> &mut Self {
self.level = level;
self
}
pub fn auto_flush(&mut self, auto_flush: bool) -> &mut Self {
self.auto_flush = auto_flush;
self
}
pub fn build<W: Write>(&self, w: W) -> Result<Encoder<W>> {
let block_size = self.block_size.get_size();
let preferences = LZ4FPreferences
{
frame_info: LZ4FFrameInfo
{
block_size_id: self.block_size.clone(),
block_mode: self.block_mode.clone(),
content_checksum_flag: self.checksum.clone(),
reserved: [0; 5],
},
compression_level: self.level,
auto_flush: match self.auto_flush {
false => 0,
true => 1,
},
reserved: [0; 4],
};
let mut encoder = Encoder {
w: w,
c: try! (EncoderContext::new()),
limit: block_size,
buffer: Vec::with_capacity(try! (check_error(unsafe {LZ4F_compressBound(block_size as size_t, &preferences)})))
};
try! (encoder.write_header(&preferences));
Ok (encoder)
}
}
impl<W: Write> Encoder<W> {
fn write_header(&mut self, preferences: &LZ4FPreferences) -> Result<()>
{
unsafe {
let len = try! (check_error(LZ4F_compressBegin(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, preferences)));
self.buffer.set_len(len);
}
self.w.write_all(&self.buffer)
}
fn write_end(&mut self) -> Result<()> {
unsafe {
let len = try! (check_error(LZ4F_compressEnd(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
self.buffer.set_len(len);
};
self.w.write_all(&self.buffer)
}
/// This function is used to flag that this session of compression is done
/// with. The stream is finished up (final bytes are written), and then the
/// wrapped writer is returned.
pub fn finish(mut self) -> (W, Result<()>) {
let result = self.write_end();
(self.w, result)
}
}
impl<W: Write> Write for Encoder<W> {
fn write(&mut self, buffer: &[u8]) -> Result<usize> {
let mut offset = 0;
while offset < buffer.len()
{
let size = cmp::min(buffer.len() - offset, self.limit);
unsafe {
let len = try! (check_error(LZ4F_compressUpdate(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, buffer[offset..].as_ptr(), size as size_t, ptr::null())));
self.buffer.set_len(len);
try! (self.w.write_all(&self.buffer));
}
offset += size;
}
Ok(buffer.len())
}
fn flush(&mut self) -> Result<()> {
loop
{
unsafe {
let len = try! (check_error(LZ4F_flush(self.c.c, self.buffer.as_mut_ptr(), self.buffer.capacity() as size_t, ptr::null())));
if len == 0
{
break;
}
self.buffer.set_len(len);
};
try! (self.w.write_all(&self.buffer));
}
self.w.flush()
}
}
impl EncoderContext {
fn new() -> Result<EncoderContext>
{
let mut context: LZ4FCompressionContext = ptr::null_mut();
try! (check_error(unsafe {
LZ4F_createCompressionContext(&mut context, LZ4F_VERSION)
}));
Ok(EncoderContext {
c: context
})
}
}
impl Drop for EncoderContext {
fn drop(&mut self) {
unsafe
{
LZ4F_freeCompressionContext(self.c)
};
}
}
#[cfg(test)]
mod test {
use std::io::Write;
use super::EncoderBuilder;
#[test]
fn test_encoder_smoke() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
encoder.write(b"Some ").unwrap();
encoder.write(b"data").unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
#[test]
fn test_encoder_random() {
let mut encoder = EncoderBuilder::new().level(1).build(Vec::new()).unwrap();
let mut buffer = Vec::new();
let mut rnd: u32 = 42;
for _ in 0..1024 * 1024 {
buffer.push((rnd & 0xFF) as u8);
rnd = ((1664525 as u64) * (rnd as u64) + (1013904223 as u64)) as u32;
}
encoder.write(&buffer).unwrap();
let (_, result) = encoder.finish();
result.unwrap();
}
}
|
EncoderContext
|
identifier_name
|
test.rs
|
extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
pub use ports::localhost;
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_register_multiple_event_loops;
mod test_tcp_level;
mod test_tick;
mod test_timer;
mod test_udp_level;
mod test_udp_socket;
// ===== Unix only tests =====
#[cfg(unix)]
mod test_unix_echo_server;
#[cfg(unix)]
mod test_unix_pass_fd;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn
|
() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn localhost() -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
}
pub fn sleep_ms(ms: usize) {
use std::thread;
thread::sleep_ms(ms as u32);
}
|
next_port
|
identifier_name
|
test.rs
|
extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
|
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_register_multiple_event_loops;
mod test_tcp_level;
mod test_tick;
mod test_timer;
mod test_udp_level;
mod test_udp_socket;
// ===== Unix only tests =====
#[cfg(unix)]
mod test_unix_echo_server;
#[cfg(unix)]
mod test_unix_pass_fd;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn next_port() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn localhost() -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
}
pub fn sleep_ms(ms: usize) {
use std::thread;
thread::sleep_ms(ms as u32);
}
|
pub use ports::localhost;
|
random_line_split
|
test.rs
|
extern crate mio;
extern crate bytes;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate tempdir;
pub use ports::localhost;
mod test_battery;
mod test_close_on_drop;
mod test_echo_server;
mod test_multicast;
mod test_notify;
mod test_register_deregister;
mod test_register_multiple_event_loops;
mod test_tcp_level;
mod test_tick;
mod test_timer;
mod test_udp_level;
mod test_udp_socket;
// ===== Unix only tests =====
#[cfg(unix)]
mod test_unix_echo_server;
#[cfg(unix)]
mod test_unix_pass_fd;
mod ports {
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering::SeqCst;
// Helper for getting a unique port for the task run
// TODO: Reuse ports to not spam the system
static mut NEXT_PORT: AtomicUsize = ATOMIC_USIZE_INIT;
const FIRST_PORT: usize = 18080;
fn next_port() -> usize {
unsafe {
// If the atomic was never used, set it to the initial port
NEXT_PORT.compare_and_swap(0, FIRST_PORT, SeqCst);
// Get and increment the port list
NEXT_PORT.fetch_add(1, SeqCst)
}
}
pub fn localhost() -> SocketAddr {
let s = format!("127.0.0.1:{}", next_port());
FromStr::from_str(&s).unwrap()
}
}
pub fn sleep_ms(ms: usize)
|
{
use std::thread;
thread::sleep_ms(ms as u32);
}
|
identifier_body
|
|
opt-in-copy.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
struct CantCopyThis;
struct IWantToCopyThis {
but_i_cant: CantCopyThis,
}
impl Copy for IWantToCopyThis {}
//~^ ERROR the trait `Copy` may not be implemented for this type
//~| ERROR E0277
enum CantCopyThisEither {
A,
B,
}
enum IWantToCopyThisToo {
ButICant(CantCopyThisEither),
}
impl Copy for IWantToCopyThisToo {}
//~^ ERROR the trait `Copy` may not be implemented for this type
//~| ERROR E0277
fn main() {}
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
|
random_line_split
|
opt-in-copy.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct CantCopyThis;
struct IWantToCopyThis {
but_i_cant: CantCopyThis,
}
impl Copy for IWantToCopyThis {}
//~^ ERROR the trait `Copy` may not be implemented for this type
//~| ERROR E0277
enum CantCopyThisEither {
A,
B,
}
enum IWantToCopyThisToo {
ButICant(CantCopyThisEither),
}
impl Copy for IWantToCopyThisToo {}
//~^ ERROR the trait `Copy` may not be implemented for this type
//~| ERROR E0277
fn
|
() {}
|
main
|
identifier_name
|
attrs-after-extern-mod.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Constants (static variables) can be used to match in patterns, but mutable
// statics cannot. This ensures that there's some form of error if this is
// attempted.
extern crate libc;
extern {
static mut rust_dbg_static_mut: libc::c_int;
pub fn rust_dbg_static_mut_check_four();
#[cfg(stage37)] //~ ERROR expected item after attributes
}
pub fn
|
() {}
|
main
|
identifier_name
|
attrs-after-extern-mod.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Constants (static variables) can be used to match in patterns, but mutable
// statics cannot. This ensures that there's some form of error if this is
// attempted.
extern crate libc;
extern {
static mut rust_dbg_static_mut: libc::c_int;
pub fn rust_dbg_static_mut_check_four();
#[cfg(stage37)] //~ ERROR expected item after attributes
}
pub fn main() {}
|
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
attrs-after-extern-mod.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Constants (static variables) can be used to match in patterns, but mutable
// statics cannot. This ensures that there's some form of error if this is
// attempted.
extern crate libc;
extern {
static mut rust_dbg_static_mut: libc::c_int;
pub fn rust_dbg_static_mut_check_four();
#[cfg(stage37)] //~ ERROR expected item after attributes
}
pub fn main()
|
{}
|
identifier_body
|
|
abilities.rs
|
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
|
// See the License for the specific language governing permissions and
// limitations under the License.
//! Windows-equivalent for determining if the current user has certain
//! abilities.
// The Linux version uses Capabilities. Until we sort out the
// equivalent implementation on Windows, we assume that the current
// process has the abilities. This was the implicit behavior prior to
// adding this abstraction, so Windows supervisor behavior will remain
// unchanged (i.e., it will still require "root"-like abilities to
// run).
pub fn can_run_services_as_svc_user() -> bool {
true
}
|
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
random_line_split
|
abilities.rs
|
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Windows-equivalent for determining if the current user has certain
//! abilities.
// The Linux version uses Capabilities. Until we sort out the
// equivalent implementation on Windows, we assume that the current
// process has the abilities. This was the implicit behavior prior to
// adding this abstraction, so Windows supervisor behavior will remain
// unchanged (i.e., it will still require "root"-like abilities to
// run).
pub fn can_run_services_as_svc_user() -> bool
|
{
true
}
|
identifier_body
|
|
abilities.rs
|
// Copyright (c) 2018 Chef Software Inc. and/or applicable contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Windows-equivalent for determining if the current user has certain
//! abilities.
// The Linux version uses Capabilities. Until we sort out the
// equivalent implementation on Windows, we assume that the current
// process has the abilities. This was the implicit behavior prior to
// adding this abstraction, so Windows supervisor behavior will remain
// unchanged (i.e., it will still require "root"-like abilities to
// run).
pub fn
|
() -> bool {
true
}
|
can_run_services_as_svc_user
|
identifier_name
|
scripts.rs
|
//! This integration test runs the test scripts found in
//! the scripts/ directory
//! See image-worker/README.md for a full description of
//! the test script syntax and details about this test
//! runner.
extern crate image;
use std::fs::{File, read_dir, remove_file, copy};
use std::io::{BufReader, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::Child;
use std::process::{Command, Stdio};
use std::thread;
use std::sync::mpsc;
#[test]
fn run_scripts() {
let scripts_dir = Path::new(file!()).with_file_name("scripts");
let mut threads = Vec::new();
// We use channels here so we can immediately exit if we get
// a failure message
let (tx, rx) = mpsc::channel();
for entry in read_dir(scripts_dir).unwrap() {
let test_script = entry.unwrap().path();
let tx = tx.clone();
let th = (test_script.clone(), thread::spawn(move || {
let result = run_test_script(test_script);
tx.send(result).unwrap();
}));
threads.push(th);
}
for _ in threads.iter() {
let result = rx.recv().unwrap();
match result {
Err(error) => panic!(error),
_ => (),
}
}
for (script, th) in threads {
match th.join() {
Ok(_) => (),
Err(_) => panic!("thread for {} ended in a panic", script.to_str().unwrap()),
}
}
}
fn run_test_script(script: PathBuf) -> Result<(), String> {
let file = File::open(script.clone()).unwrap();
let reader = BufReader::new(file);
let mut child = spawn_worker();
let filename = script.file_name().unwrap().to_str().unwrap();
println!("Starting {}...", filename);
let mut last_response: Option<String> = None;
for (num, line) in reader.lines().enumerate() {
let line = line.unwrap();
let line = line.trim();
if line.is_empty() {
continue;
}
//println!("{}", line);
let (first, arg) = line.split_at(1);
let result = match first {
"%" => check_file_match(arg),
"-" => remove_file(arg.trim()).map_err(|e| format!("{}", e)),
"=" => copy_file(arg),
">" => {
let res = check_output(last_response.as_ref(), arg);
last_response = None;
res
},
"#" => Ok(()),
_ => {
                // The test script has until the next command to check its output using the
                // > command. If it does not, we check here for success
let response_checked = if last_response.is_some() {
check_success(last_response.as_ref())
}
else {
Ok(())
};
response_checked.and_then(|_| {
send_input(&mut child, line).and_then(|_| {
last_response = Some(read_output(&mut child)?);
Ok(())
})
})
},
};
if let Err(error) = result {
return Err(format!("{}#{}: {}", filename, num + 1, error));
}
}
if last_response.is_some() {
if let Err(error) = check_success(last_response.as_ref()) {
return Err(format!("{}#EOF: {}", filename, error));
}
}
    if !child.wait().unwrap().success() {
return Err(
format!("{}: Worker process did not complete successfully after test script", filename)
);
}
println!("Completed {}.", filename);
Ok(())
}
fn spawn_worker() -> Child {
Command::new("cargo")
.args(&["run", "-q"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap()
}
fn check_file_match(arg: &str) -> Result<(), String> {
let delimiter = arg.find("=>")
.ok_or("Could not find => in % command")?;
let (output_path, expected_path) = arg.split_at(delimiter);
// get rid of the "=>"
let expected_path = expected_path.chars().skip(2).collect::<String>();
// ignore any extra whitespace
let output_path = output_path.trim();
let expected_path = expected_path.trim();
let output = image::open(output_path).map_err(|e| format!("{}", e))?;
let expected = image::open(expected_path).map_err(|e| format!("{}", e))?;
let output = output.raw_pixels();
let expected = expected.raw_pixels();
if output == expected {
remove_file(output_path)
.map_err(|e| format!("Failed to remove output after test passed: {}", e))?;
Ok(())
}
else {
Err(format!("{} did not match {}", output_path, expected_path))
}
}
fn copy_file(arg: &str) -> Result<(), String> {
let args: Vec<_> = arg.trim().split_whitespace().collect();
    if args.len() != 2 {
return Err("= command requires only 2 arguments".to_owned());
}
let source = args[0];
let destination = args[1];
copy(source, destination).map_err(|e| format!("{}", e))?;
Ok(())
}
fn check_success(output: Option<&String>) -> Result<(), String> {
if let Some(response) = output {
// This check is not foolproof and may eventually cause problems.
        // It is good enough for now, so we're running with it
if response.starts_with("{\"Success\":") {
Ok(())
}
else {
Err(format!("Worker did not produce Success. Script failed at \
the last input *before* this line. Actual result: {}", response))
}
}
else {
panic!("check_success should have been called only when last_response had a value");
}
}
fn check_output(output: Option<&String>, arg: &str) -> Result<(), String> {
// This is brittle, but it doesn't seem worth it to implement something
// more robust for now. You will need to exactly match the output in your
// test script if you want to test output
if let Some(response) = output {
let arg = arg.trim();
let response = response.trim();
if response == arg {
Ok(())
}
else {
Err(format!("Worker produced output not equal to expected output.\
\nExpected: {:?}\nReceived: {:?}", arg, response))
}
}
else {
panic!("check_output should have been called only when last_response had a value");
}
}
fn
|
(child: &mut Child, line: &str) -> Result<(), String> {
if let Some(ref mut stdin) = child.stdin {
match write!(stdin, "{}\n", line) {
Ok(_) => Ok(()),
Err(error) => Err(format!("{}", error)),
}
}
else {
// should not happen. This panic is just in case.
panic!("stdin was not open for writing".to_string());
}
}
fn read_output(child: &mut Child) -> Result<String, String> {
let mut stdout = BufReader::new(match child.stdout {
Some(ref mut handle) => Ok(handle),
None => Err("Worker child process stdout was never open"),
}?);
let mut response = String::new();
stdout.read_line(&mut response).map_err(|e| format!("{}", e))?;
Ok(response)
}
|
send_input
|
identifier_name
|
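The run_scripts function above pairs one spawned thread per test script with an mpsc channel so the runner can react to the first failure without first joining every thread. Here is a self-contained sketch of that spawn-plus-channel pattern; the job names and the simulated work are hypothetical stand-ins for the real test scripts.

use std::sync::mpsc;
use std::thread;

fn run_job(name: &str) -> Result<(), String> {
    // Simulated work: anything ending in "bad" fails.
    if name.ends_with("bad") {
        Err(format!("{} failed", name))
    } else {
        Ok(())
    }
}

fn main() {
    let jobs = vec!["alpha", "beta", "gamma_bad"];
    let (tx, rx) = mpsc::channel();
    let mut handles = Vec::new();
    for job in jobs {
        let tx = tx.clone();
        handles.push(thread::spawn(move || {
            // Ignore send errors in case the receiver stopped listening early.
            let _ = tx.send((job, run_job(job)));
        }));
    }
    drop(tx); // drop the original sender so the receive loop can terminate
    for (job, result) in rx {
        match result {
            Ok(()) => println!("{} passed", job),
            Err(e) => {
                // React to the first failure immediately, like the runner above.
                eprintln!("{} failed: {}", job, e);
                break;
            }
        }
    }
    for handle in handles {
        let _ = handle.join();
    }
}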
scripts.rs
|
//! This integration test runs the test scripts found in
//! the scripts/ directory
//! See image-worker/README.md for a full description of
//! the test script syntax and details about this test
//! runner.
extern crate image;
use std::fs::{File, read_dir, remove_file, copy};
use std::io::{BufReader, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::Child;
use std::process::{Command, Stdio};
use std::thread;
use std::sync::mpsc;
#[test]
fn run_scripts() {
let scripts_dir = Path::new(file!()).with_file_name("scripts");
let mut threads = Vec::new();
// We use channels here so we can immediately exit if we get
// a failure message
let (tx, rx) = mpsc::channel();
for entry in read_dir(scripts_dir).unwrap() {
let test_script = entry.unwrap().path();
let tx = tx.clone();
let th = (test_script.clone(), thread::spawn(move || {
let result = run_test_script(test_script);
tx.send(result).unwrap();
}));
threads.push(th);
}
for _ in threads.iter() {
let result = rx.recv().unwrap();
match result {
Err(error) => panic!(error),
_ => (),
}
}
for (script, th) in threads {
match th.join() {
Ok(_) => (),
Err(_) => panic!("thread for {} ended in a panic", script.to_str().unwrap()),
}
}
}
fn run_test_script(script: PathBuf) -> Result<(), String> {
let file = File::open(script.clone()).unwrap();
let reader = BufReader::new(file);
let mut child = spawn_worker();
let filename = script.file_name().unwrap().to_str().unwrap();
println!("Starting {}...", filename);
let mut last_response: Option<String> = None;
for (num, line) in reader.lines().enumerate() {
let line = line.unwrap();
let line = line.trim();
if line.is_empty() {
continue;
}
//println!("{}", line);
let (first, arg) = line.split_at(1);
let result = match first {
"%" => check_file_match(arg),
"-" => remove_file(arg.trim()).map_err(|e| format!("{}", e)),
"=" => copy_file(arg),
">" => {
let res = check_output(last_response.as_ref(), arg);
last_response = None;
res
},
"#" => Ok(()),
_ => {
                // The test script has until the next command to check its output using the
                // > command. If it does not, we check here for success
let response_checked = if last_response.is_some() {
check_success(last_response.as_ref())
}
else {
Ok(())
};
response_checked.and_then(|_| {
send_input(&mut child, line).and_then(|_| {
last_response = Some(read_output(&mut child)?);
Ok(())
})
})
},
};
if let Err(error) = result {
return Err(format!("{}#{}: {}", filename, num + 1, error));
}
}
if last_response.is_some() {
if let Err(error) = check_success(last_response.as_ref()) {
return Err(format!("{}#EOF: {}", filename, error));
}
}
    if !child.wait().unwrap().success() {
return Err(
format!("{}: Worker process did not complete successfully after test script", filename)
);
}
println!("Completed {}.", filename);
Ok(())
}
fn spawn_worker() -> Child {
Command::new("cargo")
.args(&["run", "-q"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap()
}
fn check_file_match(arg: &str) -> Result<(), String> {
let delimiter = arg.find("=>")
.ok_or("Could not find => in % command")?;
let (output_path, expected_path) = arg.split_at(delimiter);
// get rid of the "=>"
let expected_path = expected_path.chars().skip(2).collect::<String>();
// ignore any extra whitespace
let output_path = output_path.trim();
let expected_path = expected_path.trim();
let output = image::open(output_path).map_err(|e| format!("{}", e))?;
let expected = image::open(expected_path).map_err(|e| format!("{}", e))?;
let output = output.raw_pixels();
let expected = expected.raw_pixels();
if output == expected {
remove_file(output_path)
.map_err(|e| format!("Failed to remove output after test passed: {}", e))?;
Ok(())
}
else {
Err(format!("{} did not match {}", output_path, expected_path))
}
}
fn copy_file(arg: &str) -> Result<(), String> {
let args: Vec<_> = arg.trim().split_whitespace().collect();
    if args.len() != 2 {
return Err("= command requires only 2 arguments".to_owned());
}
let source = args[0];
let destination = args[1];
copy(source, destination).map_err(|e| format!("{}", e))?;
Ok(())
}
fn check_success(output: Option<&String>) -> Result<(), String>
|
fn check_output(output: Option<&String>, arg: &str) -> Result<(), String> {
// This is brittle, but it doesn't seem worth it to implement something
// more robust for now. You will need to exactly match the output in your
// test script if you want to test output
if let Some(response) = output {
let arg = arg.trim();
let response = response.trim();
if response == arg {
Ok(())
}
else {
Err(format!("Worker produced output not equal to expected output.\
\nExpected: {:?}\nReceived: {:?}", arg, response))
}
}
else {
panic!("check_output should have been called only when last_response had a value");
}
}
fn send_input(child: &mut Child, line: &str) -> Result<(), String> {
if let Some(ref mut stdin) = child.stdin {
match write!(stdin, "{}\n", line) {
Ok(_) => Ok(()),
Err(error) => Err(format!("{}", error)),
}
}
else {
// should not happen. This panic is just in case.
panic!("stdin was not open for writing".to_string());
}
}
fn read_output(child: &mut Child) -> Result<String, String> {
let mut stdout = BufReader::new(match child.stdout {
Some(ref mut handle) => Ok(handle),
None => Err("Worker child process stdout was never open"),
}?);
let mut response = String::new();
stdout.read_line(&mut response).map_err(|e| format!("{}", e))?;
Ok(response)
}
|
{
if let Some(response) = output {
// This check is not foolproof and may eventually cause problems.
        // It is good enough for now, so we're running with it
if response.starts_with("{\"Success\":") {
Ok(())
}
else {
Err(format!("Worker did not produce Success. Script failed at \
the last input *before* this line. Actual result: {}", response))
}
}
else {
panic!("check_success should have been called only when last_response had a value");
}
}
|
identifier_body
|
scripts.rs
|
//! This integration test runs the test scripts found in
//! the scripts/ directory
//! See image-worker/README.md for a full description of
//! the test script syntax and details about this test
//! runner.
extern crate image;
use std::fs::{File, read_dir, remove_file, copy};
use std::io::{BufReader, BufRead, Write};
use std::path::{Path, PathBuf};
use std::process::Child;
use std::process::{Command, Stdio};
use std::thread;
use std::sync::mpsc;
#[test]
fn run_scripts() {
let scripts_dir = Path::new(file!()).with_file_name("scripts");
let mut threads = Vec::new();
// We use channels here so we can immediately exit if we get
// a failure message
let (tx, rx) = mpsc::channel();
for entry in read_dir(scripts_dir).unwrap() {
let test_script = entry.unwrap().path();
let tx = tx.clone();
let th = (test_script.clone(), thread::spawn(move || {
let result = run_test_script(test_script);
tx.send(result).unwrap();
}));
threads.push(th);
}
for _ in threads.iter() {
let result = rx.recv().unwrap();
match result {
Err(error) => panic!(error),
_ => (),
}
}
for (script, th) in threads {
match th.join() {
Ok(_) => (),
Err(_) => panic!("thread for {} ended in a panic", script.to_str().unwrap()),
}
}
}
fn run_test_script(script: PathBuf) -> Result<(), String> {
let file = File::open(script.clone()).unwrap();
let reader = BufReader::new(file);
let mut child = spawn_worker();
let filename = script.file_name().unwrap().to_str().unwrap();
println!("Starting {}...", filename);
let mut last_response: Option<String> = None;
for (num, line) in reader.lines().enumerate() {
let line = line.unwrap();
let line = line.trim();
if line.is_empty() {
continue;
}
//println!("{}", line);
let (first, arg) = line.split_at(1);
let result = match first {
"%" => check_file_match(arg),
"-" => remove_file(arg.trim()).map_err(|e| format!("{}", e)),
"=" => copy_file(arg),
">" => {
let res = check_output(last_response.as_ref(), arg);
last_response = None;
res
},
"#" => Ok(()),
_ => {
                // The test script has until the next command to check its output using the
                // > command. If it does not, we check here for success
let response_checked = if last_response.is_some() {
check_success(last_response.as_ref())
}
else {
Ok(())
};
response_checked.and_then(|_| {
send_input(&mut child, line).and_then(|_| {
last_response = Some(read_output(&mut child)?);
Ok(())
})
})
},
};
if let Err(error) = result {
return Err(format!("{}#{}: {}", filename, num + 1, error));
}
}
if last_response.is_some() {
if let Err(error) = check_success(last_response.as_ref()) {
return Err(format!("{}#EOF: {}", filename, error));
}
}
    if !child.wait().unwrap().success() {
return Err(
format!("{}: Worker process did not complete successfully after test script", filename)
);
}
println!("Completed {}.", filename);
Ok(())
}
fn spawn_worker() -> Child {
Command::new("cargo")
.args(&["run", "-q"])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.spawn()
.unwrap()
}
fn check_file_match(arg: &str) -> Result<(), String> {
let delimiter = arg.find("=>")
.ok_or("Could not find => in % command")?;
let (output_path, expected_path) = arg.split_at(delimiter);
// get rid of the "=>"
let expected_path = expected_path.chars().skip(2).collect::<String>();
|
let expected_path = expected_path.trim();
let output = image::open(output_path).map_err(|e| format!("{}", e))?;
let expected = image::open(expected_path).map_err(|e| format!("{}", e))?;
let output = output.raw_pixels();
let expected = expected.raw_pixels();
if output == expected {
remove_file(output_path)
.map_err(|e| format!("Failed to remove output after test passed: {}", e))?;
Ok(())
}
else {
Err(format!("{} did not match {}", output_path, expected_path))
}
}
fn copy_file(arg: &str) -> Result<(), String> {
let args: Vec<_> = arg.trim().split_whitespace().collect();
    if args.len() != 2 {
return Err("= command requires only 2 arguments".to_owned());
}
let source = args[0];
let destination = args[1];
copy(source, destination).map_err(|e| format!("{}", e))?;
Ok(())
}
fn check_success(output: Option<&String>) -> Result<(), String> {
if let Some(response) = output {
// This check is not foolproof and may eventually cause problems.
        // It is good enough for now, so we're running with it
if response.starts_with("{\"Success\":") {
Ok(())
}
else {
Err(format!("Worker did not produce Success. Script failed at \
the last input *before* this line. Actual result: {}", response))
}
}
else {
panic!("check_success should have been called only when last_response had a value");
}
}
fn check_output(output: Option<&String>, arg: &str) -> Result<(), String> {
// This is brittle, but it doesn't seem worth it to implement something
// more robust for now. You will need to exactly match the output in your
// test script if you want to test output
if let Some(response) = output {
let arg = arg.trim();
let response = response.trim();
if response == arg {
Ok(())
}
else {
Err(format!("Worker produced output not equal to expected output.\
\nExpected: {:?}\nReceived: {:?}", arg, response))
}
}
else {
panic!("check_output should have been called only when last_response had a value");
}
}
fn send_input(child: &mut Child, line: &str) -> Result<(), String> {
if let Some(ref mut stdin) = child.stdin {
match write!(stdin, "{}\n", line) {
Ok(_) => Ok(()),
Err(error) => Err(format!("{}", error)),
}
}
else {
// should not happen. This panic is just in case.
panic!("stdin was not open for writing".to_string());
}
}
fn read_output(child: &mut Child) -> Result<String, String> {
let mut stdout = BufReader::new(match child.stdout {
Some(ref mut handle) => Ok(handle),
None => Err("Worker child process stdout was never open"),
}?);
let mut response = String::new();
stdout.read_line(&mut response).map_err(|e| format!("{}", e))?;
Ok(response)
}
|
// ignore any extra whitespace
let output_path = output_path.trim();
|
random_line_split
|
simd.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! SIMD vectors.
//!
//! These types can be used for accessing basic SIMD operations. Each of them
//! implements the standard arithmetic operator traits (Add, Sub, Mul, Div,
//! Rem, Shl, Shr) through compiler magic, rather than explicitly. Currently
//! comparison operators are not implemented. To use SSE3+, you must enable
//! the features, like `-C target-feature=sse3,sse4.1,sse4.2`, or a more
//! specific `target-cpu`. No other SIMD intrinsics or high-level wrappers are
//! provided beyond this module.
//!
//! ```rust
//! #[allow(experimental)];
//!
//! fn main() {
//! use std::simd::f32x4;
//! let a = f32x4(40.0, 41.0, 42.0, 43.0);
//! let b = f32x4(1.0, 1.1, 3.4, 9.8);
//! println!("{}", a + b);
//! }
//! ```
//!
//! ## Stability Note
//!
//! These are all experimental. The interface may change entirely, without
//! warning.
#![allow(non_camel_case_types)]
#![allow(missing_docs)]
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i8x16(pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i16x8(pub i16, pub i16, pub i16, pub i16,
pub i16, pub i16, pub i16, pub i16);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i32x4(pub i32, pub i32, pub i32, pub i32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i64x2(pub i64, pub i64);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u8x16(pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u16x8(pub u16, pub u16, pub u16, pub u16,
pub u16, pub u16, pub u16, pub u16);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct
|
(pub u64, pub u64);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct f64x2(pub f64, pub f64);
|
u64x2
|
identifier_name
|
simd.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! SIMD vectors.
//!
//! These types can be used for accessing basic SIMD operations. Each of them
//! implements the standard arithmetic operator traits (Add, Sub, Mul, Div,
//! Rem, Shl, Shr) through compiler magic, rather than explicitly. Currently
//! comparison operators are not implemented. To use SSE3+, you must enable
//! the features, like `-C target-feature=sse3,sse4.1,sse4.2`, or a more
//! specific `target-cpu`. No other SIMD intrinsics or high-level wrappers are
//! provided beyond this module.
//!
//! ```rust
//! #[allow(experimental)];
//!
//! fn main() {
//! use std::simd::f32x4;
//! let a = f32x4(40.0, 41.0, 42.0, 43.0);
//! let b = f32x4(1.0, 1.1, 3.4, 9.8);
//! println!("{}", a + b);
//! }
//! ```
//!
//! ## Stability Note
//!
//! These are all experimental. The interface may change entirely, without
//! warning.
#![allow(non_camel_case_types)]
#![allow(missing_docs)]
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i8x16(pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8,
pub i8, pub i8, pub i8, pub i8);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i16x8(pub i16, pub i16, pub i16, pub i16,
pub i16, pub i16, pub i16, pub i16);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i32x4(pub i32, pub i32, pub i32, pub i32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct i64x2(pub i64, pub i64);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u8x16(pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8,
pub u8, pub u8, pub u8, pub u8);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u16x8(pub u16, pub u16, pub u16, pub u16,
pub u16, pub u16, pub u16, pub u16);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct u64x2(pub u64, pub u64);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
pub struct f32x4(pub f32, pub f32, pub f32, pub f32);
#[experimental]
#[simd]
#[deriving(Show)]
#[repr(C)]
|
pub struct f64x2(pub f64, pub f64);
|
random_line_split
|
|
observable.rs
|
// Rx -- Reactive programming for Rust
// Copyright 2016 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
use observer::Observer;
use observer::{NextObserver, CompletedObserver, ErrorObserver, OptionObserver, ResultObserver};
use std::fmt::Debug;
use transform::{ContinueWithObservable, MapErrorObservable, MapObservable};
/// A stream of values.
///
/// An observable represents a stream of values, much like an iterator,
/// but instead of being “pull-based” like an iterator, it is “push-based”.
/// Multiple observers can subscribe to an observable and when the observable
/// produces a value, all observers get called with this value.
///
/// An observable can be _finite_ or _infinite_. An example of an infinite
/// observable is mouse clicks: you never know if the user is going to click
/// once more. An example of a finite observable is the results of a database
/// query: a database can hold only finitely many records, so one result is
/// the last one.
///
/// A finite observable can end in two ways:
///
/// * **Completed**: when the observable ends normally.
/// For instance, an observable of database query results
/// will complete after the last result has been produced.
/// * **Failed**: when an error occurred.
/// For instance, an observable of database query results
/// may fail if the connection is lost.
///
/// Failures are fatal: after an observable produces an error, it will not
/// produce any new values. If this is not the desired behavior, you can
/// use an observable of `Result`.
pub trait Observable {
/// The value produced by the observable.
type Item: Clone;
/// The error produced if the observable fails.
type Error: Clone;
/// The result of subscribing an observer.
// TODO: This drop bound is not required and it only complicates stuff, remove it.
type Subscription: Drop;
/// Subscribes an observer and returns the subscription.
///
/// After subscription, `on_next` will be called on the observer for every
/// value produced. If the observable completes, `on_completed` is called.
/// If the observable fails with an error, `on_error` is called. It is
/// guaranteed that no methods will be called on the observer after
/// `on_completed` or `on_error` have been called.
///
/// _When_ the observer is called is not part of the observable contract,
/// it depends on the kind of observable. The observer may be called before
/// `subscribe` returns, or it may be called in the future.
///
/// The returned value represents the subscription. Dropping the subscription
/// will prevent further calls on the observer.
fn subscribe<O>(&mut self, observer: O) -> Self::Subscription
where O: Observer<Self::Item, Self::Error>;
/// Subscribes a function to handle values produced by the observable.
///
/// For every value produced by the observable, `on_next` is called.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_next<FnNext>(&mut self,
on_next: FnNext)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item) {
let observer = NextObserver {
fn_next: on_next,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next and completion.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. A failure will cause a
/// panic. After `on_completed` has been called, it is guaranteed that neither
/// `on_next` nor `on_completed` is called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_completed<FnNext, FnCompleted>(&mut self,
on_next: FnNext,
on_completed: FnCompleted)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item), FnCompleted: FnOnce() {
let observer = CompletedObserver {
fn_next: on_next,
fn_completed: on_completed,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next, completion, and error.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. If it fails, `on_error`
/// is called. After `on_completed` or `on_error` have been called, it is
/// guaranteed that none of the three functions are called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_error<FnNext, FnCompleted, FnError>(&mut self,
on_next: FnNext,
on_completed: FnCompleted,
on_error: FnError)
-> Self::Subscription
where FnNext: FnMut(Self::Item), FnCompleted: FnOnce(), FnError: FnOnce(Self::Error) {
let observer = ErrorObserver {
fn_next: on_next,
fn_completed: on_completed,
fn_error: on_error,
};
self.subscribe(observer)
}
/// Subscribes a function that takes an option.
///
/// The function translates into an observer as follows:
///
    /// * `on_next(x)`: calls the function with `Some(x)`.
/// * `on_completed()`: calls the function with `None`.
/// * `on_error(error)`: panics.
///
/// After the function has been called with `None`,
/// it is guaranteed never to be called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_option<FnOption>(&mut self,
on_next_or_completed: FnOption)
-> Self::Subscription
where Self::Error: Debug, FnOption: FnMut(Option<Self::Item>) {
let observer = OptionObserver {
fn_option: on_next_or_completed
};
self.subscribe(observer)
}
/// Subscribes a function that takes a result of an option.
///
/// The function translates into an observer as follows:
///
/// * `on_next(x)`: calls the function with `Ok(Some(x))`.
/// * `on_completed()`: calls the function with `Ok(None)`.
/// * `on_error(error)`: calls the function with `Err(error)`.
///
/// After the function has been called with `Ok(None)` or `Err(error)`,
/// it is guaranteed never to be called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_result<FnResult>(&mut self,
on_next_or_completed_or_error: FnResult)
-> Self::Subscription
where FnResult: FnMut(Result<Option<Self::Item>, Self::Error>) {
let observer = ResultObserver {
fn_result: on_next_or_completed_or_error
};
self.subscribe(observer)
}
/// Transforms an observable by applying f to every value produced.
fn map<'s,
|
F>(&'s mut self, f: F) -> MapObservable<'s, Self, F>
where F: Fn(Self::Item) -> U {
MapObservable::new(self, f)
}
    /// Transforms an observable by applying f to the error in case of failure.
fn map_error<'s, F, G>(&'s mut self, f: G) -> MapErrorObservable<'s, Self, G>
where G: Fn(Self::Error) -> F {
MapErrorObservable::new(self, f)
}
/// Joins two observables sequentially.
///
/// After the current observable completes, an observer will start to
/// receive values from `next` until that observable completes or fails.
/// The `next` observable is only subscribed to after the current observable
/// completes.
fn continue_with<'s, ObNext>(&'s mut self, next: &'s mut ObNext) -> ContinueWithObservable<'s, Self, ObNext>
where ObNext: Observable<Item = Self::Item, Error = Self::Error> {
ContinueWithObservable::new(self, next)
}
}
|
U,
|
identifier_name
|
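A minimal, self-contained sketch of the push-based contract those doc comments describe: the observable calls on_next for every value and then exactly one of on_completed or on_error, after which the observer cannot be called again. VecObservable, FnObserver, and the simplified Observer trait below are hypothetical stand-ins, not the rx crate's actual types.

// Simplified observer: completion and failure consume the observer so no
// further calls are possible, mirroring the guarantee stated in the docs.
trait Observer<T, E> {
    fn on_next(&mut self, item: T);
    fn on_completed(self);
    fn on_error(self, error: E);
}

// An observer backed by a closure for the on_next case.
struct FnObserver<N: FnMut(i32)> {
    next: N,
}

impl<N: FnMut(i32)> Observer<i32, String> for FnObserver<N> {
    fn on_next(&mut self, item: i32) {
        (self.next)(item)
    }
    fn on_completed(self) {
        println!("completed");
    }
    fn on_error(self, error: String) {
        panic!("{}", error);
    }
}

// A toy finite observable that pushes the contents of a vector.
struct VecObservable {
    items: Vec<i32>,
}

impl VecObservable {
    fn subscribe<O: Observer<i32, String>>(&self, mut observer: O) {
        for &item in &self.items {
            observer.on_next(item);
        }
        // Completion consumes the observer, so no further calls can follow it.
        observer.on_completed();
    }
}

fn main() {
    let observable = VecObservable { items: vec![1, 2, 3] };
    observable.subscribe(FnObserver { next: |x: i32| println!("got {}", x) });
}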
observable.rs
|
// Rx -- Reactive programming for Rust
|
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
use observer::Observer;
use observer::{NextObserver, CompletedObserver, ErrorObserver, OptionObserver, ResultObserver};
use std::fmt::Debug;
use transform::{ContinueWithObservable, MapErrorObservable, MapObservable};
/// A stream of values.
///
/// An observable represents a stream of values, much like an iterator,
/// but instead of being “pull-based” like an iterator, it is “push-based”.
/// Multiple observers can subscribe to an observable and when the observable
/// produces a value, all observers get called with this value.
///
/// An observable can be _finite_ or _infinite_. An example of an infinite
/// observable is mouse clicks: you never know if the user is going to click
/// once more. An example of a finite observable is the results of a database
/// query: a database can hold only finitely many records, so one result is
/// the last one.
///
/// A finite observable can end in two ways:
///
/// * **Completed**: when the observable ends normally.
/// For instance, an observable of database query results
/// will complete after the last result has been produced.
/// * **Failed**: when an error occurred.
/// For instance, an observable of database query results
/// may fail if the connection is lost.
///
/// Failures are fatal: after an observable produces an error, it will not
/// produce any new values. If this is not the desired behavior, you can
/// use an observable of `Result`.
pub trait Observable {
/// The value produced by the observable.
type Item: Clone;
/// The error produced if the observable fails.
type Error: Clone;
/// The result of subscribing an observer.
// TODO: This drop bound is not required and it only complicates stuff, remove it.
type Subscription: Drop;
/// Subscribes an observer and returns the subscription.
///
/// After subscription, `on_next` will be called on the observer for every
/// value produced. If the observable completes, `on_completed` is called.
/// If the observable fails with an error, `on_error` is called. It is
/// guaranteed that no methods will be called on the observer after
/// `on_completed` or `on_error` have been called.
///
/// _When_ the observer is called is not part of the observable contract,
/// it depends on the kind of observable. The observer may be called before
/// `subscribe` returns, or it may be called in the future.
///
/// The returned value represents the subscription. Dropping the subscription
/// will prevent further calls on the observer.
fn subscribe<O>(&mut self, observer: O) -> Self::Subscription
where O: Observer<Self::Item, Self::Error>;
/// Subscribes a function to handle values produced by the observable.
///
/// For every value produced by the observable, `on_next` is called.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_next<FnNext>(&mut self,
on_next: FnNext)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item) {
let observer = NextObserver {
fn_next: on_next,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next and completion.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. A failure will cause a
/// panic. After `on_completed` has been called, it is guaranteed that neither
/// `on_next` nor `on_completed` is called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_completed<FnNext, FnCompleted>(&mut self,
on_next: FnNext,
on_completed: FnCompleted)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item), FnCompleted: FnOnce() {
let observer = CompletedObserver {
fn_next: on_next,
fn_completed: on_completed,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next, completion, and error.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. If it fails, `on_error`
/// is called. After `on_completed` or `on_error` have been called, it is
/// guaranteed that none of the three functions are called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_error<FnNext, FnCompleted, FnError>(&mut self,
on_next: FnNext,
on_completed: FnCompleted,
on_error: FnError)
-> Self::Subscription
where FnNext: FnMut(Self::Item), FnCompleted: FnOnce(), FnError: FnOnce(Self::Error) {
let observer = ErrorObserver {
fn_next: on_next,
fn_completed: on_completed,
fn_error: on_error,
};
self.subscribe(observer)
}
/// Subscribes a function that takes an option.
///
/// The function translates into an observer as follows:
///
    /// * `on_next(x)`: calls the function with `Some(x)`.
/// * `on_completed()`: calls the function with `None`.
/// * `on_error(error)`: panics.
///
/// After the function has been called with `None`,
/// it is guaranteed never to be called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_option<FnOption>(&mut self,
on_next_or_completed: FnOption)
-> Self::Subscription
where Self::Error: Debug, FnOption: FnMut(Option<Self::Item>) {
let observer = OptionObserver {
fn_option: on_next_or_completed
};
self.subscribe(observer)
}
/// Subscribes a function that takes a result of an option.
///
/// The function translates into an observer as follows:
///
/// * `on_next(x)`: calls the function with `Ok(Some(x))`.
/// * `on_completed()`: calls the function with `Ok(None)`.
/// * `on_error(error)`: calls the function with `Err(error)`.
///
/// After the function has been called with `Ok(None)` or `Err(error)`,
/// it is guaranteed never to be called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_result<FnResult>(&mut self,
on_next_or_completed_or_error: FnResult)
-> Self::Subscription
where FnResult: FnMut(Result<Option<Self::Item>, Self::Error>) {
let observer = ResultObserver {
fn_result: on_next_or_completed_or_error
};
self.subscribe(observer)
}
/// Transforms an observable by applying f to every value produced.
fn map<'s, U, F>(&'s mut self, f: F) -> MapObservable<'s, Self, F>
where F: Fn(Self::Item) -> U {
MapObservable::new(self, f)
}
    /// Transforms an observable by applying f to the error in case of failure.
fn map_error<'s, F, G>(&'s mut self, f: G) -> MapErrorObservable<'s, Self, G>
where G: Fn(Self::Error) -> F {
MapErrorObservable::new(self, f)
}
/// Joins two observables sequentially.
///
/// After the current observable completes, an observer will start to
/// receive values from `next` until that observable completes or fails.
/// The `next` observable is only subscribed to after the current observable
/// completes.
fn continue_with<'s, ObNext>(&'s mut self, next: &'s mut ObNext) -> ContinueWithObservable<'s, Self, ObNext>
where ObNext: Observable<Item = Self::Item, Error = Self::Error> {
ContinueWithObservable::new(self, next)
}
}
|
// Copyright 2016 Ruud van Asseldonk
|
random_line_split
|
observable.rs
|
// Rx -- Reactive programming for Rust
// Copyright 2016 Ruud van Asseldonk
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// A copy of the License has been included in the root of the repository.
use observer::Observer;
use observer::{NextObserver, CompletedObserver, ErrorObserver, OptionObserver, ResultObserver};
use std::fmt::Debug;
use transform::{ContinueWithObservable, MapErrorObservable, MapObservable};
/// A stream of values.
///
/// An observable represents a stream of values, much like an iterator,
/// but instead of being “pull-based” like an iterator, it is “push-based”.
/// Multiple observers can subscribe to an observable and when the observable
/// produces a value, all observers get called with this value.
///
/// An observable can be _finite_ or _infinite_. An example of an infinite
/// observable is mouse clicks: you never know if the user is going to click
/// once more. An example of a finite observable is the results of a database
/// query: a database can hold only finitely many records, so one result is
/// the last one.
///
/// A finite observable can end in two ways:
///
/// * **Completed**: when the observable ends normally.
/// For instance, an observable of database query results
/// will complete after the last result has been produced.
/// * **Failed**: when an error occurred.
/// For instance, an observable of database query results
/// may fail if the connection is lost.
///
/// Failures are fatal: after an observable produces an error, it will not
/// produce any new values. If this is not the desired behavior, you can
/// use an observable of `Result`.
pub trait Observable {
/// The value produced by the observable.
type Item: Clone;
/// The error produced if the observable fails.
type Error: Clone;
/// The result of subscribing an observer.
// TODO: This drop bound is not required and it only complicates stuff, remove it.
type Subscription: Drop;
/// Subscribes an observer and returns the subscription.
///
/// After subscription, `on_next` will be called on the observer for every
/// value produced. If the observable completes, `on_completed` is called.
/// If the observable fails with an error, `on_error` is called. It is
/// guaranteed that no methods will be called on the observer after
/// `on_completed` or `on_error` have been called.
///
/// _When_ the observer is called is not part of the observable contract,
/// it depends on the kind of observable. The observer may be called before
/// `subscribe` returns, or it may be called in the future.
///
/// The returned value represents the subscription. Dropping the subscription
/// will prevent further calls on the observer.
fn subscribe<O>(&mut self, observer: O) -> Self::Subscription
where O: Observer<Self::Item, Self::Error>;
/// Subscribes a function to handle values produced by the observable.
///
/// For every value produced by the observable, `on_next` is called.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_next<FnNext>(&mut self,
on_next: FnNext)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item) {
let observer = NextObserver {
fn_next: on_next,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next and completion.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. A failure will cause a
/// panic. After `on_completed` has been called, it is guaranteed that neither
/// `on_next` nor `on_completed` is called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_completed<FnNext, FnCompleted>(&mut self,
on_next: FnNext,
on_completed: FnCompleted)
-> Self::Subscription
where Self::Error: Debug, FnNext: FnMut(Self::Item), FnCompleted: FnOnce() {
let observer = CompletedObserver {
fn_next: on_next,
fn_completed: on_completed,
};
self.subscribe(observer)
}
/// Subscribes functions to handle next, completion, and error.
///
/// For every value produced by the observable, `on_next` is called. If the
/// observable completes, `on_completed` is called. If it fails, `on_error`
/// is called. After `on_completed` or `on_error` have been called, it is
/// guaranteed that none of the three functions are called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_error<FnNext, FnCompleted, FnError>(&mut self,
on_next: FnNext,
on_completed: FnCompleted,
on_error: FnError)
-> Self::Subscription
where FnNext: FnMut(Self::Item), FnCompleted: FnOnce(), FnError: FnOnce(Self::Error) {
|
/ Subscribes a function that takes an option.
///
/// The function translates into an observer as follows:
///
    /// * `on_next(x)`: calls the function with `Some(x)`.
/// * `on_completed()`: calls the function with `None`.
/// * `on_error(error)`: panics.
///
/// After the function has been called with `None`,
/// it is guaranteed never to be called again.
///
/// **This subscription panics if the observable fails with an error.**
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_option<FnOption>(&mut self,
on_next_or_completed: FnOption)
-> Self::Subscription
where Self::Error: Debug, FnOption: FnMut(Option<Self::Item>) {
let observer = OptionObserver {
fn_option: on_next_or_completed
};
self.subscribe(observer)
}
/// Subscribes a function that takes a result of an option.
///
/// The function translates into an observer as follows:
///
/// * `on_next(x)`: calls the function with `Ok(Some(x))`.
/// * `on_completed()`: calls the function with `Ok(None)`.
/// * `on_error(error)`: calls the function with `Err(error)`.
///
/// After the function has been called with `Ok(None)` or `Err(error)`,
/// it is guaranteed never to be called again.
///
/// See also [`subscribe()`](#tymethod.subscribe).
fn subscribe_result<FnResult>(&mut self,
on_next_or_completed_or_error: FnResult)
-> Self::Subscription
where FnResult: FnMut(Result<Option<Self::Item>, Self::Error>) {
let observer = ResultObserver {
fn_result: on_next_or_completed_or_error
};
self.subscribe(observer)
}
/// Transforms an observable by applying f to every value produced.
fn map<'s, U, F>(&'s mut self, f: F) -> MapObservable<'s, Self, F>
where F: Fn(Self::Item) -> U {
MapObservable::new(self, f)
}
    /// Transforms an observable by applying f to the error in case of failure.
fn map_error<'s, F, G>(&'s mut self, f: G) -> MapErrorObservable<'s, Self, G>
where G: Fn(Self::Error) -> F {
MapErrorObservable::new(self, f)
}
/// Joins two observables sequentially.
///
/// After the current observable completes, an observer will start to
/// receive values from `next` until that observable completes or fails.
/// The `next` observable is only subscribed to after the current observable
/// completes.
fn continue_with<'s, ObNext>(&'s mut self, next: &'s mut ObNext) -> ContinueWithObservable<'s, Self, ObNext>
where ObNext: Observable<Item = Self::Item, Error = Self::Error> {
ContinueWithObservable::new(self, next)
}
}
|
let observer = ErrorObserver {
fn_next: on_next,
fn_completed: on_completed,
fn_error: on_error,
};
self.subscribe(observer)
}
//
|
identifier_body
|
timer.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Dzmitry "kvark" Malyshau <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Timer configuration for ST STM32L1.
//!
//! This code supports only TIM2 at the moment.
#[path="../../util/ioreg.rs"] mod ioreg;
/// Available timer peripherals.
#[allow(missing_docs)]
#[derive(Copy)]
pub enum
|
{
Timer2,
}
/// Structure describing a Timer.
#[derive(Copy)]
pub struct Timer {
reg: &'static reg::TIMER,
}
impl Timer {
/// Create and start a Timer.
pub fn new(peripheral: TimerPeripheral, counter: u32, div_shift: u16) -> Timer {
use super::peripheral_clock as pc;
use self::TimerPeripheral::*;
let (reg, clock) = match peripheral {
Timer2 => (®::TIM2, pc::BusApb1::Tim2),
};
pc::PeripheralClock::Apb1(clock).enable();
reg.cr1.set_counter_enable(true);
reg.cr1.set_divisor_shift(div_shift);
reg.psc.set_prescaler(counter as u16 - 1);
reg.egr.set_generate(1);
Timer {
reg: reg,
}
}
}
impl ::hal::timer::Timer for Timer {
#[inline(always)]
fn get_counter(&self) -> u32 {
self.reg.cnt.counter() as u32
}
}
mod reg {
use util::volatile_cell::VolatileCell;
use core::ops::Drop;
ioregs!(TIMER = {
0x00 => reg16 cr1 { // control 1
0 => counter_enable : rw,
1 => update_disable : rw,
2 => update_request_source : rw,
3 => one_pulse_mode : rw,
4 => direction : rw,
6..5 => center_alignment_mode : rw,
7 => auto_reload_enable : rw,
9..8 => divisor_shift : rw,
},
0x04 => reg16 cr2 { // control 2
15..0 => control : rw,
},
0x08 => reg16 smcr { // slave mode control
15..0 => slave_control : rw,
},
0x0A => reg16 dier { // DMA/interrupt enable
15..0 => enable : rw,
},
0x10 => reg16 sr { // status
15..0 => status : rw,
},
0x14 => reg16 egr { // event generation
15..0 => generate : wo,
},
0x18 => reg16 ccmr1 { // capture/compare mode 1
15..0 => mode : rw,
},
0x1C => reg16 ccmr2 { // capture/compare mode 2
15..0 => mode : rw,
},
0x20 => reg16 ccer { // capture/compare enable
15..0 => enable : rw,
},
0x24 => reg16 cnt { // counter
15..0 => counter : rw,
},
0x28 => reg16 psc { // prescaler
15..0 => prescaler : rw,
},
0x2C => reg32 arr { // auto-reload
31..0 => reload : rw,
},
0x34 => reg32 ccr1 { // capture/compare 1
31..0 => cc : rw,
},
0x38 => reg32 ccr2 { // capture/compare 2
31..0 => cc : rw,
},
0x3C => reg32 ccr3 { // capture/compare 3
31..0 => cc : rw,
},
0x40 => reg32 ccr4 { // capture/compare 4
31..0 => cc : rw,
},
0x48 => reg16 dcr { // DMA control
15..0 => control : rw,
},
0x4C => reg16 dmap { // DMA address for full transfer
15..0 => address : rw,
},
0x50 => reg16 or { // option
15..0 => option : rw,
},
});
extern {
#[link_name="stm32l1_iomem_TIM2"] pub static TIM2: TIMER;
}
}
|
TimerPeripheral
|
identifier_name
|
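Timer::new above writes `counter - 1` into the prescaler register. As a small illustration of why, here is a sketch of the resulting counter tick rate, assuming the common STM32 convention that the counter advances at timer_clock / (PSC + 1); the numbers and the helper are hypothetical, and the divisor-shift field is ignored because its exact hardware effect is register-specific.

// Illustrative only: relates the `counter` argument to the counter tick rate.
fn tick_rate_hz(timer_clock_hz: u32, counter: u32) -> u32 {
    let psc = counter - 1; // the value Timer::new writes into the PSC register
    timer_clock_hz / (psc + 1)
}

fn main() {
    // e.g. a 32 MHz timer clock with counter = 32 yields a 1 MHz (1 us) tick.
    assert_eq!(tick_rate_hz(32_000_000, 32), 1_000_000);
    println!("tick rate: {} Hz", tick_rate_hz(32_000_000, 32));
}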
timer.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Dzmitry "kvark" Malyshau <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Timer configuration for ST STM32L1.
//!
//! This code supports only TIM2 at the moment.
#[path="../../util/ioreg.rs"] mod ioreg;
/// Available timer peripherals.
#[allow(missing_docs)]
#[derive(Copy)]
pub enum TimerPeripheral {
Timer2,
}
/// Structure describing a Timer.
#[derive(Copy)]
pub struct Timer {
reg: &'static reg::TIMER,
}
impl Timer {
/// Create and start a Timer.
pub fn new(peripheral: TimerPeripheral, counter: u32, div_shift: u16) -> Timer {
use super::peripheral_clock as pc;
use self::TimerPeripheral::*;
let (reg, clock) = match peripheral {
Timer2 => (®::TIM2, pc::BusApb1::Tim2),
};
pc::PeripheralClock::Apb1(clock).enable();
reg.cr1.set_counter_enable(true);
reg.cr1.set_divisor_shift(div_shift);
reg.psc.set_prescaler(counter as u16 - 1);
reg.egr.set_generate(1);
Timer {
reg: reg,
}
}
}
impl ::hal::timer::Timer for Timer {
#[inline(always)]
fn get_counter(&self) -> u32
|
}
mod reg {
use util::volatile_cell::VolatileCell;
use core::ops::Drop;
ioregs!(TIMER = {
0x00 => reg16 cr1 { // control 1
0 => counter_enable : rw,
1 => update_disable : rw,
2 => update_request_source : rw,
3 => one_pulse_mode : rw,
4 => direction : rw,
6..5 => center_alignment_mode : rw,
7 => auto_reload_enable : rw,
9..8 => divisor_shift : rw,
},
0x04 => reg16 cr2 { // control 2
15..0 => control : rw,
},
0x08 => reg16 smcr { // slave mode control
15..0 => slave_control : rw,
},
0x0A => reg16 dier { // DMA/interrupt enable
15..0 => enable : rw,
},
0x10 => reg16 sr { // status
15..0 => status : rw,
},
0x14 => reg16 egr { // event generation
15..0 => generate : wo,
},
0x18 => reg16 ccmr1 { // capture/compare mode 1
15..0 => mode : rw,
},
0x1C => reg16 ccmr2 { // capture/compare mode 2
15..0 => mode : rw,
},
0x20 => reg16 ccer { // capture/compare enable
15..0 => enable : rw,
},
0x24 => reg16 cnt { // counter
15..0 => counter : rw,
},
0x28 => reg16 psc { // prescaler
15..0 => prescaler : rw,
},
0x2C => reg32 arr { // auto-reload
31..0 => reload : rw,
},
0x34 => reg32 ccr1 { // capture/compare 1
31..0 => cc : rw,
},
0x38 => reg32 ccr2 { // capture/compare 2
31..0 => cc : rw,
},
0x3C => reg32 ccr3 { // capture/compare 3
31..0 => cc : rw,
},
0x40 => reg32 ccr4 { // capture/compare 4
31..0 => cc : rw,
},
0x48 => reg16 dcr { // DMA control
15..0 => control : rw,
},
0x4C => reg16 dmap { // DMA address for full transfer
15..0 => address : rw,
},
0x50 => reg16 or { // option
15..0 => option : rw,
},
});
extern {
#[link_name="stm32l1_iomem_TIM2"] pub static TIM2: TIMER;
}
}
|
{
self.reg.cnt.counter() as u32
}
|
identifier_body
|
timer.rs
|
// Zinc, the bare metal stack for rust.
// Copyright 2014 Dzmitry "kvark" Malyshau <[email protected]>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
|
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Timer configuration for ST STM32L1.
//!
//! This code supports only TIM2 at the moment.
#[path="../../util/ioreg.rs"] mod ioreg;
/// Available timer peripherals.
#[allow(missing_docs)]
#[derive(Copy)]
pub enum TimerPeripheral {
Timer2,
}
/// Structure describing a Timer.
#[derive(Copy)]
pub struct Timer {
reg: &'static reg::TIMER,
}
impl Timer {
/// Create and start a Timer.
pub fn new(peripheral: TimerPeripheral, counter: u32, div_shift: u16) -> Timer {
use super::peripheral_clock as pc;
use self::TimerPeripheral::*;
let (reg, clock) = match peripheral {
Timer2 => (®::TIM2, pc::BusApb1::Tim2),
};
pc::PeripheralClock::Apb1(clock).enable();
reg.cr1.set_counter_enable(true);
reg.cr1.set_divisor_shift(div_shift);
reg.psc.set_prescaler(counter as u16 - 1);
reg.egr.set_generate(1);
Timer {
reg: reg,
}
}
}
impl ::hal::timer::Timer for Timer {
#[inline(always)]
fn get_counter(&self) -> u32 {
self.reg.cnt.counter() as u32
}
}
mod reg {
use util::volatile_cell::VolatileCell;
use core::ops::Drop;
ioregs!(TIMER = {
0x00 => reg16 cr1 { // control 1
0 => counter_enable : rw,
1 => update_disable : rw,
2 => update_request_source : rw,
3 => one_pulse_mode : rw,
4 => direction : rw,
6..5 => center_alignment_mode : rw,
7 => auto_reload_enable : rw,
9..8 => divisor_shift : rw,
},
0x04 => reg16 cr2 { // control 2
15..0 => control : rw,
},
0x08 => reg16 smcr { // slave mode control
15..0 => slave_control : rw,
},
0x0A => reg16 dier { // DMA/interrupt enable
15..0 => enable : rw,
},
0x10 => reg16 sr { // status
15..0 => status : rw,
},
0x14 => reg16 egr { // event generation
15..0 => generate : wo,
},
0x18 => reg16 ccmr1 { // capture/compare mode 1
15..0 => mode : rw,
},
0x1C => reg16 ccmr2 { // capture/compare mode 2
15..0 => mode : rw,
},
0x20 => reg16 ccer { // capture/compare enable
15..0 => enable : rw,
},
0x24 => reg16 cnt { // counter
15..0 => counter : rw,
},
0x28 => reg16 psc { // prescaler
15..0 => prescaler : rw,
},
0x2C => reg32 arr { // auto-reload
31..0 => reload : rw,
},
0x34 => reg32 ccr1 { // capture/compare 1
31..0 => cc : rw,
},
0x38 => reg32 ccr2 { // capture/compare 2
31..0 => cc : rw,
},
0x3C => reg32 ccr3 { // capture/compare 3
31..0 => cc : rw,
},
0x40 => reg32 ccr4 { // capture/compare 4
31..0 => cc : rw,
},
0x48 => reg16 dcr { // DMA control
15..0 => control : rw,
},
0x4C => reg16 dmap { // DMA address for full transfer
15..0 => address : rw,
},
0x50 => reg16 or { // option
15..0 => option : rw,
},
});
extern {
#[link_name="stm32l1_iomem_TIM2"] pub static TIM2: TIMER;
}
}
|
//
// Unless required by applicable law or agreed to in writing, software
|
random_line_split
|
response.rs
|
//! # HTTP Response generation
//!
//! The HTTP Response code converts response objects into octets and
//! writes them to a stream.
// ****************************************************************************
//
// Imports
//
// ****************************************************************************
use std::collections::HashMap;
use std::fmt;
use std::io;
use std::borrow::Cow;
// ****************************************************************************
//
// Public Types
//
// ****************************************************************************
#[derive(Debug, Clone, Copy)]
pub enum HttpResponseStatus {
Continue = 100,
SwitchingProtocols = 101,
Processing = 102,
OK = 200,
Created = 201,
Accepted = 202,
NonAuthoritativeInformation = 203,
NoContent = 204,
ResetContent = 205,
PartialContent = 206,
MultiStatus = 207,
AlreadyReported = 208,
ImUsed = 226,
MultipleChoices = 300,
MovedPermanently = 301,
Found = 302,
SeeOther = 303,
NotModified = 304,
UseProxy = 305,
SwitchProxy = 306,
TemporaryRedirect = 307,
PermanentRedirect = 308,
BadRequest = 400,
Unauthorized = 401,
PaymentRequired = 402,
Forbidden = 403,
NotFound = 404,
MethodNotAllowed = 405,
NotAcceptable = 406,
ProxyAuthenticationRequired = 407,
RequestTimeout = 408,
Conflict = 409,
Gone = 410,
LengthRequired = 411,
PreconditionFailed = 412,
PayloadTooLarge = 413,
URITooLong = 414,
UnsupportedMediaType = 415,
RangeNotSatisfiable = 416,
ExpectationFailed = 417,
IAmATeapot = 418,
MisdirectedRequest = 421,
UnprocessableEntity = 422,
Locked = 423,
FailedDependency = 424,
UpgradeRequired = 426,
PreconditionRequired = 428,
TooManyRequests = 429,
RequestHeaderFieldsTooLarge = 431,
UnavailableForLegalReasons = 451,
InternalServerError = 500,
NotImplemented = 501,
BadGateway = 502,
ServiceUnavailable = 503,
GatewayTimeout = 504,
HTTPVersionNotSupported = 505,
VariantAlsoNegotiates = 506,
InsufficientStorage = 507,
LoopDetected = 508,
NotExtended = 510,
NetworkAuthenticationRequired = 511,
}
/// An HTTP Response.
/// Fully describes the HTTP response sent from the server to the client.
/// Because the user can create these objects, we use a Cow to allow them
/// to supply either an `&str` or a `std::string::String`.
#[derive(Debug)]
pub struct HttpResponse<'a> {
/// The HTTP result code - @todo should be an enum
pub status: HttpResponseStatus,
/// The protocol the client is using in the response
pub protocol: Cow<'a, str>,
/// Any headers supplied by the server in the response
pub headers: HashMap<Cow<'a, str>, Cow<'a, str>>,
/// The response body
|
// Private Types
//
// ****************************************************************************
// None
// ****************************************************************************
//
// Public Functions
//
// ****************************************************************************
impl<'a> HttpResponse<'a> {
pub fn new<S>(status: HttpResponseStatus, protocol: S) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>
{
HttpResponse::new_with_body(status, protocol, Cow::Borrowed(""))
}
pub fn new_with_body<S, T>(status: HttpResponseStatus, protocol: S, body: T) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
HttpResponse {
status: status,
protocol: protocol.into(),
headers: HashMap::new(),
body: body.into(),
}
}
pub fn write<T: io::Write>(&self, sink: &mut T) -> io::Result<usize> {
let header: String = format!("{} {}\r\n", self.protocol, self.status);
let mut total: usize = 0;
total += try!(sink.write(header.as_bytes()));
for (k, v) in &self.headers {
let line = format!("{}: {}\r\n", k, v);
total += try!(sink.write(line.as_bytes()));
}
total += try!(sink.write(b"\r\n"));
total += try!(sink.write(self.body.as_bytes()));
return Ok(total);
}
pub fn add_header<S, T>(&mut self, key: S, value: T)
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
self.headers.insert(key.into(), value.into());
}
}
impl fmt::Display for HttpResponseStatus {
// This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Write strictly the first element into the supplied output
// stream: `f`. Returns `fmt::Result` which indicates whether the
// operation succeeded or failed. Note that `write!` uses syntax which
// is very similar to `println!`.
write!(f, "{} {}", *self as u32, self.as_string())
}
}
impl HttpResponseStatus {
pub fn as_string(&self) -> &str {
match *self {
HttpResponseStatus::Continue => "Continue",
HttpResponseStatus::SwitchingProtocols => "Switching Protocols",
HttpResponseStatus::Processing => "Processing",
HttpResponseStatus::OK => "OK",
HttpResponseStatus::Created => "Created",
HttpResponseStatus::Accepted => "Accepted",
HttpResponseStatus::NonAuthoritativeInformation => "Non-Authoritative Information",
HttpResponseStatus::NoContent => "No Content",
HttpResponseStatus::ResetContent => "Reset Content",
HttpResponseStatus::PartialContent => "Partial Content",
HttpResponseStatus::MultiStatus => "Multi Status",
HttpResponseStatus::AlreadyReported => "Already Reported",
HttpResponseStatus::ImUsed => "IM Used",
HttpResponseStatus::MultipleChoices => "Multiple Choices",
HttpResponseStatus::MovedPermanently => "Moved Permanently",
HttpResponseStatus::Found => "Found",
HttpResponseStatus::SeeOther => "See Other",
HttpResponseStatus::NotModified => "Not Modified",
HttpResponseStatus::UseProxy => "Use Proxy",
HttpResponseStatus::SwitchProxy => "Switch Proxy",
HttpResponseStatus::TemporaryRedirect => "Temporary Redirect",
HttpResponseStatus::PermanentRedirect => "Permanent Redirect",
HttpResponseStatus::BadRequest => "Bad Request",
HttpResponseStatus::Unauthorized => "Unauthorized",
HttpResponseStatus::PaymentRequired => "Payment Required",
HttpResponseStatus::Forbidden => "Forbidden",
HttpResponseStatus::NotFound => "Not Found",
HttpResponseStatus::MethodNotAllowed => "Method Not Allowed",
HttpResponseStatus::NotAcceptable => "Not Acceptable",
HttpResponseStatus::ProxyAuthenticationRequired => "Proxy Authentication Required",
HttpResponseStatus::RequestTimeout => "Request Timeout",
HttpResponseStatus::Conflict => "Conflict",
HttpResponseStatus::Gone => "Gone",
HttpResponseStatus::LengthRequired => "Length Required",
HttpResponseStatus::PreconditionFailed => "Precondition Failed",
HttpResponseStatus::PayloadTooLarge => "Payload Too Large",
HttpResponseStatus::URITooLong => "URI Too Long",
HttpResponseStatus::UnsupportedMediaType => "Unsupported Media Type",
HttpResponseStatus::RangeNotSatisfiable => "Range Not Satisfiable",
HttpResponseStatus::ExpectationFailed => "Expectation Failed",
HttpResponseStatus::IAmATeapot => "I'm A Teapot",
HttpResponseStatus::MisdirectedRequest => "Misdirected Request",
HttpResponseStatus::UnprocessableEntity => "Unprocessable Entity",
HttpResponseStatus::Locked => "Locked",
HttpResponseStatus::FailedDependency => "Failed Dependency",
HttpResponseStatus::UpgradeRequired => "Upgrade Required",
HttpResponseStatus::PreconditionRequired => "Precondition Required",
HttpResponseStatus::TooManyRequests => "Too Many Requests",
HttpResponseStatus::RequestHeaderFieldsTooLarge => "Request Header Fields Too Large",
HttpResponseStatus::UnavailableForLegalReasons => "Unavailable For Legal Reasons",
HttpResponseStatus::InternalServerError => "Internal Server Error",
HttpResponseStatus::NotImplemented => "Not Implemented",
HttpResponseStatus::BadGateway => "Bad Gateway",
HttpResponseStatus::ServiceUnavailable => "Service Unavailable",
HttpResponseStatus::GatewayTimeout => "Gateway Timeout",
HttpResponseStatus::HTTPVersionNotSupported => "HTTP Version Not Supported",
HttpResponseStatus::VariantAlsoNegotiates => "Variant Also Negotiates",
HttpResponseStatus::InsufficientStorage => "Insufficient Storage",
HttpResponseStatus::LoopDetected => "Loop Detected",
HttpResponseStatus::NotExtended => "Not Extended",
HttpResponseStatus::NetworkAuthenticationRequired => "Network Authentication Required",
}
}
}
// ****************************************************************************
//
// Private Functions
//
// ****************************************************************************
// None
// ****************************************************************************
//
// End Of File
//
// ****************************************************************************
|
pub body: Cow<'a, str>,
}
// ****************************************************************************
//
|
random_line_split
|
response.rs
|
//! # HTTP Response generation
//!
//! The HTTP Response code converts response objects into octets and
//! writes them to a stream.
// ****************************************************************************
//
// Imports
//
// ****************************************************************************
use std::collections::HashMap;
use std::fmt;
use std::io;
use std::borrow::Cow;
// ****************************************************************************
//
// Public Types
//
// ****************************************************************************
#[derive(Debug, Clone, Copy)]
pub enum HttpResponseStatus {
Continue = 100,
SwitchingProtocols = 101,
Processing = 102,
OK = 200,
Created = 201,
Accepted = 202,
NonAuthoritativeInformation = 203,
NoContent = 204,
ResetContent = 205,
PartialContent = 206,
MultiStatus = 207,
AlreadyReported = 208,
ImUsed = 226,
MultipleChoices = 300,
MovedPermanently = 301,
Found = 302,
SeeOther = 303,
NotModified = 304,
UseProxy = 305,
SwitchProxy = 306,
TemporaryRedirect = 307,
PermanentRedirect = 308,
BadRequest = 400,
Unauthorized = 401,
PaymentRequired = 402,
Forbidden = 403,
NotFound = 404,
MethodNotAllowed = 405,
NotAcceptable = 406,
ProxyAuthenticationRequired = 407,
RequestTimeout = 408,
Conflict = 409,
Gone = 410,
LengthRequired = 411,
PreconditionFailed = 412,
PayloadTooLarge = 413,
URITooLong = 414,
UnsupportedMediaType = 415,
RangeNotSatisfiable = 416,
ExpectationFailed = 417,
IAmATeapot = 418,
MisdirectedRequest = 421,
UnprocessableEntity = 422,
Locked = 423,
FailedDependency = 424,
UpgradeRequired = 426,
PreconditionRequired = 428,
TooManyRequests = 429,
RequestHeaderFieldsTooLarge = 431,
UnavailableForLegalReasons = 451,
InternalServerError = 500,
NotImplemented = 501,
BadGateway = 502,
ServiceUnavailable = 503,
GatewayTimeout = 504,
HTTPVersionNotSupported = 505,
VariantAlsoNegotiates = 506,
InsufficientStorage = 507,
LoopDetected = 508,
NotExtended = 510,
NetworkAuthenticationRequired = 511,
}
/// An HTTP Response.
/// Fully describes the HTTP response sent from the server to the client.
/// Because the user can create these objects, we use a Cow to allow them
/// to supply either an `&str` or a `std::string::String`.
#[derive(Debug)]
pub struct HttpResponse<'a> {
/// The HTTP result code - @todo should be an enum
pub status: HttpResponseStatus,
/// The protocol the client is using in the response
pub protocol: Cow<'a, str>,
/// Any headers supplied by the server in the response
pub headers: HashMap<Cow<'a, str>, Cow<'a, str>>,
/// The response body
pub body: Cow<'a, str>,
}
// ****************************************************************************
//
// Private Types
//
// ****************************************************************************
// None
// ****************************************************************************
//
// Public Functions
//
// ****************************************************************************
impl<'a> HttpResponse<'a> {
pub fn new<S>(status: HttpResponseStatus, protocol: S) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>
{
HttpResponse::new_with_body(status, protocol, Cow::Borrowed(""))
}
pub fn new_with_body<S, T>(status: HttpResponseStatus, protocol: S, body: T) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
HttpResponse {
status: status,
protocol: protocol.into(),
headers: HashMap::new(),
body: body.into(),
}
}
pub fn write<T: io::Write>(&self, sink: &mut T) -> io::Result<usize> {
let header: String = format!("{} {}\r\n", self.protocol, self.status);
let mut total: usize = 0;
total += try!(sink.write(header.as_bytes()));
for (k, v) in &self.headers {
let line = format!("{}: {}\r\n", k, v);
total += try!(sink.write(line.as_bytes()));
}
total += try!(sink.write(b"\r\n"));
total += try!(sink.write(self.body.as_bytes()));
return Ok(total);
}
pub fn add_header<S, T>(&mut self, key: S, value: T)
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
self.headers.insert(key.into(), value.into());
}
}
impl fmt::Display for HttpResponseStatus {
// This trait requires `fmt` with this exact signature.
fn
|
(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Write strictly the first element into the supplied output
// stream: `f`. Returns `fmt::Result` which indicates whether the
// operation succeeded or failed. Note that `write!` uses syntax which
// is very similar to `println!`.
write!(f, "{} {}", *self as u32, self.as_string())
}
}
impl HttpResponseStatus {
pub fn as_string(&self) -> &str {
match *self {
HttpResponseStatus::Continue => "Continue",
HttpResponseStatus::SwitchingProtocols => "Switching Protocols",
HttpResponseStatus::Processing => "Processing",
HttpResponseStatus::OK => "OK",
HttpResponseStatus::Created => "Created",
HttpResponseStatus::Accepted => "Accepted",
HttpResponseStatus::NonAuthoritativeInformation => "Non-Authoritative Information",
HttpResponseStatus::NoContent => "No Content",
HttpResponseStatus::ResetContent => "Reset Content",
HttpResponseStatus::PartialContent => "Partial Content",
HttpResponseStatus::MultiStatus => "Multi Status",
HttpResponseStatus::AlreadyReported => "Already Reported",
HttpResponseStatus::ImUsed => "IM Used",
HttpResponseStatus::MultipleChoices => "Multiple Choices",
HttpResponseStatus::MovedPermanently => "Moved Permanently",
HttpResponseStatus::Found => "Found",
HttpResponseStatus::SeeOther => "See Other",
HttpResponseStatus::NotModified => "Not Modified",
HttpResponseStatus::UseProxy => "Use Proxy",
HttpResponseStatus::SwitchProxy => "Switch Proxy",
HttpResponseStatus::TemporaryRedirect => "Temporary Redirect",
HttpResponseStatus::PermanentRedirect => "Permanent Redirect",
HttpResponseStatus::BadRequest => "Bad Request",
HttpResponseStatus::Unauthorized => "Unauthorized",
HttpResponseStatus::PaymentRequired => "Payment Required",
HttpResponseStatus::Forbidden => "Forbidden",
HttpResponseStatus::NotFound => "Not Found",
HttpResponseStatus::MethodNotAllowed => "Method Not Allowed",
HttpResponseStatus::NotAcceptable => "Not Acceptable",
HttpResponseStatus::ProxyAuthenticationRequired => "Proxy Authentication Required",
HttpResponseStatus::RequestTimeout => "Request Timeout",
HttpResponseStatus::Conflict => "Conflict",
HttpResponseStatus::Gone => "Gone",
HttpResponseStatus::LengthRequired => "Length Required",
HttpResponseStatus::PreconditionFailed => "Precondition Failed",
HttpResponseStatus::PayloadTooLarge => "Payload Too Large",
HttpResponseStatus::URITooLong => "URI Too Long",
HttpResponseStatus::UnsupportedMediaType => "Unsupported Media Type",
HttpResponseStatus::RangeNotSatisfiable => "Range Not Satisfiable",
HttpResponseStatus::ExpectationFailed => "Expectation Failed",
HttpResponseStatus::IAmATeapot => "I'm A Teapot",
HttpResponseStatus::MisdirectedRequest => "Misdirected Request",
HttpResponseStatus::UnprocessableEntity => "Unprocessable Entity",
HttpResponseStatus::Locked => "Locked",
HttpResponseStatus::FailedDependency => "Failed Dependency",
HttpResponseStatus::UpgradeRequired => "Upgrade Required",
HttpResponseStatus::PreconditionRequired => "Precondition Required",
HttpResponseStatus::TooManyRequests => "Too Many Requests",
HttpResponseStatus::RequestHeaderFieldsTooLarge => "Request Header Fields Too Large",
HttpResponseStatus::UnavailableForLegalReasons => "Unavailable For Legal Reasons",
HttpResponseStatus::InternalServerError => "Internal Server Error",
HttpResponseStatus::NotImplemented => "Not Implemented",
HttpResponseStatus::BadGateway => "Bad Gateway",
HttpResponseStatus::ServiceUnavailable => "Service Unavailable",
HttpResponseStatus::GatewayTimeout => "Gateway Timeout",
HttpResponseStatus::HTTPVersionNotSupported => "HTTP Version Not Supported",
HttpResponseStatus::VariantAlsoNegotiates => "Variant Also Negotiates",
HttpResponseStatus::InsufficientStorage => "Insufficient Storage",
HttpResponseStatus::LoopDetected => "Loop Detected",
HttpResponseStatus::NotExtended => "Not Extended",
HttpResponseStatus::NetworkAuthenticationRequired => "Network Authentication Required",
}
}
}
// ****************************************************************************
//
// Private Functions
//
// ****************************************************************************
// None
// ****************************************************************************
//
// End Of File
//
// ****************************************************************************
|
fmt
|
identifier_name
|
response.rs
|
//! # HTTP Response generation
//!
//! The HTTP Response code converts response objects into octets and
//! writes them to a stream.
// ****************************************************************************
//
// Imports
//
// ****************************************************************************
use std::collections::HashMap;
use std::fmt;
use std::io;
use std::borrow::Cow;
// ****************************************************************************
//
// Public Types
//
// ****************************************************************************
#[derive(Debug, Clone, Copy)]
pub enum HttpResponseStatus {
Continue = 100,
SwitchingProtocols = 101,
Processing = 102,
OK = 200,
Created = 201,
Accepted = 202,
NonAuthoritativeInformation = 203,
NoContent = 204,
ResetContent = 205,
PartialContent = 206,
MultiStatus = 207,
AlreadyReported = 208,
ImUsed = 226,
MultipleChoices = 300,
MovedPermanently = 301,
Found = 302,
SeeOther = 303,
NotModified = 304,
UseProxy = 305,
SwitchProxy = 306,
TemporaryRedirect = 307,
PermanentRedirect = 308,
BadRequest = 400,
Unauthorized = 401,
PaymentRequired = 402,
Forbidden = 403,
NotFound = 404,
MethodNotAllowed = 405,
NotAcceptable = 406,
ProxyAuthenticationRequired = 407,
RequestTimeout = 408,
Conflict = 409,
Gone = 410,
LengthRequired = 411,
PreconditionFailed = 412,
PayloadTooLarge = 413,
URITooLong = 414,
UnsupportedMediaType = 415,
RangeNotSatisfiable = 416,
ExpectationFailed = 417,
IAmATeapot = 418,
MisdirectedRequest = 421,
UnprocessableEntity = 422,
Locked = 423,
FailedDependency = 424,
UpgradeRequired = 426,
PreconditionRequired = 428,
TooManyRequests = 429,
RequestHeaderFieldsTooLarge = 431,
UnavailableForLegalReasons = 451,
InternalServerError = 500,
NotImplemented = 501,
BadGateway = 502,
ServiceUnavailable = 503,
GatewayTimeout = 504,
HTTPVersionNotSupported = 505,
VariantAlsoNegotiates = 506,
InsufficientStorage = 507,
LoopDetected = 508,
NotExtended = 510,
NetworkAuthenticationRequired = 511,
}
/// An HTTP Response.
/// Fully describes the HTTP response sent from the server to the client.
/// Because the user can create these objects, we use a Cow to allow them
/// to supply either an `&str` or a `std::string::String`.
#[derive(Debug)]
pub struct HttpResponse<'a> {
/// The HTTP result code - @todo should be an enum
pub status: HttpResponseStatus,
/// The protocol the client is using in the response
pub protocol: Cow<'a, str>,
/// Any headers supplied by the server in the response
pub headers: HashMap<Cow<'a, str>, Cow<'a, str>>,
/// The response body
pub body: Cow<'a, str>,
}
// ****************************************************************************
//
// Private Types
//
// ****************************************************************************
// None
// ****************************************************************************
//
// Public Functions
//
// ****************************************************************************
impl<'a> HttpResponse<'a> {
pub fn new<S>(status: HttpResponseStatus, protocol: S) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>
{
HttpResponse::new_with_body(status, protocol, Cow::Borrowed(""))
}
pub fn new_with_body<S, T>(status: HttpResponseStatus, protocol: S, body: T) -> HttpResponse<'a>
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
HttpResponse {
status: status,
protocol: protocol.into(),
headers: HashMap::new(),
body: body.into(),
}
}
pub fn write<T: io::Write>(&self, sink: &mut T) -> io::Result<usize> {
let header: String = format!("{} {}\r\n", self.protocol, self.status);
let mut total: usize = 0;
total += try!(sink.write(header.as_bytes()));
for (k, v) in &self.headers {
let line = format!("{}: {}\r\n", k, v);
total += try!(sink.write(line.as_bytes()));
}
total += try!(sink.write(b"\r\n"));
total += try!(sink.write(self.body.as_bytes()));
return Ok(total);
}
pub fn add_header<S, T>(&mut self, key: S, value: T)
where S: Into<Cow<'a, str>>,
T: Into<Cow<'a, str>>
{
self.headers.insert(key.into(), value.into());
}
}
impl fmt::Display for HttpResponseStatus {
// This trait requires `fmt` with this exact signature.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Write strictly the first element into the supplied output
// stream: `f`. Returns `fmt::Result` which indicates whether the
// operation succeeded or failed. Note that `write!` uses syntax which
// is very similar to `println!`.
write!(f, "{} {}", *self as u32, self.as_string())
}
}
impl HttpResponseStatus {
pub fn as_string(&self) -> &str
|
HttpResponseStatus::UseProxy => "Use Proxy",
HttpResponseStatus::SwitchProxy => "Switch Proxy",
HttpResponseStatus::TemporaryRedirect => "Temporary Redirect",
HttpResponseStatus::PermanentRedirect => "Permanent Redirect",
HttpResponseStatus::BadRequest => "Bad Request",
HttpResponseStatus::Unauthorized => "Unauthorized",
HttpResponseStatus::PaymentRequired => "Payment Required",
HttpResponseStatus::Forbidden => "Forbidden",
HttpResponseStatus::NotFound => "Not Found",
HttpResponseStatus::MethodNotAllowed => "Method Not Allowed",
HttpResponseStatus::NotAcceptable => "Not Acceptable",
HttpResponseStatus::ProxyAuthenticationRequired => "Proxy Authentication Required",
HttpResponseStatus::RequestTimeout => "Request Timeout",
HttpResponseStatus::Conflict => "Conflict",
HttpResponseStatus::Gone => "Gone",
HttpResponseStatus::LengthRequired => "Length Required",
HttpResponseStatus::PreconditionFailed => "Precondition Failed",
HttpResponseStatus::PayloadTooLarge => "Payload Too Large",
HttpResponseStatus::URITooLong => "URI Too Long",
HttpResponseStatus::UnsupportedMediaType => "Unsupported Media Type",
HttpResponseStatus::RangeNotSatisfiable => "Range Not Satisfiable",
HttpResponseStatus::ExpectationFailed => "Expectation Failed",
HttpResponseStatus::IAmATeapot => "I'm A Teapot",
HttpResponseStatus::MisdirectedRequest => "Misdirected Request",
HttpResponseStatus::UnprocessableEntity => "Unprocessable Entity",
HttpResponseStatus::Locked => "Locked",
HttpResponseStatus::FailedDependency => "Failed Dependency",
HttpResponseStatus::UpgradeRequired => "Upgrade Required",
HttpResponseStatus::PreconditionRequired => "Precondition Required",
HttpResponseStatus::TooManyRequests => "Too Many Requests",
HttpResponseStatus::RequestHeaderFieldsTooLarge => "Request Header Fields Too Large",
HttpResponseStatus::UnavailableForLegalReasons => "Unavailable For Legal Reasons",
HttpResponseStatus::InternalServerError => "Internal Server Error",
HttpResponseStatus::NotImplemented => "Not Implemented",
HttpResponseStatus::BadGateway => "Bad Gateway",
HttpResponseStatus::ServiceUnavailable => "Service Unavailable",
HttpResponseStatus::GatewayTimeout => "Gateway Timeout",
HttpResponseStatus::HTTPVersionNotSupported => "HTTP Version Not Supported",
HttpResponseStatus::VariantAlsoNegotiates => "Variant Also Negotiates",
HttpResponseStatus::InsufficientStorage => "Insufficient Storage",
HttpResponseStatus::LoopDetected => "Loop Detected",
HttpResponseStatus::NotExtended => "Not Extended",
HttpResponseStatus::NetworkAuthenticationRequired => "Network Authentication Required",
}
}
}
// ****************************************************************************
//
// Private Functions
//
// ****************************************************************************
// None
// ****************************************************************************
//
// End Of File
//
// ****************************************************************************
|
{
match *self {
HttpResponseStatus::Continue => "Continue",
HttpResponseStatus::SwitchingProtocols => "Switching Protocols",
HttpResponseStatus::Processing => "Processing",
HttpResponseStatus::OK => "OK",
HttpResponseStatus::Created => "Created",
HttpResponseStatus::Accepted => "Accepted",
HttpResponseStatus::NonAuthoritativeInformation => "Non-Authoritative Information",
HttpResponseStatus::NoContent => "No Content",
HttpResponseStatus::ResetContent => "Reset Content",
HttpResponseStatus::PartialContent => "Partial Content",
HttpResponseStatus::MultiStatus => "Multi Status",
HttpResponseStatus::AlreadyReported => "Already Reported",
HttpResponseStatus::ImUsed => "IM Used",
HttpResponseStatus::MultipleChoices => "Multiple Choices",
HttpResponseStatus::MovedPermanently => "Moved Permanently",
HttpResponseStatus::Found => "Found",
HttpResponseStatus::SeeOther => "See Other",
HttpResponseStatus::NotModified => "Not Modified",
|
identifier_body
|
loads.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
pub struct Bytes {
a: u8,
b: u8,
c: u8,
d: u8,
}
// CHECK-LABEL: @borrow
#[no_mangle]
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load i32** %x{{.*}},!nonnull
x
}
// CHECK-LABEL: @_box
#[no_mangle]
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load i32** %x{{.*}},!nonnull
*x
}
// CHECK-LABEL: small_array_alignment
// The array is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4]
|
// CHECK-LABEL: small_struct_alignment
// The struct is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: Bytes) -> Bytes {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
|
{
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
|
identifier_body
|
loads.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
pub struct Bytes {
a: u8,
b: u8,
c: u8,
d: u8,
}
// CHECK-LABEL: @borrow
#[no_mangle]
pub fn
|
(x: &i32) -> &i32 {
// CHECK: load i32** %x{{.*}},!nonnull
x
}
// CHECK-LABEL: @_box
#[no_mangle]
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load i32** %x{{.*}},!nonnull
*x
}
// CHECK-LABEL: small_array_alignment
// The array is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
// CHECK-LABEL: small_struct_alignment
// The struct is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: Bytes) -> Bytes {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
|
borrow
|
identifier_name
|
loads.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// compile-flags: -C no-prepopulate-passes
pub struct Bytes {
a: u8,
b: u8,
c: u8,
d: u8,
|
#[no_mangle]
pub fn borrow(x: &i32) -> &i32 {
// CHECK: load i32** %x{{.*}},!nonnull
x
}
// CHECK-LABEL: @_box
#[no_mangle]
pub fn _box(x: Box<i32>) -> i32 {
// CHECK: load i32** %x{{.*}},!nonnull
*x
}
// CHECK-LABEL: small_array_alignment
// The array is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_array_alignment(x: [i8; 4]) -> [i8; 4] {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
// CHECK-LABEL: small_struct_alignment
// The struct is loaded as i32, but its alignment is lower, go with 1 byte to avoid target
// dependent alignment
#[no_mangle]
pub fn small_struct_alignment(x: Bytes) -> Bytes {
// CHECK: [[VAR:%[0-9]+]] = load i32* %{{.*}}, align 1
// CHECK: ret i32 [[VAR]]
x
}
|
}
// CHECK-LABEL: @borrow
|
random_line_split
|
mod.rs
|
use alloc::boxed::Box;
use core::str;
use fs::{KScheme, Resource};
use system::error::{Error, Result, ENOENT};
use system::syscall::O_CREAT;
pub use self::dsdt::DSDT;
pub use self::fadt::FADT;
pub use self::madt::MADT;
pub use self::rsdt::RSDT;
pub use self::sdt::SDTHeader;
pub use self::ssdt::SSDT;
pub mod aml;
pub mod dsdt;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod ssdt;
#[derive(Clone, Debug, Default)]
pub struct Acpi {
rsdt: RSDT,
fadt: Option<FADT>,
dsdt: Option<DSDT>,
ssdt: Option<SSDT>,
madt: Option<MADT>,
}
impl Acpi {
pub fn new() -> Option<Box<Self>> {
match RSDT::new() {
Ok(rsdt) => {
// debugln!("{:#?}", rsdt);
let mut acpi = box Acpi {
rsdt: rsdt,
fadt: None,
dsdt: None,
ssdt: None,
madt: None,
};
for addr in acpi.rsdt.addrs.iter() {
let header = unsafe { &*(*addr as *const SDTHeader) };
if let Some(fadt) = FADT::new(header) {
//Can't do it debugln!("{:#?}", fadt);
if let Some(dsdt) = DSDT::new(unsafe { &*(fadt.dsdt as *const SDTHeader) }) {
syslog_debug!("DSDT:");
aml::parse(dsdt.data);
acpi.dsdt = Some(dsdt);
}
acpi.fadt = Some(fadt);
} else if let Some(ssdt) = SSDT::new(header) {
syslog_debug!("SSDT:");
aml::parse(ssdt.data);
acpi.ssdt = Some(ssdt);
} else if let Some(madt) = MADT::new(header) {
syslog_debug!("{:#?}", madt);
acpi.madt = Some(madt);
} else {
syslog_debug!("{}: Unknown Table", unsafe { str::from_utf8_unchecked(&header.signature) });
}
}
Some(acpi)
}
Err(e) => {
debugln!("{}", e);
None
}
}
}
}
impl KScheme for Acpi {
fn
|
(&self) -> &'static str {
"acpi"
}
fn open(&mut self, url: &str, flags: usize) -> Result<Box<Resource>> {
if url.splitn(2, ":").nth(1).unwrap_or("") == "off" && flags & O_CREAT == O_CREAT {
match self.fadt {
Some(fadt) => {
debugln!("Powering Off");
unsafe {
asm!("out dx, ax" : : "{edx}"(fadt.pm1a_control_block), "{ax}"(0 | 1 << 13) : : "intel", "volatile")
};
}
None => {
debugln!("Unable to power off: No FADT");
}
}
}
Err(Error::new(ENOENT))
}
}
|
scheme
|
identifier_name
|
mod.rs
|
use alloc::boxed::Box;
use core::str;
use fs::{KScheme, Resource};
use system::error::{Error, Result, ENOENT};
use system::syscall::O_CREAT;
pub use self::dsdt::DSDT;
pub use self::fadt::FADT;
pub use self::madt::MADT;
pub use self::rsdt::RSDT;
pub use self::sdt::SDTHeader;
pub use self::ssdt::SSDT;
pub mod aml;
pub mod dsdt;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod ssdt;
#[derive(Clone, Debug, Default)]
pub struct Acpi {
rsdt: RSDT,
fadt: Option<FADT>,
dsdt: Option<DSDT>,
ssdt: Option<SSDT>,
madt: Option<MADT>,
}
impl Acpi {
pub fn new() -> Option<Box<Self>> {
match RSDT::new() {
Ok(rsdt) => {
// debugln!("{:#?}", rsdt);
let mut acpi = box Acpi {
rsdt: rsdt,
fadt: None,
dsdt: None,
ssdt: None,
madt: None,
};
for addr in acpi.rsdt.addrs.iter() {
let header = unsafe { &*(*addr as *const SDTHeader) };
if let Some(fadt) = FADT::new(header) {
//Can't do it debugln!("{:#?}", fadt);
if let Some(dsdt) = DSDT::new(unsafe { &*(fadt.dsdt as *const SDTHeader) }) {
syslog_debug!("DSDT:");
aml::parse(dsdt.data);
acpi.dsdt = Some(dsdt);
}
acpi.fadt = Some(fadt);
} else if let Some(ssdt) = SSDT::new(header) {
syslog_debug!("SSDT:");
aml::parse(ssdt.data);
acpi.ssdt = Some(ssdt);
} else if let Some(madt) = MADT::new(header) {
syslog_debug!("{:#?}", madt);
acpi.madt = Some(madt);
} else {
syslog_debug!("{}: Unknown Table", unsafe { str::from_utf8_unchecked(&header.signature) });
}
}
Some(acpi)
}
Err(e) =>
|
}
}
}
impl KScheme for Acpi {
fn scheme(&self) -> &'static str {
"acpi"
}
fn open(&mut self, url: &str, flags: usize) -> Result<Box<Resource>> {
if url.splitn(2, ":").nth(1).unwrap_or("") == "off" && flags & O_CREAT == O_CREAT {
match self.fadt {
Some(fadt) => {
debugln!("Powering Off");
unsafe {
asm!("out dx, ax" : : "{edx}"(fadt.pm1a_control_block), "{ax}"(0 | 1 << 13) : : "intel", "volatile")
};
}
None => {
debugln!("Unable to power off: No FADT");
}
}
}
Err(Error::new(ENOENT))
}
}
|
{
debugln!("{}", e);
None
}
|
conditional_block
|
mod.rs
|
use alloc::boxed::Box;
use core::str;
use fs::{KScheme, Resource};
use system::error::{Error, Result, ENOENT};
use system::syscall::O_CREAT;
pub use self::dsdt::DSDT;
pub use self::fadt::FADT;
pub use self::madt::MADT;
pub use self::rsdt::RSDT;
pub use self::sdt::SDTHeader;
pub use self::ssdt::SSDT;
pub mod aml;
pub mod dsdt;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod ssdt;
#[derive(Clone, Debug, Default)]
pub struct Acpi {
rsdt: RSDT,
fadt: Option<FADT>,
dsdt: Option<DSDT>,
ssdt: Option<SSDT>,
madt: Option<MADT>,
}
impl Acpi {
pub fn new() -> Option<Box<Self>>
|
acpi.dsdt = Some(dsdt);
}
acpi.fadt = Some(fadt);
} else if let Some(ssdt) = SSDT::new(header) {
syslog_debug!("SSDT:");
aml::parse(ssdt.data);
acpi.ssdt = Some(ssdt);
} else if let Some(madt) = MADT::new(header) {
syslog_debug!("{:#?}", madt);
acpi.madt = Some(madt);
} else {
syslog_debug!("{}: Unknown Table", unsafe { str::from_utf8_unchecked(&header.signature) });
}
}
Some(acpi)
}
Err(e) => {
debugln!("{}", e);
None
}
}
}
}
impl KScheme for Acpi {
fn scheme(&self) -> &'static str {
"acpi"
}
fn open(&mut self, url: &str, flags: usize) -> Result<Box<Resource>> {
if url.splitn(2, ":").nth(1).unwrap_or("") == "off" && flags & O_CREAT == O_CREAT {
match self.fadt {
Some(fadt) => {
debugln!("Powering Off");
unsafe {
asm!("out dx, ax" : : "{edx}"(fadt.pm1a_control_block), "{ax}"(0 | 1 << 13) : : "intel", "volatile")
};
}
None => {
debugln!("Unable to power off: No FADT");
}
}
}
Err(Error::new(ENOENT))
}
}
|
{
match RSDT::new() {
Ok(rsdt) => {
// debugln!("{:#?}", rsdt);
let mut acpi = box Acpi {
rsdt: rsdt,
fadt: None,
dsdt: None,
ssdt: None,
madt: None,
};
for addr in acpi.rsdt.addrs.iter() {
let header = unsafe { &*(*addr as *const SDTHeader) };
if let Some(fadt) = FADT::new(header) {
//Can't do it debugln!("{:#?}", fadt);
if let Some(dsdt) = DSDT::new(unsafe { &*(fadt.dsdt as *const SDTHeader) }) {
syslog_debug!("DSDT:");
aml::parse(dsdt.data);
|
identifier_body
|
mod.rs
|
use alloc::boxed::Box;
use core::str;
use fs::{KScheme, Resource};
use system::error::{Error, Result, ENOENT};
use system::syscall::O_CREAT;
pub use self::dsdt::DSDT;
pub use self::fadt::FADT;
pub use self::madt::MADT;
pub use self::rsdt::RSDT;
pub use self::sdt::SDTHeader;
pub use self::ssdt::SSDT;
pub mod aml;
pub mod dsdt;
pub mod fadt;
pub mod madt;
pub mod rsdt;
pub mod sdt;
pub mod ssdt;
#[derive(Clone, Debug, Default)]
pub struct Acpi {
rsdt: RSDT,
fadt: Option<FADT>,
dsdt: Option<DSDT>,
ssdt: Option<SSDT>,
madt: Option<MADT>,
}
impl Acpi {
pub fn new() -> Option<Box<Self>> {
match RSDT::new() {
Ok(rsdt) => {
// debugln!("{:#?}", rsdt);
let mut acpi = box Acpi {
rsdt: rsdt,
fadt: None,
dsdt: None,
ssdt: None,
madt: None,
};
for addr in acpi.rsdt.addrs.iter() {
let header = unsafe { &*(*addr as *const SDTHeader) };
if let Some(fadt) = FADT::new(header) {
//Can't do it debugln!("{:#?}", fadt);
if let Some(dsdt) = DSDT::new(unsafe { &*(fadt.dsdt as *const SDTHeader) }) {
syslog_debug!("DSDT:");
aml::parse(dsdt.data);
acpi.dsdt = Some(dsdt);
}
acpi.fadt = Some(fadt);
} else if let Some(ssdt) = SSDT::new(header) {
|
aml::parse(ssdt.data);
acpi.ssdt = Some(ssdt);
} else if let Some(madt) = MADT::new(header) {
syslog_debug!("{:#?}", madt);
acpi.madt = Some(madt);
} else {
syslog_debug!("{}: Unknown Table", unsafe { str::from_utf8_unchecked(&header.signature) });
}
}
Some(acpi)
}
Err(e) => {
debugln!("{}", e);
None
}
}
}
}
impl KScheme for Acpi {
fn scheme(&self) -> &'static str {
"acpi"
}
fn open(&mut self, url: &str, flags: usize) -> Result<Box<Resource>> {
if url.splitn(2, ":").nth(1).unwrap_or("") == "off" && flags & O_CREAT == O_CREAT {
match self.fadt {
Some(fadt) => {
debugln!("Powering Off");
unsafe {
asm!("out dx, ax" : : "{edx}"(fadt.pm1a_control_block), "{ax}"(0 | 1 << 13) : : "intel", "volatile")
};
}
None => {
debugln!("Unable to power off: No FADT");
}
}
}
Err(Error::new(ENOENT))
}
}
|
syslog_debug!("SSDT:");
|
random_line_split
|
showbayer.rs
|
//! ShowBayer.
extern crate bayer;
extern crate sdl2;
use std::cmp::min;
use std::env;
use std::fs::File;
use std::path::Path;
use std::slice;
use bayer::*;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::pixels::PixelFormatEnum;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
enum ImgDepth {
Depth8,
Depth12BE,
Depth12LE,
Depth16BE,
Depth16LE,
}
fn main() {
let args: Vec<String> = env::args().skip(1).collect();
usage();
if args.len() < 4 {
return;
}
let bayer_w = args[0].parse::<usize>().unwrap();
let bayer_h = args[1].parse::<usize>().unwrap();
let depth = parse_depth(&args[2]);
let files = &args[3..];
let mut idx = 0;
let mut cfa = CFA::BGGR;
let mut alg = Demosaic::Linear;
let mut old_idx = 1;
let mut old_cfa = CFA::RGGB;
let mut old_alg = Demosaic::None;
// Initialise SDL window.
let sdl = sdl2::init().unwrap();
let video = sdl.video().unwrap();
let window
= video.window("ShowBayer", bayer_w as u32, bayer_h as u32)
.position_centered()
.opengl()
.build().unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let mut event_pump = sdl.event_pump().unwrap();
let texture_creator = canvas.texture_creator();
let mut texture = texture_creator.create_texture_streaming(
PixelFormatEnum::RGB24,
bayer_w as u32, bayer_h as u32).unwrap();
let bytes_per_pixel = bytes_per_pixel(raster_depth(depth));
let mut buf = vec![0; bayer_w * bayer_h * bytes_per_pixel];
read_file(&Path::new(&files[0]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
let mut redraw = true;
'mainloop: loop {
if let Some(e) = event_pump.wait_event_timeout(60) {
match e {
Event::Quit {..}
| Event::KeyDown { keycode: Some(Keycode::Escape),.. } => {
                    break 'mainloop;
},
Event::KeyDown { keycode: Some(Keycode::F1),.. } => { cfa = CFA::BGGR; },
Event::KeyDown { keycode: Some(Keycode::F2),.. } => { cfa = CFA::GBRG; },
Event::KeyDown { keycode: Some(Keycode::F3),.. } => { cfa = CFA::GRBG; },
Event::KeyDown { keycode: Some(Keycode::F4),.. } => { cfa = CFA::RGGB; },
Event::KeyDown { keycode: Some(Keycode::Num0),.. } => { alg = Demosaic::None; },
Event::KeyDown { keycode: Some(Keycode::Num1),.. } => { alg = Demosaic::NearestNeighbour; },
Event::KeyDown { keycode: Some(Keycode::Num2),.. } => { alg = Demosaic::Linear; },
Event::KeyDown { keycode: Some(Keycode::Num3),.. } => { alg = Demosaic::Cubic; },
Event::KeyDown { keycode: Some(Keycode::Space),.. }
| Event::KeyDown { keycode: Some(Keycode::Right),.. } => {
idx = (idx + 1) % files.len();
}
Event::KeyDown { keycode: Some(Keycode::Left),.. } => {
if idx == 0 {
idx = files.len() - 1;
} else {
idx = idx - 1;
}
}
_ => (),
}
            if idx != old_idx || cfa != old_cfa || alg != old_alg {
redraw = true;
}
} else {
redraw = true;
}
if redraw {
            if idx != old_idx || cfa != old_cfa || alg != old_alg {
                if old_idx != idx {
old_idx = idx;
println!("{}", files[idx]);
}
                if old_cfa != cfa {
old_cfa = cfa;
print_cfa(cfa);
}
                if old_alg != alg {
old_alg = alg;
print_alg(alg);
}
read_file(&Path::new(&files[idx]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
}
present_to_screen(&mut canvas, &texture);
}
}
}
fn usage() {
println!("usage: ShowBayer <width> <height> <depth> [filenames...]");
println!();
println!(" depth 8, 12BE, 12LE, 16BE, 16LE");
println!();
println!(" <ESC> Quit.");
println!(" <left> Go to previous image.");
    println!(" <right> Go to next image.");
println!(" <space> Go to next image.");
println!();
println!(" F1-F4 Change CFA pattern: BGGR, GBRG, GRBG, RGGB");
println!(" 0-3 Change demosaicing algorithm");
println!();
}
fn parse_depth(s: &String) -> ImgDepth {
let s = s.to_uppercase();
if s == "8" {
ImgDepth::Depth8
} else if s == "12BE" {
ImgDepth::Depth12BE
} else if s == "12LE" {
ImgDepth::Depth12LE
} else if s == "16BE" {
ImgDepth::Depth16BE
} else if s == "16LE" {
ImgDepth::Depth16LE
} else {
panic!("invalid depth");
}
}
fn bayer_depth(depth: ImgDepth) -> BayerDepth {
match depth {
ImgDepth::Depth8 => BayerDepth::Depth8,
ImgDepth::Depth12BE => BayerDepth::Depth16BE,
ImgDepth::Depth12LE => BayerDepth::Depth16LE,
ImgDepth::Depth16BE => BayerDepth::Depth16BE,
ImgDepth::Depth16LE => BayerDepth::Depth16LE,
}
}
fn raster_depth(depth: ImgDepth) -> RasterDepth {
match depth {
ImgDepth::Depth8 => RasterDepth::Depth8,
ImgDepth::Depth12BE => RasterDepth::Depth16,
ImgDepth::Depth12LE => RasterDepth::Depth16,
ImgDepth::Depth16BE => RasterDepth::Depth16,
ImgDepth::Depth16LE => RasterDepth::Depth16,
}
}
fn bytes_per_pixel(depth: RasterDepth) -> usize {
match depth {
RasterDepth::Depth8 => 3,
RasterDepth::Depth16 => 6,
}
}
fn print_cfa(cfa: CFA) {
let s = match cfa {
CFA::BGGR => "BGGR",
CFA::GBRG => "GBRG",
CFA::GRBG => "GRBG",
CFA::RGGB => "RGGB",
};
println!("CFA: {}", s);
}
fn print_alg(alg: Demosaic) {
let s = match alg {
Demosaic::None => "none",
Demosaic::NearestNeighbour => "nearest neighbour",
Demosaic::Linear => "linear",
Demosaic::Cubic => "cubic",
};
println!("Demosaic: {}", s);
}
fn read_file(
path: &Path, bayer_w: usize, bayer_h: usize,
depth: ImgDepth, cfa: CFA, alg: Demosaic,
buf: &mut [u8], texture: &mut sdl2::render::Texture) {
let maybe_file = File::open(path);
match maybe_file {
Ok(mut f) => {
let result = run_demosaic(&mut f, bayer_depth(depth), cfa, alg,
&mut RasterMut::new(bayer_w, bayer_h, raster_depth(depth), buf));
match result {
Ok(_) => (),
Err(e) => {
println!("Error occurred - {}", e);
return;
},
}
},
Err(e) => {
println!("Error occurred - {}", e);
return;
}
}
render_to_texture(texture, bayer_w, bayer_h, depth, &buf);
}
fn render_to_texture(
texture: &mut sdl2::render::Texture,
w: usize, h: usize, depth: ImgDepth, buf: &[u8]) {
match raster_depth(depth) {
RasterDepth::Depth8 => {
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
buffer[dst_offset + i] = buf[src_offset + i];
}
}
}).unwrap();
},
RasterDepth::Depth16 => {
let shr = if depth == ImgDepth::Depth12BE || depth == ImgDepth::Depth12LE { 4 } else { 8 };
let buf = unsafe {
slice::from_raw_parts(buf.as_ptr() as *const u16, buf.len() / 2)
};
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
// shr = 8 for u16 to u8, or
// shr = 4 for u12 to u8.
let v = buf[src_offset + i] >> shr;
buffer[dst_offset + i] = min(v, 255) as u8;
}
}
}).unwrap();
},
}
}
fn present_to_screen(
canvas: &mut sdl2::render::WindowCanvas,
texture: &sdl2::render::Texture)
|
{
canvas.clear();
let _ = canvas.copy(&texture, None, None);
canvas.present();
}
|
identifier_body
|
|
showbayer.rs
|
//! ShowBayer.
extern crate bayer;
extern crate sdl2;
use std::cmp::min;
use std::env;
use std::fs::File;
use std::path::Path;
use std::slice;
use bayer::*;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::pixels::PixelFormatEnum;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
enum ImgDepth {
Depth8,
Depth12BE,
Depth12LE,
Depth16BE,
Depth16LE,
}
fn main() {
let args: Vec<String> = env::args().skip(1).collect();
usage();
if args.len() < 4 {
return;
}
let bayer_w = args[0].parse::<usize>().unwrap();
let bayer_h = args[1].parse::<usize>().unwrap();
let depth = parse_depth(&args[2]);
let files = &args[3..];
let mut idx = 0;
let mut cfa = CFA::BGGR;
let mut alg = Demosaic::Linear;
let mut old_idx = 1;
let mut old_cfa = CFA::RGGB;
let mut old_alg = Demosaic::None;
// Initialise SDL window.
let sdl = sdl2::init().unwrap();
let video = sdl.video().unwrap();
let window
= video.window("ShowBayer", bayer_w as u32, bayer_h as u32)
.position_centered()
.opengl()
.build().unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let mut event_pump = sdl.event_pump().unwrap();
let texture_creator = canvas.texture_creator();
let mut texture = texture_creator.create_texture_streaming(
PixelFormatEnum::RGB24,
bayer_w as u32, bayer_h as u32).unwrap();
let bytes_per_pixel = bytes_per_pixel(raster_depth(depth));
let mut buf = vec![0; bayer_w * bayer_h * bytes_per_pixel];
read_file(&Path::new(&files[0]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
let mut redraw = true;
'mainloop: loop {
if let Some(e) = event_pump.wait_event_timeout(60) {
match e {
Event::Quit {..}
| Event::KeyDown { keycode: Some(Keycode::Escape),.. } => {
                    break 'mainloop;
},
Event::KeyDown { keycode: Some(Keycode::F1),.. } => { cfa = CFA::BGGR; },
Event::KeyDown { keycode: Some(Keycode::F2),.. } => { cfa = CFA::GBRG; },
Event::KeyDown { keycode: Some(Keycode::F3),.. } => { cfa = CFA::GRBG; },
Event::KeyDown { keycode: Some(Keycode::F4),.. } => { cfa = CFA::RGGB; },
Event::KeyDown { keycode: Some(Keycode::Num0),.. } => { alg = Demosaic::None; },
Event::KeyDown { keycode: Some(Keycode::Num1),.. } => { alg = Demosaic::NearestNeighbour; },
Event::KeyDown { keycode: Some(Keycode::Num2),.. } => { alg = Demosaic::Linear; },
Event::KeyDown { keycode: Some(Keycode::Num3),.. } => { alg = Demosaic::Cubic; },
Event::KeyDown { keycode: Some(Keycode::Space),.. }
| Event::KeyDown { keycode: Some(Keycode::Right),.. } => {
idx = (idx + 1) % files.len();
}
Event::KeyDown { keycode: Some(Keycode::Left),.. } => {
if idx == 0 {
idx = files.len() - 1;
} else {
idx = idx - 1;
}
}
_ => (),
}
            if idx != old_idx || cfa != old_cfa || alg != old_alg {
redraw = true;
}
} else {
redraw = true;
}
if redraw {
            if idx != old_idx || cfa != old_cfa || alg != old_alg {
                if old_idx != idx {
old_idx = idx;
println!("{}", files[idx]);
}
                if old_cfa != cfa {
old_cfa = cfa;
print_cfa(cfa);
}
                if old_alg != alg {
old_alg = alg;
print_alg(alg);
}
read_file(&Path::new(&files[idx]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
}
present_to_screen(&mut canvas, &texture);
}
}
}
fn usage() {
println!("usage: ShowBayer <width> <height> <depth> [filenames...]");
println!();
println!(" depth 8, 12BE, 12LE, 16BE, 16LE");
println!();
println!(" <ESC> Quit.");
println!(" <left> Go to previous image.");
    println!(" <right> Go to next image.");
println!(" <space> Go to next image.");
println!();
println!(" F1-F4 Change CFA pattern: BGGR, GBRG, GRBG, RGGB");
println!(" 0-3 Change demosaicing algorithm");
println!();
}
fn parse_depth(s: &String) -> ImgDepth {
let s = s.to_uppercase();
if s == "8" {
ImgDepth::Depth8
} else if s == "12BE" {
ImgDepth::Depth12BE
} else if s == "12LE" {
ImgDepth::Depth12LE
} else if s == "16BE" {
ImgDepth::Depth16BE
} else if s == "16LE" {
ImgDepth::Depth16LE
} else {
panic!("invalid depth");
}
}
fn bayer_depth(depth: ImgDepth) -> BayerDepth {
match depth {
ImgDepth::Depth8 => BayerDepth::Depth8,
ImgDepth::Depth12BE => BayerDepth::Depth16BE,
ImgDepth::Depth12LE => BayerDepth::Depth16LE,
ImgDepth::Depth16BE => BayerDepth::Depth16BE,
ImgDepth::Depth16LE => BayerDepth::Depth16LE,
}
}
fn raster_depth(depth: ImgDepth) -> RasterDepth {
match depth {
ImgDepth::Depth8 => RasterDepth::Depth8,
ImgDepth::Depth12BE => RasterDepth::Depth16,
ImgDepth::Depth12LE => RasterDepth::Depth16,
ImgDepth::Depth16BE => RasterDepth::Depth16,
ImgDepth::Depth16LE => RasterDepth::Depth16,
}
}
fn bytes_per_pixel(depth: RasterDepth) -> usize {
match depth {
RasterDepth::Depth8 => 3,
RasterDepth::Depth16 => 6,
}
}
fn print_cfa(cfa: CFA) {
let s = match cfa {
CFA::BGGR => "BGGR",
CFA::GBRG => "GBRG",
CFA::GRBG => "GRBG",
CFA::RGGB => "RGGB",
};
println!("CFA: {}", s);
}
fn
|
(alg: Demosaic) {
let s = match alg {
Demosaic::None => "none",
Demosaic::NearestNeighbour => "nearest neighbour",
Demosaic::Linear => "linear",
Demosaic::Cubic => "cubic",
};
println!("Demosaic: {}", s);
}
fn read_file(
path: &Path, bayer_w: usize, bayer_h: usize,
depth: ImgDepth, cfa: CFA, alg: Demosaic,
buf: &mut [u8], texture: &mut sdl2::render::Texture) {
let maybe_file = File::open(path);
match maybe_file {
Ok(mut f) => {
let result = run_demosaic(&mut f, bayer_depth(depth), cfa, alg,
&mut RasterMut::new(bayer_w, bayer_h, raster_depth(depth), buf));
match result {
Ok(_) => (),
Err(e) => {
println!("Error occurred - {}", e);
return;
},
}
},
Err(e) => {
println!("Error occurred - {}", e);
return;
}
}
render_to_texture(texture, bayer_w, bayer_h, depth, &buf);
}
fn render_to_texture(
texture: &mut sdl2::render::Texture,
w: usize, h: usize, depth: ImgDepth, buf: &[u8]) {
match raster_depth(depth) {
RasterDepth::Depth8 => {
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
buffer[dst_offset + i] = buf[src_offset + i];
}
}
}).unwrap();
},
RasterDepth::Depth16 => {
let shr = if depth == ImgDepth::Depth12BE || depth == ImgDepth::Depth12LE { 4 } else { 8 };
let buf = unsafe {
slice::from_raw_parts(buf.as_ptr() as *const u16, buf.len() / 2)
};
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
// shr = 8 for u16 to u8, or
// shr = 4 for u12 to u8.
let v = buf[src_offset + i] >> shr;
buffer[dst_offset + i] = min(v, 255) as u8;
}
}
}).unwrap();
},
}
}
fn present_to_screen(
canvas: &mut sdl2::render::WindowCanvas,
texture: &sdl2::render::Texture) {
canvas.clear();
let _ = canvas.copy(&texture, None, None);
canvas.present();
}
|
print_alg
|
identifier_name
|
showbayer.rs
|
//! ShowBayer.
extern crate bayer;
extern crate sdl2;
use std::cmp::min;
use std::env;
use std::fs::File;
use std::path::Path;
use std::slice;
use bayer::*;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::pixels::PixelFormatEnum;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
enum ImgDepth {
Depth8,
Depth12BE,
Depth12LE,
Depth16BE,
Depth16LE,
}
fn main() {
let args: Vec<String> = env::args().skip(1).collect();
usage();
if args.len() < 4 {
return;
}
let bayer_w = args[0].parse::<usize>().unwrap();
let bayer_h = args[1].parse::<usize>().unwrap();
let depth = parse_depth(&args[2]);
let files = &args[3..];
let mut idx = 0;
let mut cfa = CFA::BGGR;
let mut alg = Demosaic::Linear;
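// Start the "old" state different from the current state so the first pass
// through the main loop prints the filename, CFA pattern, and algorithm.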
let mut old_idx = 1;
let mut old_cfa = CFA::RGGB;
let mut old_alg = Demosaic::None;
// Initialise SDL window.
let sdl = sdl2::init().unwrap();
let video = sdl.video().unwrap();
let window
= video.window("ShowBayer", bayer_w as u32, bayer_h as u32)
.position_centered()
.opengl()
.build().unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let mut event_pump = sdl.event_pump().unwrap();
let texture_creator = canvas.texture_creator();
let mut texture = texture_creator.create_texture_streaming(
PixelFormatEnum::RGB24,
bayer_w as u32, bayer_h as u32).unwrap();
let bytes_per_pixel = bytes_per_pixel(raster_depth(depth));
let mut buf = vec![0; bayer_w * bayer_h * bytes_per_pixel];
read_file(&Path::new(&files[0]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
let mut redraw = true;
'mainloop: loop {
if let Some(e) = event_pump.wait_event_timeout(60)
|
}
Event::KeyDown { keycode: Some(Keycode::Left),.. } => {
if idx == 0 {
idx = files.len() - 1;
} else {
idx = idx - 1;
}
}
_ => (),
}
if idx!= old_idx || cfa!= old_cfa || alg!= old_alg {
redraw = true;
}
}
else {
redraw = true;
}
if redraw {
if idx!= old_idx || cfa!= old_cfa || alg!= old_alg {
if old_idx!= idx {
old_idx = idx;
println!("{}", files[idx]);
}
if old_cfa!= cfa {
old_cfa = cfa;
print_cfa(cfa);
}
if old_alg!= alg {
old_alg = alg;
print_alg(alg);
}
read_file(&Path::new(&files[idx]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
}
present_to_screen(&mut canvas, &texture);
}
}
}
fn usage() {
println!("usage: ShowBayer <width> <height> <depth> [filenames...]");
println!();
println!(" depth 8, 12BE, 12LE, 16BE, 16LE");
println!();
println!(" <ESC> Quit.");
println!(" <left> Go to previous image.");
println!(" <right> Go to previous image.");
println!(" <space> Go to next image.");
println!();
println!(" F1-F4 Change CFA pattern: BGGR, GBRG, GRBG, RGGB");
println!(" 0-3 Change demosaicing algorithm");
println!();
}
fn parse_depth(s: &String) -> ImgDepth {
let s = s.to_uppercase();
if s == "8" {
ImgDepth::Depth8
} else if s == "12BE" {
ImgDepth::Depth12BE
} else if s == "12LE" {
ImgDepth::Depth12LE
} else if s == "16BE" {
ImgDepth::Depth16BE
} else if s == "16LE" {
ImgDepth::Depth16LE
} else {
panic!("invalid depth");
}
}
fn bayer_depth(depth: ImgDepth) -> BayerDepth {
match depth {
ImgDepth::Depth8 => BayerDepth::Depth8,
ImgDepth::Depth12BE => BayerDepth::Depth16BE,
ImgDepth::Depth12LE => BayerDepth::Depth16LE,
ImgDepth::Depth16BE => BayerDepth::Depth16BE,
ImgDepth::Depth16LE => BayerDepth::Depth16LE,
}
}
fn raster_depth(depth: ImgDepth) -> RasterDepth {
match depth {
ImgDepth::Depth8 => RasterDepth::Depth8,
ImgDepth::Depth12BE => RasterDepth::Depth16,
ImgDepth::Depth12LE => RasterDepth::Depth16,
ImgDepth::Depth16BE => RasterDepth::Depth16,
ImgDepth::Depth16LE => RasterDepth::Depth16,
}
}
fn bytes_per_pixel(depth: RasterDepth) -> usize {
match depth {
RasterDepth::Depth8 => 3,
RasterDepth::Depth16 => 6,
}
}
fn print_cfa(cfa: CFA) {
let s = match cfa {
CFA::BGGR => "BGGR",
CFA::GBRG => "GBRG",
CFA::GRBG => "GRBG",
CFA::RGGB => "RGGB",
};
println!("CFA: {}", s);
}
fn print_alg(alg: Demosaic) {
let s = match alg {
Demosaic::None => "none",
Demosaic::NearestNeighbour => "nearest neighbour",
Demosaic::Linear => "linear",
Demosaic::Cubic => "cubic",
};
println!("Demosaic: {}", s);
}
fn read_file(
path: &Path, bayer_w: usize, bayer_h: usize,
depth: ImgDepth, cfa: CFA, alg: Demosaic,
buf: &mut [u8], texture: &mut sdl2::render::Texture) {
let maybe_file = File::open(path);
match maybe_file {
Ok(mut f) => {
let result = run_demosaic(&mut f, bayer_depth(depth), cfa, alg,
&mut RasterMut::new(bayer_w, bayer_h, raster_depth(depth), buf));
match result {
Ok(_) => (),
Err(e) => {
println!("Error occurred - {}", e);
return;
},
}
},
Err(e) => {
println!("Error occurred - {}", e);
return;
}
}
render_to_texture(texture, bayer_w, bayer_h, depth, &buf);
}
fn render_to_texture(
texture: &mut sdl2::render::Texture,
w: usize, h: usize, depth: ImgDepth, buf: &[u8]) {
match raster_depth(depth) {
RasterDepth::Depth8 => {
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
buffer[dst_offset + i] = buf[src_offset + i];
}
}
}).unwrap();
},
RasterDepth::Depth16 => {
let shr = if depth == ImgDepth::Depth12BE || depth == ImgDepth::Depth12LE { 4 } else { 8 };
let buf = unsafe {
slice::from_raw_parts(buf.as_ptr() as *const u16, buf.len() / 2)
};
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
// shr = 8 for u16 to u8, or
// shr = 4 for u12 to u8.
let v = buf[src_offset + i] >> shr;
buffer[dst_offset + i] = min(v, 255) as u8;
}
}
}).unwrap();
},
}
}
fn present_to_screen(
canvas: &mut sdl2::render::WindowCanvas,
texture: &sdl2::render::Texture) {
canvas.clear();
let _ = canvas.copy(&texture, None, None);
canvas.present();
}
|
{
match e {
Event::Quit {..}
| Event::KeyDown { keycode: Some(Keycode::Escape), .. } => {
break 'mainloop;
},
Event::KeyDown { keycode: Some(Keycode::F1), .. } => { cfa = CFA::BGGR; },
Event::KeyDown { keycode: Some(Keycode::F2), .. } => { cfa = CFA::GBRG; },
Event::KeyDown { keycode: Some(Keycode::F3), .. } => { cfa = CFA::GRBG; },
Event::KeyDown { keycode: Some(Keycode::F4), .. } => { cfa = CFA::RGGB; },
Event::KeyDown { keycode: Some(Keycode::Num0), .. } => { alg = Demosaic::None; },
Event::KeyDown { keycode: Some(Keycode::Num1), .. } => { alg = Demosaic::NearestNeighbour; },
Event::KeyDown { keycode: Some(Keycode::Num2), .. } => { alg = Demosaic::Linear; },
Event::KeyDown { keycode: Some(Keycode::Num3), .. } => { alg = Demosaic::Cubic; },
Event::KeyDown { keycode: Some(Keycode::Space), .. }
| Event::KeyDown { keycode: Some(Keycode::Right), .. } => {
idx = (idx + 1) % files.len();
|
conditional_block
|
showbayer.rs
|
//! ShowBayer.
extern crate bayer;
extern crate sdl2;
use std::cmp::min;
use std::env;
use std::fs::File;
use std::path::Path;
use std::slice;
use bayer::*;
use sdl2::event::Event;
use sdl2::keyboard::Keycode;
use sdl2::pixels::PixelFormatEnum;
#[derive(Copy,Clone,Debug,Eq,PartialEq)]
enum ImgDepth {
Depth8,
Depth12BE,
Depth12LE,
Depth16BE,
Depth16LE,
}
fn main() {
let args: Vec<String> = env::args().skip(1).collect();
usage();
if args.len() < 4 {
return;
}
let bayer_w = args[0].parse::<usize>().unwrap();
let bayer_h = args[1].parse::<usize>().unwrap();
let depth = parse_depth(&args[2]);
let files = &args[3..];
let mut idx = 0;
let mut cfa = CFA::BGGR;
let mut alg = Demosaic::Linear;
let mut old_idx = 1;
let mut old_cfa = CFA::RGGB;
let mut old_alg = Demosaic::None;
// Initialise SDL window.
let sdl = sdl2::init().unwrap();
let video = sdl.video().unwrap();
let window
= video.window("ShowBayer", bayer_w as u32, bayer_h as u32)
.position_centered()
.opengl()
.build().unwrap();
let mut canvas = window.into_canvas().build().unwrap();
let mut event_pump = sdl.event_pump().unwrap();
let texture_creator = canvas.texture_creator();
let mut texture = texture_creator.create_texture_streaming(
PixelFormatEnum::RGB24,
bayer_w as u32, bayer_h as u32).unwrap();
let bytes_per_pixel = bytes_per_pixel(raster_depth(depth));
let mut buf = vec![0; bayer_w * bayer_h * bytes_per_pixel];
read_file(&Path::new(&files[0]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
let mut redraw = true;
'mainloop: loop {
if let Some(e) = event_pump.wait_event_timeout(60) {
match e {
Event::Quit {..}
| Event::KeyDown { keycode: Some(Keycode::Escape),.. } => {
|
},
Event::KeyDown { keycode: Some(Keycode::F1),.. } => { cfa = CFA::BGGR; },
Event::KeyDown { keycode: Some(Keycode::F2),.. } => { cfa = CFA::GBRG; },
Event::KeyDown { keycode: Some(Keycode::F3),.. } => { cfa = CFA::GRBG; },
Event::KeyDown { keycode: Some(Keycode::F4),.. } => { cfa = CFA::RGGB; },
Event::KeyDown { keycode: Some(Keycode::Num0),.. } => { alg = Demosaic::None; },
Event::KeyDown { keycode: Some(Keycode::Num1),.. } => { alg = Demosaic::NearestNeighbour; },
Event::KeyDown { keycode: Some(Keycode::Num2),.. } => { alg = Demosaic::Linear; },
Event::KeyDown { keycode: Some(Keycode::Num3),.. } => { alg = Demosaic::Cubic; },
Event::KeyDown { keycode: Some(Keycode::Space),.. }
| Event::KeyDown { keycode: Some(Keycode::Right),.. } => {
idx = (idx + 1) % files.len();
}
Event::KeyDown { keycode: Some(Keycode::Left),.. } => {
if idx == 0 {
idx = files.len() - 1;
} else {
idx = idx - 1;
}
}
_ => (),
}
if idx!= old_idx || cfa!= old_cfa || alg!= old_alg {
redraw = true;
}
} else {
redraw = true;
}
if redraw {
if idx!= old_idx || cfa!= old_cfa || alg!= old_alg {
if old_idx!= idx {
old_idx = idx;
println!("{}", files[idx]);
}
if old_cfa!= cfa {
old_cfa = cfa;
print_cfa(cfa);
}
if old_alg!= alg {
old_alg = alg;
print_alg(alg);
}
read_file(&Path::new(&files[idx]), bayer_w, bayer_h, depth, cfa, alg,
&mut buf, &mut texture);
}
present_to_screen(&mut canvas, &texture);
}
}
}
fn usage() {
println!("usage: ShowBayer <width> <height> <depth> [filenames...]");
println!();
println!(" depth 8, 12BE, 12LE, 16BE, 16LE");
println!();
println!(" <ESC> Quit.");
println!(" <left> Go to previous image.");
println!(" <right> Go to previous image.");
println!(" <space> Go to next image.");
println!();
println!(" F1-F4 Change CFA pattern: BGGR, GBRG, GRBG, RGGB");
println!(" 0-3 Change demosaicing algorithm");
println!();
}
fn parse_depth(s: &String) -> ImgDepth {
let s = s.to_uppercase();
if s == "8" {
ImgDepth::Depth8
} else if s == "12BE" {
ImgDepth::Depth12BE
} else if s == "12LE" {
ImgDepth::Depth12LE
} else if s == "16BE" {
ImgDepth::Depth16BE
} else if s == "16LE" {
ImgDepth::Depth16LE
} else {
panic!("invalid depth");
}
}
fn bayer_depth(depth: ImgDepth) -> BayerDepth {
match depth {
ImgDepth::Depth8 => BayerDepth::Depth8,
ImgDepth::Depth12BE => BayerDepth::Depth16BE,
ImgDepth::Depth12LE => BayerDepth::Depth16LE,
ImgDepth::Depth16BE => BayerDepth::Depth16BE,
ImgDepth::Depth16LE => BayerDepth::Depth16LE,
}
}
fn raster_depth(depth: ImgDepth) -> RasterDepth {
match depth {
ImgDepth::Depth8 => RasterDepth::Depth8,
ImgDepth::Depth12BE => RasterDepth::Depth16,
ImgDepth::Depth12LE => RasterDepth::Depth16,
ImgDepth::Depth16BE => RasterDepth::Depth16,
ImgDepth::Depth16LE => RasterDepth::Depth16,
}
}
fn bytes_per_pixel(depth: RasterDepth) -> usize {
match depth {
RasterDepth::Depth8 => 3,
RasterDepth::Depth16 => 6,
}
}
fn print_cfa(cfa: CFA) {
let s = match cfa {
CFA::BGGR => "BGGR",
CFA::GBRG => "GBRG",
CFA::GRBG => "GRBG",
CFA::RGGB => "RGGB",
};
println!("CFA: {}", s);
}
fn print_alg(alg: Demosaic) {
let s = match alg {
Demosaic::None => "none",
Demosaic::NearestNeighbour => "nearest neighbour",
Demosaic::Linear => "linear",
Demosaic::Cubic => "cubic",
};
println!("Demosaic: {}", s);
}
fn read_file(
path: &Path, bayer_w: usize, bayer_h: usize,
depth: ImgDepth, cfa: CFA, alg: Demosaic,
buf: &mut [u8], texture: &mut sdl2::render::Texture) {
let maybe_file = File::open(path);
match maybe_file {
Ok(mut f) => {
let result = run_demosaic(&mut f, bayer_depth(depth), cfa, alg,
&mut RasterMut::new(bayer_w, bayer_h, raster_depth(depth), buf));
match result {
Ok(_) => (),
Err(e) => {
println!("Error occurred - {}", e);
return;
},
}
},
Err(e) => {
println!("Error occurred - {}", e);
return;
}
}
render_to_texture(texture, bayer_w, bayer_h, depth, &buf);
}
fn render_to_texture(
texture: &mut sdl2::render::Texture,
w: usize, h: usize, depth: ImgDepth, buf: &[u8]) {
match raster_depth(depth) {
RasterDepth::Depth8 => {
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
buffer[dst_offset + i] = buf[src_offset + i];
}
}
}).unwrap();
},
RasterDepth::Depth16 => {
let shr = if depth == ImgDepth::Depth12BE || depth == ImgDepth::Depth12LE { 4 } else { 8 };
let buf = unsafe {
slice::from_raw_parts(buf.as_ptr() as *const u16, buf.len() / 2)
};
texture.with_lock(None, |buffer: &mut [u8], pitch: usize| {
for y in 0..h {
let src_offset = (3 * w) * y;
let dst_offset = pitch * y;
for i in 0..3 * w {
// shr = 8 for u16 to u8, or
// shr = 4 for u12 to u8.
let v = buf[src_offset + i] >> shr;
buffer[dst_offset + i] = min(v, 255) as u8;
}
}
}).unwrap();
},
}
}
fn present_to_screen(
canvas: &mut sdl2::render::WindowCanvas,
texture: &sdl2::render::Texture) {
canvas.clear();
let _ = canvas.copy(&texture, None, None);
canvas.present();
}
|
break 'mainloop;
|
random_line_split
|
cssnamespacerule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding;
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding::CSSNamespaceRuleMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylearc::Arc;
use style::stylesheets::NamespaceRule;
#[dom_struct]
pub struct CSSNamespaceRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
namespacerule: Arc<Locked<NamespaceRule>>,
}
impl CSSNamespaceRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, namespacerule: Arc<Locked<NamespaceRule>>)
-> CSSNamespaceRule {
CSSNamespaceRule {
cssrule: CSSRule::new_inherited(parent_stylesheet),
namespacerule: namespacerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
namespacerule: Arc<Locked<NamespaceRule>>) -> Root<CSSNamespaceRule>
|
}
impl CSSNamespaceRuleMethods for CSSNamespaceRule {
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-prefix
fn Prefix(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).prefix
.as_ref().map(|s| s.to_string().into())
.unwrap_or(DOMString::new())
}
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-namespaceuri
fn NamespaceURI(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
(*self.namespacerule.read_with(&guard).url).into()
}
}
impl SpecificCSSRule for CSSNamespaceRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::NAMESPACE_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).to_css_string(&guard).into()
}
}
|
{
reflect_dom_object(box CSSNamespaceRule::new_inherited(parent_stylesheet, namespacerule),
window,
CSSNamespaceRuleBinding::Wrap)
}
|
identifier_body
|
cssnamespacerule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding;
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding::CSSNamespaceRuleMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylearc::Arc;
use style::stylesheets::NamespaceRule;
#[dom_struct]
pub struct CSSNamespaceRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
namespacerule: Arc<Locked<NamespaceRule>>,
}
impl CSSNamespaceRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, namespacerule: Arc<Locked<NamespaceRule>>)
-> CSSNamespaceRule {
CSSNamespaceRule {
cssrule: CSSRule::new_inherited(parent_stylesheet),
namespacerule: namespacerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
namespacerule: Arc<Locked<NamespaceRule>>) -> Root<CSSNamespaceRule> {
reflect_dom_object(box CSSNamespaceRule::new_inherited(parent_stylesheet, namespacerule),
window,
CSSNamespaceRuleBinding::Wrap)
}
}
impl CSSNamespaceRuleMethods for CSSNamespaceRule {
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-prefix
fn Prefix(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).prefix
.as_ref().map(|s| s.to_string().into())
.unwrap_or(DOMString::new())
}
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-namespaceuri
fn NamespaceURI(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
(*self.namespacerule.read_with(&guard).url).into()
}
}
impl SpecificCSSRule for CSSNamespaceRule {
fn
|
(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::NAMESPACE_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).to_css_string(&guard).into()
}
}
|
ty
|
identifier_name
|
cssnamespacerule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding;
use dom::bindings::codegen::Bindings::CSSNamespaceRuleBinding::CSSNamespaceRuleMethods;
use dom::bindings::js::Root;
use dom::bindings::reflector::reflect_dom_object;
|
use style::shared_lock::{Locked, ToCssWithGuard};
use style::stylearc::Arc;
use style::stylesheets::NamespaceRule;
#[dom_struct]
pub struct CSSNamespaceRule {
cssrule: CSSRule,
#[ignore_heap_size_of = "Arc"]
namespacerule: Arc<Locked<NamespaceRule>>,
}
impl CSSNamespaceRule {
fn new_inherited(parent_stylesheet: &CSSStyleSheet, namespacerule: Arc<Locked<NamespaceRule>>)
-> CSSNamespaceRule {
CSSNamespaceRule {
cssrule: CSSRule::new_inherited(parent_stylesheet),
namespacerule: namespacerule,
}
}
#[allow(unrooted_must_root)]
pub fn new(window: &Window, parent_stylesheet: &CSSStyleSheet,
namespacerule: Arc<Locked<NamespaceRule>>) -> Root<CSSNamespaceRule> {
reflect_dom_object(box CSSNamespaceRule::new_inherited(parent_stylesheet, namespacerule),
window,
CSSNamespaceRuleBinding::Wrap)
}
}
impl CSSNamespaceRuleMethods for CSSNamespaceRule {
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-prefix
fn Prefix(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).prefix
.as_ref().map(|s| s.to_string().into())
.unwrap_or(DOMString::new())
}
// https://drafts.csswg.org/cssom/#dom-cssnamespacerule-namespaceuri
fn NamespaceURI(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
(*self.namespacerule.read_with(&guard).url).into()
}
}
impl SpecificCSSRule for CSSNamespaceRule {
fn ty(&self) -> u16 {
use dom::bindings::codegen::Bindings::CSSRuleBinding::CSSRuleConstants;
CSSRuleConstants::NAMESPACE_RULE
}
fn get_css(&self) -> DOMString {
let guard = self.cssrule.shared_lock().read();
self.namespacerule.read_with(&guard).to_css_string(&guard).into()
}
}
|
use dom::bindings::str::DOMString;
use dom::cssrule::{CSSRule, SpecificCSSRule};
use dom::cssstylesheet::CSSStyleSheet;
use dom::window::Window;
use dom_struct::dom_struct;
|
random_line_split
|
exec.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
use std::error::Error;
use std::ffi::OsString;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::Command;
use walkdir::DirEntry;
use super::{Matcher, MatcherIO};
enum Arg {
Filename,
LiteralArg(OsString),
}
pub struct SingleExecMatcher {
executable: String,
args: Vec<Arg>,
exec_in_parent_dir: bool,
}
impl SingleExecMatcher {
pub fn
|
(
executable: &str,
args: &[&str],
exec_in_parent_dir: bool,
) -> Result<Self, Box<dyn Error>> {
let transformed_args = args
.iter()
.map(|&a| match a {
"{}" => Arg::Filename,
_ => Arg::LiteralArg(OsString::from(a)),
})
.collect();
Ok(Self {
executable: executable.to_string(),
args: transformed_args,
exec_in_parent_dir,
})
}
}
impl Matcher for SingleExecMatcher {
fn matches(&self, file_info: &DirEntry, _: &mut MatcherIO) -> bool {
let mut command = Command::new(&self.executable);
let path_to_file = if self.exec_in_parent_dir {
if let Some(f) = file_info.path().file_name() {
Path::new(".").join(f)
} else {
Path::new(".").join(file_info.path())
}
} else {
file_info.path().to_path_buf()
};
for arg in &self.args {
command.arg(match *arg {
Arg::LiteralArg(ref a) => a.as_os_str(),
Arg::Filename => path_to_file.as_os_str(),
});
}
if self.exec_in_parent_dir {
match file_info.path().parent() {
None => {
// Root paths like "/" have no parent. Run them from the root to match GNU find.
command.current_dir(file_info.path());
}
Some(parent) if parent == Path::new("") => {
// Paths like "foo" have a parent of "". Avoid chdir("").
}
Some(parent) => {
command.current_dir(parent);
}
}
}
match command.status() {
Ok(status) => status.success(),
Err(e) => {
writeln!(&mut stderr(), "Failed to run {}: {}", self.executable, e).unwrap();
false
}
}
}
fn has_side_effects(&self) -> bool {
true
}
}
#[cfg(test)]
/// No tests here, because we need to call out to an external executable. See
/// tests/exec_unit_tests.rs instead.
mod tests {}
|
new
|
identifier_name
|
exec.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
use std::error::Error;
use std::ffi::OsString;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::Command;
use walkdir::DirEntry;
use super::{Matcher, MatcherIO};
enum Arg {
Filename,
LiteralArg(OsString),
}
pub struct SingleExecMatcher {
executable: String,
args: Vec<Arg>,
exec_in_parent_dir: bool,
}
impl SingleExecMatcher {
pub fn new(
executable: &str,
args: &[&str],
exec_in_parent_dir: bool,
) -> Result<Self, Box<dyn Error>> {
let transformed_args = args
.iter()
.map(|&a| match a {
"{}" => Arg::Filename,
_ => Arg::LiteralArg(OsString::from(a)),
})
.collect();
Ok(Self {
executable: executable.to_string(),
args: transformed_args,
exec_in_parent_dir,
})
}
}
impl Matcher for SingleExecMatcher {
fn matches(&self, file_info: &DirEntry, _: &mut MatcherIO) -> bool {
let mut command = Command::new(&self.executable);
let path_to_file = if self.exec_in_parent_dir
|
else {
file_info.path().to_path_buf()
};
for arg in &self.args {
command.arg(match *arg {
Arg::LiteralArg(ref a) => a.as_os_str(),
Arg::Filename => path_to_file.as_os_str(),
});
}
if self.exec_in_parent_dir {
match file_info.path().parent() {
None => {
// Root paths like "/" have no parent. Run them from the root to match GNU find.
command.current_dir(file_info.path());
}
Some(parent) if parent == Path::new("") => {
// Paths like "foo" have a parent of "". Avoid chdir("").
}
Some(parent) => {
command.current_dir(parent);
}
}
}
match command.status() {
Ok(status) => status.success(),
Err(e) => {
writeln!(&mut stderr(), "Failed to run {}: {}", self.executable, e).unwrap();
false
}
}
}
fn has_side_effects(&self) -> bool {
true
}
}
#[cfg(test)]
/// No tests here, because we need to call out to an external executable. See
/// tests/exec_unit_tests.rs instead.
mod tests {}
|
{
if let Some(f) = file_info.path().file_name() {
Path::new(".").join(f)
} else {
Path::new(".").join(file_info.path())
}
}
|
conditional_block
|
exec.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
use std::error::Error;
use std::ffi::OsString;
use std::io::{stderr, Write};
use std::path::Path;
use std::process::Command;
use walkdir::DirEntry;
use super::{Matcher, MatcherIO};
enum Arg {
Filename,
LiteralArg(OsString),
}
pub struct SingleExecMatcher {
executable: String,
args: Vec<Arg>,
exec_in_parent_dir: bool,
}
impl SingleExecMatcher {
pub fn new(
executable: &str,
args: &[&str],
exec_in_parent_dir: bool,
) -> Result<Self, Box<dyn Error>> {
let transformed_args = args
.iter()
.map(|&a| match a {
"{}" => Arg::Filename,
_ => Arg::LiteralArg(OsString::from(a)),
})
.collect();
Ok(Self {
executable: executable.to_string(),
args: transformed_args,
exec_in_parent_dir,
})
}
}
impl Matcher for SingleExecMatcher {
fn matches(&self, file_info: &DirEntry, _: &mut MatcherIO) -> bool {
let mut command = Command::new(&self.executable);
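// When running from the parent directory, pass the file by its name relative
// to ".", so the spawned command sees a path that is valid after the chdir.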
let path_to_file = if self.exec_in_parent_dir {
if let Some(f) = file_info.path().file_name() {
Path::new(".").join(f)
} else {
Path::new(".").join(file_info.path())
}
} else {
file_info.path().to_path_buf()
};
for arg in &self.args {
command.arg(match *arg {
Arg::LiteralArg(ref a) => a.as_os_str(),
|
Arg::Filename => path_to_file.as_os_str(),
});
}
if self.exec_in_parent_dir {
match file_info.path().parent() {
None => {
// Root paths like "/" have no parent. Run them from the root to match GNU find.
command.current_dir(file_info.path());
}
Some(parent) if parent == Path::new("") => {
// Paths like "foo" have a parent of "". Avoid chdir("").
}
Some(parent) => {
command.current_dir(parent);
}
}
}
match command.status() {
Ok(status) => status.success(),
Err(e) => {
writeln!(&mut stderr(), "Failed to run {}: {}", self.executable, e).unwrap();
false
}
}
}
fn has_side_effects(&self) -> bool {
true
}
}
#[cfg(test)]
/// No tests here, because we need to call out to an external executable. See
/// tests/exec_unit_tests.rs instead.
mod tests {}
|
random_line_split
|
|
v10Data.rs
|
pub fn v10Data() -> [f64; 81]
|
v10List[16] = -0.0655694;
v10List[17] = -0.0556668;
v10List[18] = 0.0269233;
v10List[19] = 0.0368759;
v10List[20] = 0.0886982;
v10List[21] = 0.0965984;
v10List[22] = 0.0990651;
v10List[23] = 0.0818134;
v10List[24] = 0.0693681;
v10List[25] = 0.00481364;
v10List[26] = 0.00611984;
v10List[27] = 0.0390983;
v10List[28] = -0.011949;
v10List[29] = -0.114423;
v10List[30] = -0.0860141;
v10List[31] = -0.237726;
v10List[32] = -0.0579294;
v10List[33] = -0.0644042;
v10List[34] = -0.0387676;
v10List[35] = 0.0333347;
v10List[36] = 0.196683;
v10List[37] = 0.0181403;
v10List[38] = 0.228297;
v10List[39] = 0.00502533;
v10List[40] = 0.0192794;
v10List[41] = 0.121928;
v10List[42] = -0.214032;
v10List[43] = 0.0421587;
v10List[44] = -0.0623173;
v10List[45] = -0.0749197;
v10List[46] = 0.0459746;
v10List[47] = 0.109541;
v10List[48] = -0.0882381;
v10List[49] = 0.0103898;
v10List[50] = 0.0575668;
v10List[51] = -0.0759044;
v10List[52] = -0.160495;
v10List[53] = 0.0517623;
v10List[54] = -0.188036;
v10List[55] = 0.115881;
v10List[56] = -0.146265;
v10List[57] = -0.225199;
v10List[58] = 0.0933194;
v10List[59] = -0.0926862;
v10List[60] = 0.124726;
v10List[61] = 0.0724166;
v10List[62] = 0.0777776;
v10List[63] = 0.206614;
v10List[64] = 0.102666;
v10List[65] = 0.201758;
v10List[66] = -0.0794907;
v10List[67] = 0.0997935;
v10List[68] = 0.0766996;
v10List[69] = -0.133814;
v10List[70] = 0.0212788;
v10List[71] = -0.0730455;
v10List[72] = -0.118137;
v10List[73] = -0.194071;
v10List[74] = 0.00833846;
v10List[75] = 0.0109041;
v10List[76] = -0.133108;
v10List[77] = 0.137109;
v10List[78] = 0.0202285;
v10List[79] = 0.00710011;
v10List[80] = 0.0928652;
v10List
}
|
{
let mut v10List: [f64; 81] = [0.0; 81];
// index is wavelengths of light, stepped by 5, from 380 to 780
v10List[0] = -0.296898;
v10List[1] = -0.230842;
v10List[2] = -0.160906;
v10List[3] = -0.0696234;
v10List[4] = 0.0198897;
v10List[5] = 0.0648208;
v10List[6] = 0.0979055;
v10List[7] = 0.0837608;
v10List[8] = 0.0706796;
v10List[9] = 0.0414158;
v10List[10] = -8.60E-17;
v10List[11] = -0.0370519;
v10List[12] = -0.0931451;
v10List[13] = -0.106081;
v10List[14] = -0.0983992;
v10List[15] = -0.0826705;
|
identifier_body
|
v10Data.rs
|
pub fn
|
() -> [f64; 81] {
let mut v10List: [f64; 81] = [0.0; 81];
// index is wavelengths of light, stepped by 5, from 380 to 780
v10List[0] = -0.296898;
v10List[1] = -0.230842;
v10List[2] = -0.160906;
v10List[3] = -0.0696234;
v10List[4] = 0.0198897;
v10List[5] = 0.0648208;
v10List[6] = 0.0979055;
v10List[7] = 0.0837608;
v10List[8] = 0.0706796;
v10List[9] = 0.0414158;
v10List[10] = -8.60E-17;
v10List[11] = -0.0370519;
v10List[12] = -0.0931451;
v10List[13] = -0.106081;
v10List[14] = -0.0983992;
v10List[15] = -0.0826705;
v10List[16] = -0.0655694;
v10List[17] = -0.0556668;
v10List[18] = 0.0269233;
v10List[19] = 0.0368759;
v10List[20] = 0.0886982;
v10List[21] = 0.0965984;
v10List[22] = 0.0990651;
v10List[23] = 0.0818134;
v10List[24] = 0.0693681;
v10List[25] = 0.00481364;
v10List[26] = 0.00611984;
v10List[27] = 0.0390983;
v10List[28] = -0.011949;
v10List[29] = -0.114423;
v10List[30] = -0.0860141;
v10List[31] = -0.237726;
v10List[32] = -0.0579294;
v10List[33] = -0.0644042;
v10List[34] = -0.0387676;
v10List[35] = 0.0333347;
v10List[36] = 0.196683;
v10List[37] = 0.0181403;
v10List[38] = 0.228297;
v10List[39] = 0.00502533;
v10List[40] = 0.0192794;
v10List[41] = 0.121928;
v10List[42] = -0.214032;
v10List[43] = 0.0421587;
v10List[44] = -0.0623173;
v10List[45] = -0.0749197;
v10List[46] = 0.0459746;
v10List[47] = 0.109541;
v10List[48] = -0.0882381;
v10List[49] = 0.0103898;
v10List[50] = 0.0575668;
v10List[51] = -0.0759044;
v10List[52] = -0.160495;
v10List[53] = 0.0517623;
v10List[54] = -0.188036;
v10List[55] = 0.115881;
v10List[56] = -0.146265;
v10List[57] = -0.225199;
v10List[58] = 0.0933194;
v10List[59] = -0.0926862;
v10List[60] = 0.124726;
v10List[61] = 0.0724166;
v10List[62] = 0.0777776;
v10List[63] = 0.206614;
v10List[64] = 0.102666;
v10List[65] = 0.201758;
v10List[66] = -0.0794907;
v10List[67] = 0.0997935;
v10List[68] = 0.0766996;
v10List[69] = -0.133814;
v10List[70] = 0.0212788;
v10List[71] = -0.0730455;
v10List[72] = -0.118137;
v10List[73] = -0.194071;
v10List[74] = 0.00833846;
v10List[75] = 0.0109041;
v10List[76] = -0.133108;
v10List[77] = 0.137109;
v10List[78] = 0.0202285;
v10List[79] = 0.00710011;
v10List[80] = 0.0928652;
v10List
}
|
v10Data
|
identifier_name
|
v10Data.rs
|
pub fn v10Data() -> [f64; 81] {
let mut v10List: [f64; 81] = [0.0; 81];
// index is wavelengths of light, stepped by 5, from 380 to 780
v10List[0] = -0.296898;
v10List[1] = -0.230842;
v10List[2] = -0.160906;
v10List[3] = -0.0696234;
v10List[4] = 0.0198897;
v10List[5] = 0.0648208;
v10List[6] = 0.0979055;
v10List[7] = 0.0837608;
v10List[8] = 0.0706796;
v10List[9] = 0.0414158;
v10List[10] = -8.60E-17;
v10List[11] = -0.0370519;
v10List[12] = -0.0931451;
v10List[13] = -0.106081;
v10List[14] = -0.0983992;
v10List[15] = -0.0826705;
v10List[16] = -0.0655694;
v10List[17] = -0.0556668;
v10List[18] = 0.0269233;
v10List[19] = 0.0368759;
v10List[20] = 0.0886982;
v10List[21] = 0.0965984;
|
v10List[24] = 0.0693681;
v10List[25] = 0.00481364;
v10List[26] = 0.00611984;
v10List[27] = 0.0390983;
v10List[28] = -0.011949;
v10List[29] = -0.114423;
v10List[30] = -0.0860141;
v10List[31] = -0.237726;
v10List[32] = -0.0579294;
v10List[33] = -0.0644042;
v10List[34] = -0.0387676;
v10List[35] = 0.0333347;
v10List[36] = 0.196683;
v10List[37] = 0.0181403;
v10List[38] = 0.228297;
v10List[39] = 0.00502533;
v10List[40] = 0.0192794;
v10List[41] = 0.121928;
v10List[42] = -0.214032;
v10List[43] = 0.0421587;
v10List[44] = -0.0623173;
v10List[45] = -0.0749197;
v10List[46] = 0.0459746;
v10List[47] = 0.109541;
v10List[48] = -0.0882381;
v10List[49] = 0.0103898;
v10List[50] = 0.0575668;
v10List[51] = -0.0759044;
v10List[52] = -0.160495;
v10List[53] = 0.0517623;
v10List[54] = -0.188036;
v10List[55] = 0.115881;
v10List[56] = -0.146265;
v10List[57] = -0.225199;
v10List[58] = 0.0933194;
v10List[59] = -0.0926862;
v10List[60] = 0.124726;
v10List[61] = 0.0724166;
v10List[62] = 0.0777776;
v10List[63] = 0.206614;
v10List[64] = 0.102666;
v10List[65] = 0.201758;
v10List[66] = -0.0794907;
v10List[67] = 0.0997935;
v10List[68] = 0.0766996;
v10List[69] = -0.133814;
v10List[70] = 0.0212788;
v10List[71] = -0.0730455;
v10List[72] = -0.118137;
v10List[73] = -0.194071;
v10List[74] = 0.00833846;
v10List[75] = 0.0109041;
v10List[76] = -0.133108;
v10List[77] = 0.137109;
v10List[78] = 0.0202285;
v10List[79] = 0.00710011;
v10List[80] = 0.0928652;
v10List
}
|
v10List[22] = 0.0990651;
v10List[23] = 0.0818134;
|
random_line_split
|
evaluation.rs
|
use std::cmp;
use rayon;
use {Expression, Length};
use reduce::{Reduce, ReduceScalar};
const MIN_THRESHOLD: usize = 4096;
const MAX_COUNT: usize = 32;
fn get_len(l1: Length, l2: Length) -> usize {
match cmp::min(l1, l2) {
Length::Finite(x) => x,
Length::Infinite => panic!("trying to reduce an infinite expression into an infinite reducer")
}
}
pub fn
|
<E, R>(e: E, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
{
let len = get_len(e.length(), reduce.expected_length());
eval_inner(e, cmp::max(len / MAX_COUNT, MIN_THRESHOLD), reduce)
}
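// Divide and conquer: split the expression and the reducer in half until each
// piece is at most `threshold` elements, then evaluate the halves in parallel
// with rayon::join and combine the partial results.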
fn eval_inner<E, R>(e: E, threshold: usize, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
{
let len = get_len(e.length(), reduce.expected_length());
assert!(e.length().compatible(reduce.expected_length()));
if len > threshold {
let (low, high, scalar) = reduce.split();
let (e_low, e_high) = e.split(false);
let (a, b) = rayon::join(|| eval_inner(e_low, threshold, low),
|| eval_inner(e_high, threshold, high));
scalar.combine(a, b)
} else {
reduce.reduce(e)
}
}
|
evaluate
|
identifier_name
|
evaluation.rs
|
use std::cmp;
use rayon;
use {Expression, Length};
use reduce::{Reduce, ReduceScalar};
const MIN_THRESHOLD: usize = 4096;
const MAX_COUNT: usize = 32;
fn get_len(l1: Length, l2: Length) -> usize {
match cmp::min(l1, l2) {
Length::Finite(x) => x,
Length::Infinite => panic!("trying to reduce an infinite expression into an infinite reducer")
}
}
pub fn evaluate<E, R>(e: E, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
{
let len = get_len(e.length(), reduce.expected_length());
|
{
let len = get_len(e.length(), reduce.expected_length());
assert!(e.length().compatible(reduce.expected_length()));
if len > threshold {
let (low, high, scalar) = reduce.split();
let (e_low, e_high) = e.split(false);
let (a, b) = rayon::join(|| eval_inner(e_low, threshold, low),
|| eval_inner(e_high, threshold, high));
scalar.combine(a, b)
} else {
reduce.reduce(e)
}
}
|
eval_inner(e, cmp::max(len / MAX_COUNT, MIN_THRESHOLD), reduce)
}
fn eval_inner<E, R>(e: E, threshold: usize, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
|
random_line_split
|
evaluation.rs
|
use std::cmp;
use rayon;
use {Expression, Length};
use reduce::{Reduce, ReduceScalar};
const MIN_THRESHOLD: usize = 4096;
const MAX_COUNT: usize = 32;
fn get_len(l1: Length, l2: Length) -> usize
|
pub fn evaluate<E, R>(e: E, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
{
let len = get_len(e.length(), reduce.expected_length());
eval_inner(e, cmp::max(len / MAX_COUNT, MIN_THRESHOLD), reduce)
}
fn eval_inner<E, R>(e: E, threshold: usize, reduce: R) -> R::Output
where E: Expression, R: Reduce<E::Element>
{
let len = get_len(e.length(), reduce.expected_length());
assert!(e.length().compatible(reduce.expected_length()));
if len > threshold {
let (low, high, scalar) = reduce.split();
let (e_low, e_high) = e.split(false);
let (a, b) = rayon::join(|| eval_inner(e_low, threshold, low),
|| eval_inner(e_high, threshold, high));
scalar.combine(a, b)
} else {
reduce.reduce(e)
}
}
|
{
match cmp::min(l1, l2) {
Length::Finite(x) => x,
Length::Infinite => panic!("trying to reduce an infinite expression into an infinite reducer")
}
}
|
identifier_body
|
noparse.rs
|
macro_rules! noparse(
($name:ident, $re:expr) => (
#[test]
fn $name() {
let re = $re;
match regex_new!(re) {
Err(_) => {},
Ok(_) => panic!("Regex '{}' should cause a parse error.", re),
}
}
);
);
noparse!(fail_double_repeat, "a**");
noparse!(fail_no_repeat_arg, "*");
|
noparse!(fail_class_incomplete, "[A-");
noparse!(fail_class_not_closed, "[A");
noparse!(fail_class_no_begin, r"[\A]");
noparse!(fail_class_no_end, r"[\z]");
noparse!(fail_class_no_boundary, r"[\b]");
noparse!(fail_open_paren, "(");
noparse!(fail_close_paren, ")");
noparse!(fail_invalid_range, "[a-Z]");
noparse!(fail_empty_capture_name, "(?P<>a)");
noparse!(fail_empty_capture_exp, "(?P<name>)");
noparse!(fail_bad_capture_name, "(?P<na-me>)");
noparse!(fail_bad_flag, "(?a)a");
noparse!(fail_empty_alt_before, "|a");
noparse!(fail_empty_alt_after, "a|");
noparse!(fail_too_big, "a{10000000}");
noparse!(fail_counted_no_close, "a{1001");
noparse!(fail_unfinished_cap, "(?");
noparse!(fail_unfinished_escape, "\\");
noparse!(fail_octal_digit, r"\8");
noparse!(fail_hex_digit, r"\xG0");
noparse!(fail_hex_short, r"\xF");
noparse!(fail_hex_long_digits, r"\x{fffg}");
noparse!(fail_flag_bad, "(?a)");
noparse!(fail_flag_empty, "(?)");
noparse!(fail_double_neg, "(?-i-i)");
noparse!(fail_neg_empty, "(?i-)");
noparse!(fail_empty_group, "()");
noparse!(fail_dupe_named, "(?P<a>.)(?P<a>.)");
noparse!(fail_range_end_no_class, "[a-[:lower:]]");
noparse!(fail_range_end_no_begin, r"[a-\A]");
noparse!(fail_range_end_no_end, r"[a-\z]");
noparse!(fail_range_end_no_boundary, r"[a-\b]");
|
noparse!(fail_incomplete_escape, "\\");
|
random_line_split
|
mod.rs
|
//! Job scheduling.
use Result;
use system::Job;
mod impartial;
mod queue;
pub use self::impartial::Impartial;
pub use self::queue::{Interval, Queue};
/// A scheduling policy.
pub trait Schedule {
/// The data consumed by the policy.
type Data;
|
fn push(&mut self, f64, Self::Data) -> Result<()>;
}
/// A scheduling decision.
#[derive(Clone, Debug)]
pub enum Decision {
Accept {
/// The start of the execution interval.
start: f64,
/// The end of the execution interval.
finish: f64,
/// The mapping of the job to the platform.
mapping: Mapping,
},
Reject,
}
/// A mapping of a job’s processing elements to the platform’s processing
/// elements.
pub type Mapping = Vec<(usize, usize)>;
/// A placeholder signifying that no data are needed.
#[derive(Clone, Copy)]
pub struct NoData;
impl Decision {
/// Create an accept decision.
#[inline]
pub fn accept(start: f64, finish: f64, mapping: Mapping) -> Decision {
Decision::Accept { start: start, finish: finish, mapping: mapping }
}
/// Create a reject decision.
#[inline]
pub fn reject() -> Decision {
Decision::Reject
}
}
impl<'l, T> From<&'l T> for NoData {
#[inline]
fn from(_: &'l T) -> NoData {
NoData
}
}
|
/// Take a decision with respect to a job.
fn next(&mut self, &Job) -> Result<Decision>;
/// Advance time and consume the data accumulated since the previous call.
|
random_line_split
|
mod.rs
|
//! Job scheduling.
use Result;
use system::Job;
mod impartial;
mod queue;
pub use self::impartial::Impartial;
pub use self::queue::{Interval, Queue};
/// A scheduling policy.
pub trait Schedule {
/// The data consumed by the policy.
type Data;
/// Take a decision with respect to a job.
fn next(&mut self, &Job) -> Result<Decision>;
/// Advance time and consume the data accumulated since the previous call.
fn push(&mut self, f64, Self::Data) -> Result<()>;
}
/// A scheduling decision.
#[derive(Clone, Debug)]
pub enum
|
{
Accept {
/// The start of the execution interval.
start: f64,
/// The end of the execution interval.
finish: f64,
/// The mapping of the job to the platform.
mapping: Mapping,
},
Reject,
}
/// A mapping of a job’s processing elements to the platform’s processing
/// elements.
pub type Mapping = Vec<(usize, usize)>;
/// A placeholder signifying that no data are needed.
#[derive(Clone, Copy)]
pub struct NoData;
impl Decision {
/// Create an accept decision.
#[inline]
pub fn accept(start: f64, finish: f64, mapping: Mapping) -> Decision {
Decision::Accept { start: start, finish: finish, mapping: mapping }
}
/// Create a reject decision.
#[inline]
pub fn reject() -> Decision {
Decision::Reject
}
}
impl<'l, T> From<&'l T> for NoData {
#[inline]
fn from(_: &'l T) -> NoData {
NoData
}
}
|
Decision
|
identifier_name
|
mod.rs
|
//! Job scheduling.
use Result;
use system::Job;
mod impartial;
mod queue;
pub use self::impartial::Impartial;
pub use self::queue::{Interval, Queue};
/// A scheduling policy.
pub trait Schedule {
/// The data consumed by the policy.
type Data;
/// Take a decision with respect to a job.
fn next(&mut self, &Job) -> Result<Decision>;
/// Advance time and consume the data accumulated since the previous call.
fn push(&mut self, f64, Self::Data) -> Result<()>;
}
/// A scheduling decision.
#[derive(Clone, Debug)]
pub enum Decision {
Accept {
/// The start of the execution interval.
start: f64,
/// The end of the execution interval.
finish: f64,
/// The mapping of the job to the platform.
mapping: Mapping,
},
Reject,
}
/// A mapping of a job’s processing elements to the platform’s processing
/// elements.
pub type Mapping = Vec<(usize, usize)>;
/// A placeholder signifying that no data are needed.
#[derive(Clone, Copy)]
pub struct NoData;
impl Decision {
/// Create an accept decision.
#[inline]
pub fn accept(start: f64, finish: f64, mapping: Mapping) -> Decision {
Decision::Accept { start: start, finish: finish, mapping: mapping }
}
/// Create a reject decision.
#[inline]
pub fn reject() -> Decision {
Decision::Reject
}
}
impl<'l, T> From<&'l T> for NoData {
#[inline]
fn from(_: &'l T) -> NoData {
|
NoData
}
}
|
identifier_body
|
|
identity.rs
|
use std::ops::Mul;
use num::One;
use structs::mat;
use traits::operations::{Inv, Transpose};
use traits::geometry::{Translate, Rotate, Transform, AbsoluteRotate};
impl One for mat::Identity {
#[inline]
fn one() -> mat::Identity {
mat::Identity::new()
}
}
impl Inv for mat::Identity {
fn inv(&self) -> Option<mat::Identity> {
Some(mat::Identity::new())
}
fn inv_mut(&mut self) -> bool {
true
}
}
impl<T: Clone> Mul<T> for mat::Identity {
type Output = T;
#[inline]
fn mul(self, other: T) -> T {
other
}
}
impl Transpose for mat::Identity {
#[inline]
fn transpose(&self) -> mat::Identity {
mat::Identity::new()
}
#[inline]
fn transpose_mut(&mut self) {
}
}
|
v.clone()
}
#[inline]
fn inv_translate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Rotate<V> for mat::Identity {
#[inline]
fn rotate(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_rotate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> AbsoluteRotate<V> for mat::Identity {
#[inline]
fn absolute_rotate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Transform<V> for mat::Identity {
#[inline]
fn transform(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_transform(&self, v: &V) -> V {
v.clone()
}
}
|
impl<V: Clone> Translate<V> for mat::Identity {
#[inline]
fn translate(&self, v: &V) -> V {
|
random_line_split
|
identity.rs
|
use std::ops::Mul;
use num::One;
use structs::mat;
use traits::operations::{Inv, Transpose};
use traits::geometry::{Translate, Rotate, Transform, AbsoluteRotate};
impl One for mat::Identity {
#[inline]
fn one() -> mat::Identity {
mat::Identity::new()
}
}
impl Inv for mat::Identity {
fn inv(&self) -> Option<mat::Identity> {
Some(mat::Identity::new())
}
fn inv_mut(&mut self) -> bool {
true
}
}
impl<T: Clone> Mul<T> for mat::Identity {
type Output = T;
#[inline]
fn mul(self, other: T) -> T
|
}
impl Transpose for mat::Identity {
#[inline]
fn transpose(&self) -> mat::Identity {
mat::Identity::new()
}
#[inline]
fn transpose_mut(&mut self) {
}
}
impl<V: Clone> Translate<V> for mat::Identity {
#[inline]
fn translate(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_translate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Rotate<V> for mat::Identity {
#[inline]
fn rotate(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_rotate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> AbsoluteRotate<V> for mat::Identity {
#[inline]
fn absolute_rotate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Transform<V> for mat::Identity {
#[inline]
fn transform(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_transform(&self, v: &V) -> V {
v.clone()
}
}
|
{
other
}
|
identifier_body
|
identity.rs
|
use std::ops::Mul;
use num::One;
use structs::mat;
use traits::operations::{Inv, Transpose};
use traits::geometry::{Translate, Rotate, Transform, AbsoluteRotate};
impl One for mat::Identity {
#[inline]
fn one() -> mat::Identity {
mat::Identity::new()
}
}
impl Inv for mat::Identity {
fn inv(&self) -> Option<mat::Identity> {
Some(mat::Identity::new())
}
fn inv_mut(&mut self) -> bool {
true
}
}
impl<T: Clone> Mul<T> for mat::Identity {
type Output = T;
#[inline]
fn mul(self, other: T) -> T {
other
}
}
impl Transpose for mat::Identity {
#[inline]
fn transpose(&self) -> mat::Identity {
mat::Identity::new()
}
#[inline]
fn transpose_mut(&mut self) {
}
}
impl<V: Clone> Translate<V> for mat::Identity {
#[inline]
fn translate(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_translate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Rotate<V> for mat::Identity {
#[inline]
fn rotate(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn
|
(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> AbsoluteRotate<V> for mat::Identity {
#[inline]
fn absolute_rotate(&self, v: &V) -> V {
v.clone()
}
}
impl<V: Clone> Transform<V> for mat::Identity {
#[inline]
fn transform(&self, v: &V) -> V {
v.clone()
}
#[inline]
fn inv_transform(&self, v: &V) -> V {
v.clone()
}
}
|
inv_rotate
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![feature(box_syntax)]
#![feature(collections)]
#![feature(core)]
#[macro_use]
extern crate log;
extern crate azure;
extern crate devtools_traits;
extern crate geom;
extern crate gfx;
extern crate layers;
extern crate layout_traits;
extern crate png;
extern crate script_traits;
extern crate msg;
extern crate net;
extern crate num;
extern crate profile_traits;
extern crate net_traits;
extern crate gfx_traits;
extern crate style;
#[macro_use]
extern crate util;
extern crate gleam;
extern crate clipboard;
extern crate libc;
extern crate time;
extern crate url;
#[cfg(target_os="macos")]
extern crate core_graphics;
#[cfg(target_os="macos")]
extern crate core_text;
|
pub use compositor_task::{CompositorEventListener, CompositorProxy, CompositorTask};
pub use constellation::Constellation;
pub mod compositor_task;
mod compositor_layer;
mod compositor;
mod headless;
mod scrolling;
pub mod pipeline;
pub mod constellation;
pub mod windowing;
|
random_line_split
|
|
error.rs
|
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display, Formatter};
use std::io;
use hyper::Error as HyperError;
use rustc_serialize::json::DecoderError;
use telegram_bot;
pub enum Error {
General(String),
}
impl From<HyperError> for Error {
fn from(e: HyperError) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<DecoderError> for Error {
fn from(e: DecoderError) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<telegram_bot::Error> for Error {
fn from(e: telegram_bot::Error) -> Error {
Error::General(e.description().to_owned())
}
}
impl Debug for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
match *self {
Error::General(ref msg) => write!(f, "error: {}", msg).unwrap(),
};
Ok(())
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
match *self {
Error::General(ref msg) => write!(f, "error: {}", msg).unwrap(),
};
|
impl StdError for Error {
fn description(&self) -> &str {
match *self {
Error::General(ref msg) => msg,
}
}
}
|
Ok(())
}
}
|
random_line_split
|
error.rs
|
use std::error::Error as StdError;
use std::fmt::{self, Debug, Display, Formatter};
use std::io;
use hyper::Error as HyperError;
use rustc_serialize::json::DecoderError;
use telegram_bot;
pub enum
|
{
General(String),
}
impl From<HyperError> for Error {
fn from(e: HyperError) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<DecoderError> for Error {
fn from(e: DecoderError) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::General(e.description().to_owned())
}
}
impl From<telegram_bot::Error> for Error {
fn from(e: telegram_bot::Error) -> Error {
Error::General(e.description().to_owned())
}
}
impl Debug for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
match *self {
Error::General(ref msg) => write!(f, "error: {}", msg).unwrap(),
};
Ok(())
}
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
match *self {
Error::General(ref msg) => write!(f, "error: {}", msg).unwrap(),
};
Ok(())
}
}
impl StdError for Error {
fn description(&self) -> &str {
match *self {
Error::General(ref msg) => msg,
}
}
}
|
Error
|
identifier_name
|
bcm.rs
|
use libc::{
c_int, c_short, c_uint, c_void, close, connect, fcntl, read, sockaddr, socket, timeval, write,
F_SETFL, O_NONBLOCK,
};
use futures;
// use mio::{Evented, PollOpt, Ready, Token};
use nix::net::if_::if_nametoindex;
use std::collections::VecDeque;
use std::fmt;
use std::io::{Error, ErrorKind};
use std::mem::size_of;
use std::{io, slice, time};
use romio::PollEvented;
use std::pin::Pin;
use futures::stream::Stream;
use futures::task::LocalWaker;
use futures::{ready, Poll};
use mio;
use socketcan::{
c_timeval_new, CanMessageId, CanAddr, CanFrame, CanSocketOpenError, FrameFlags, AF_CAN, CAN_BCM, PF_CAN,
SOCK_DGRAM,
};
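/// Maximum number of CAN frames that can be attached to a single BCM message.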
pub const MAX_NFRAMES: u32 = 256;
/// OpCodes
///
/// create (cyclic) transmission task
pub const TX_SETUP: u32 = 1;
/// remove (cyclic) transmission task
pub const TX_DELETE: u32 = 2;
/// read properties of (cyclic) transmission task
pub const TX_READ: u32 = 3;
/// send one CAN frame
pub const TX_SEND: u32 = 4;
/// create RX content filter subscription
pub const RX_SETUP: u32 = 5;
/// remove RX content filter subscription
pub const RX_DELETE: u32 = 6;
/// read properties of RX content filter subscription
pub const RX_READ: u32 = 7;
/// reply to TX_READ request
pub const TX_STATUS: u32 = 8;
/// notification on performed transmissions (count=0)
pub const TX_EXPIRED: u32 = 9;
/// reply to RX_READ request
pub const RX_STATUS: u32 = 10;
/// cyclic message is absent
pub const RX_TIMEOUT: u32 = 11;
/// sent if the first or a revised CAN message was received
pub const RX_CHANGED: u32 = 12;
/// Flags
///
/// set the value of ival1, ival2 and count
pub const SETTIMER: u32 = 0x0001;
/// start the timer with the actual value of ival1, ival2 and count.
/// Starting the timer leads simultaneously to emit a can_frame.
pub const STARTTIMER: u32 = 0x0002;
/// create the message TX_EXPIRED when count expires
pub const TX_COUNTEVT: u32 = 0x0004;
/// A change of data by the process is emitted immediately.
/// (Requirement of 'Changing Now' - BAES)
pub const TX_ANNOUNCE: u32 = 0x0008;
/// Copies the can_id from the message header to each subsequent frame
/// in frames. This is intended only as usage simplification.
pub const TX_CP_CAN_ID: u32 = 0x0010;
/// Filter by can_id alone, no frames required (nframes=0)
pub const RX_FILTER_ID: u32 = 0x0020;
/// A change of the DLC leads to an RX_CHANGED.
pub const RX_CHECK_DLC: u32 = 0x0040;
/// If the timer ival1 in the RX_SETUP has been set equal to zero, on receipt
/// of the CAN message the timer for the timeout monitoring is automatically
/// started. Setting this flag prevents the timer from being started automatically.
pub const RX_NO_AUTOTIMER: u32 = 0x0080;
/// refers also to the time-out supervision of the management RX_SETUP.
/// By setting this flag, when an RX timeout occurs, an RX_CHANGED will be
/// generated when the (cyclic) receive restarts. This will happen even if the
/// user data have not changed.
pub const RX_ANNOUNCE_RESUM: u32 = 0x0100;
/// forces a reset of the index counter from the update to be sent by multiplex
/// message even if it would not be necessary because of the length.
pub const TX_RESET_MULTI_ID: u32 = 0x0200;
/// the filter passed is used as CAN message to be sent when receiving an RTR frame.
pub const RX_RTR_FRAME: u32 = 0x0400;
pub const CAN_FD_FRAME: u32 = 0x0800;
/// BcmMsgHead
///
/// Head of messages to and from the broadcast manager
#[repr(C)]
pub struct BcmMsgHead {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// TODO figure out why C adds padding here?
#[cfg(all(target_pointer_width = "32"))]
_pad: u32,
// TODO figure out how to allocate only nframes instead of MAX_NFRAMES
/// buffer of CAN frames
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl fmt::Debug for BcmMsgHead {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BcmMsgHead {{ _opcode: {}, _flags: {}, _count: {}, _ival1: {:?}, _ival2: {:?}, _can_id: {}, _nframes: {}}}", self._opcode, self._flags, self._count, self._ival1.tv_sec, self._ival2.tv_sec, self._can_id, self._nframes)
}
}
/// BcmMsgHeadFrameLess
///
/// Head of messages to and from the broadcast manager see _pad fields for differences
/// to BcmMsgHead
#[repr(C)]
pub struct BcmMsgHeadFrameLess {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// Workaround: in Rust a zero-sized array of frames takes 0 bytes, whereas in
// C the BcmMsgHead struct contains a zero-length array that still
// occupies n (4) bytes.
#[cfg(all(target_pointer_width = "32"))]
_pad: usize,
}
#[repr(C)]
pub struct TxMsg {
_msg_head: BcmMsgHeadFrameLess,
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl BcmMsgHead {
pub fn can_id(&self) -> u32 {
self._can_id
}
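/// Returns only the `_nframes` frames actually filled in by the kernel; the
/// remaining entries of the fixed-size buffer are not valid.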
#[inline]
pub fn frames(&self) -> &[CanFrame] {
return unsafe { slice::from_raw_parts(self._frames.as_ptr(), self._nframes as usize) };
}
}
/// A socket for a CAN device, specifically for broadcast manager operations.
#[derive(Debug)]
pub struct CanBCMSocket {
pub fd: c_int,
}
pub struct BcmFrameStream {
io: PollEvented<CanBCMSocket>,
frame_buffer: VecDeque<CanFrame>,
}
impl BcmFrameStream {
pub fn new(socket: CanBCMSocket) -> BcmFrameStream {
BcmFrameStream {
io: PollEvented::new(socket),
frame_buffer: VecDeque::new(),
}
}
}
impl Stream for BcmFrameStream {
type Item = io::Result<CanFrame>;
fn poll_next(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
// The buffer may still contain frames from a previous read.
// After testing, it looks like read_msg never returns more than one
// msg, so the buffer is basically never filled.
if let Some(frame) = self.frame_buffer.pop_front() {
return Poll::Ready(Some(Ok(frame)));
}
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => {
let mut frames = n.frames().to_vec();
if let Some(frame) = frames.pop() {
if!frames.is_empty()
|
Poll::Ready(Some(Ok(frame)))
} else {
// This happens e.g. when a timed out msg is received
self.io.clear_read_ready(lw)?;
Poll::Pending
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
impl mio::Evented for BcmFrameStream {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
self.io.get_ref().deregister(poll)
}
}
impl CanBCMSocket {
/// Open a named CAN device non blocking.
///
/// Usually the more common case, opens a socket can device by name, such
/// as "vcan0" or "socan0".
pub fn open_nb(ifname: &str) -> Result<CanBCMSocket, CanSocketOpenError> {
let if_index = if_nametoindex(ifname)?;
CanBCMSocket::open_if_nb(if_index)
}
/// Open CAN device by interface number non blocking.
///
/// Opens a CAN device by kernel interface number.
pub fn open_if_nb(if_index: c_uint) -> Result<CanBCMSocket, CanSocketOpenError> {
// open socket
let sock_fd;
unsafe {
sock_fd = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
}
if sock_fd == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let fcntl_resp = unsafe { fcntl(sock_fd, F_SETFL, O_NONBLOCK) };
if fcntl_resp == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let addr = CanAddr {
_af_can: AF_CAN as c_short,
if_index: if_index as c_int,
rx_id: 0, //?
tx_id: 0, //?
};
let sockaddr_ptr = &addr as *const CanAddr;
let connect_res;
unsafe {
connect_res = connect(
sock_fd,
sockaddr_ptr as *const sockaddr,
size_of::<CanAddr>() as u32,
);
}
if connect_res != 0 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
Ok(CanBCMSocket { fd: sock_fd })
}
fn close(&mut self) -> io::Result<()> {
unsafe {
let rv = close(self.fd);
// close(2) returns -1 on failure.
if rv == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
/// Create a content filter subscription, filtering can frames by can_id.
pub fn filter_id(
&self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<()> {
let _ival1 = c_timeval_new(ival1);
let _ival2 = c_timeval_new(ival2);
let frames = [CanFrame::new(CanMessageId::SFF(0u16), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = BcmMsgHeadFrameLess {
_opcode: RX_SETUP,
_flags: SETTIMER | RX_FILTER_ID,
_count: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_ival1: _ival1,
_ival2: _ival2,
_can_id: can_id.with_eff_bit(),
_nframes: 0,
};
let tx_msg = &TxMsg {
_msg_head: msg,
_frames: frames,
};
let tx_msg_ptr = tx_msg as *const TxMsg;
let write_rv = unsafe {
write(self.fd, tx_msg_ptr as *const c_void, size_of::<TxMsg>())
};
if write_rv < 0 {
return Err(Error::new(ErrorKind::WriteZero, io::Error::last_os_error()));
}
Ok(())
}
///
/// Combination of `CanBCMSocket::filter_id` and `CanBCMSocket::incoming_frames`.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let ival = time::Duration::from_millis(1);
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let f = socket.filter_id_incoming_frames(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap()
/// .map_err(|_| ())
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn filter_id_incoming_frames(
self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<BcmFrameStream> {
self.filter_id(can_id, ival1, ival2)?;
Ok(self.incoming_frames())
}
///
/// Stream of incoming BcmMsgHeads that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_msg()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|bcm_msg_head| {
/// println!("BcmMsgHead {:?}", bcm_msg_head);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_msg(self) -> BcmStream {
BcmStream::from(self)
}
///
/// Stream of incoming frames that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_frames()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_frames(self) -> BcmFrameStream {
// let stream = BcmStream::from(self);
// stream
// .map(move |bcm_msg_head| {
// let v: Vec<CanFrame> = bcm_msg_head.frames().to_owned();
// futures::stream::iter_ok::<_, io::Error>(v)
// })
// .flatten()
BcmFrameStream::new(self)
}
/// Remove a content filter subscription.
pub fn filter_delete(&self, can_id: CanMessageId) -> io::Result<()> {
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = &BcmMsgHead {
_opcode: RX_DELETE,
_flags: 0,
_count: 0,
_ival1: c_timeval_new(time::Duration::new(0, 0)),
_ival2: c_timeval_new(time::Duration::new(0, 0)),
_can_id: can_id.with_eff_bit(),
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = msg as *const BcmMsgHead;
let write_rv = unsafe {
write(self.fd, msg_ptr as *const c_void, size_of::<BcmMsgHead>())
};
let expected_size = size_of::<BcmMsgHead>() - size_of::<[CanFrame; MAX_NFRAMES as usize]>();
if write_rv as usize != expected_size {
let msg = format!("Wrote {} but expected {}", write_rv, expected_size);
return Err(Error::new(ErrorKind::WriteZero, msg));
}
Ok(())
}
/// Read a single BCM message head, including any appended CAN frames.
pub fn read_msg(&self) -> io::Result<BcmMsgHead> {
let ival1 = c_timeval_new(time::Duration::from_millis(0));
let ival2 = c_timeval_new(time::Duration::from_millis(0));
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let mut msg = BcmMsgHead {
_opcode: 0,
_flags: 0,
_count: 0,
_ival1: ival1,
_ival2: ival2,
_can_id: 0,
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = &mut msg as *mut BcmMsgHead;
let count = unsafe {
read(
self.fd,
msg_ptr as *mut c_void,
size_of::<BcmMsgHead>(),
)
};
let last_error = io::Error::last_os_error();
if count < 0 {
Err(last_error)
} else {
Ok(msg)
}
}
}
impl mio::Evented for CanBCMSocket {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).deregister(poll)
}
}
impl Drop for CanBCMSocket {
fn drop(&mut self) {
self.close().ok(); // ignore result
}
}
pub struct BcmStream {
io: PollEvented<CanBCMSocket>,
}
pub trait IntoBcmStream {
type Stream: futures::stream::Stream;
type Error;
fn into_bcm(self) -> Result<Self::Stream, Self::Error>;
}
impl BcmStream {
pub fn from(bcm_socket: CanBCMSocket) -> BcmStream {
let io = PollEvented::new(bcm_socket);
BcmStream { io: io }
}
}
impl Stream for BcmStream {
type Item = io::Result<BcmMsgHead>;
fn poll_next(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => Poll::Ready(Some(Ok(n))),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
|
{
for frame in n.frames() {
self.frame_buffer.push_back(*frame)
}
}
|
conditional_block
|
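Note on the header-size arithmetic in the record above: filter_delete derives the expected header-only write size by subtracting the frame array from size_of::<BcmMsgHead>(), and BcmMsgHeadFrameLess carries an explicit _pad field on 32-bit targets. The following is a minimal, self-contained sketch of why such a subtraction can disagree with a genuinely frameless struct; Frame, WithFrames and Frameless are made-up stand-ins rather than the crate's types, and the printed values depend on the target's alignment rules.

use std::mem::size_of;

// Hypothetical stand-in for a frame type with 8-byte alignment.
#[repr(C)]
#[derive(Clone, Copy)]
#[allow(dead_code)]
struct Frame {
    data: [u64; 2],
}

const N: usize = 4;

// Header followed by a fixed frame buffer, shaped like BcmMsgHead.
#[repr(C)]
#[allow(dead_code)]
struct WithFrames {
    a: u32,
    b: u32,
    c: u32,
    frames: [Frame; N],
}

// The same header without the buffer, shaped like BcmMsgHeadFrameLess.
#[repr(C)]
#[allow(dead_code)]
struct Frameless {
    a: u32,
    b: u32,
    c: u32,
}

fn main() {
    // Header size derived by subtraction, as filter_delete's expected_size does.
    let derived = size_of::<WithFrames>() - size_of::<[Frame; N]>();
    // Where Frame is 8-byte aligned, padding is inserted before the array, so
    // the derived value exceeds the truly frameless size; the explicit _pad
    // field in BcmMsgHeadFrameLess compensates for exactly this kind of gap.
    println!("derived header size:   {}", derived);
    println!("frameless struct size: {}", size_of::<Frameless>());
}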
bcm.rs
|
use libc::{
c_int, c_short, c_uint, c_void, close, connect, fcntl, read, sockaddr, socket, timeval, write,
F_SETFL, O_NONBLOCK,
};
use futures;
// use mio::{Evented, PollOpt, Ready, Token};
use nix::net::if_::if_nametoindex;
use std::collections::VecDeque;
use std::fmt;
use std::io::{Error, ErrorKind};
use std::mem::size_of;
use std::{io, slice, time};
use romio::PollEvented;
use std::pin::Pin;
use futures::stream::Stream;
use futures::task::LocalWaker;
use futures::{ready, Poll};
use mio;
use socketcan::{
c_timeval_new, CanMessageId, CanAddr, CanFrame, CanSocketOpenError, FrameFlags, AF_CAN, CAN_BCM, PF_CAN,
SOCK_DGRAM,
};
pub const MAX_NFRAMES: u32 = 256;
/// OpCodes
///
/// create (cyclic) transmission task
pub const TX_SETUP: u32 = 1;
/// remove (cyclic) transmission task
pub const TX_DELETE: u32 = 2;
/// read properties of (cyclic) transmission task
pub const TX_READ: u32 = 3;
/// send one CAN frame
pub const TX_SEND: u32 = 4;
/// create RX content filter subscription
pub const RX_SETUP: u32 = 5;
/// remove RX content filter subscription
pub const RX_DELETE: u32 = 6;
/// read properties of RX content filter subscription
pub const RX_READ: u32 = 7;
/// reply to TX_READ request
pub const TX_STATUS: u32 = 8;
/// notification on performed transmissions (count=0)
pub const TX_EXPIRED: u32 = 9;
/// reply to RX_READ request
pub const RX_STATUS: u32 = 10;
/// cyclic message is absent
pub const RX_TIMEOUT: u32 = 11;
/// sent if the first or a revised CAN message was received
pub const RX_CHANGED: u32 = 12;
/// Flags
///
/// set the value of ival1, ival2 and count
pub const SETTIMER: u32 = 0x0001;
/// start the timer with the actual value of ival1, ival2 and count.
/// Starting the timer leads simultaneously to emit a can_frame.
pub const STARTTIMER: u32 = 0x0002;
/// create the message TX_EXPIRED when count expires
pub const TX_COUNTEVT: u32 = 0x0004;
/// A change of data by the process is emitted immediately.
/// (Requirement of 'Changing Now' - BAES)
pub const TX_ANNOUNCE: u32 = 0x0008;
/// Copies the can_id from the message header to each subsequent frame
/// in frames. This is intended only as usage simplification.
pub const TX_CP_CAN_ID: u32 = 0x0010;
/// Filter by can_id alone, no frames required (nframes=0)
pub const RX_FILTER_ID: u32 = 0x0020;
/// A change of the DLC leads to an RX_CHANGED.
pub const RX_CHECK_DLC: u32 = 0x0040;
/// If the timer ival1 in the RX_SETUP has been set equal to zero, on receipt
/// of the CAN message the timer for the timeout monitoring is automatically
/// started. Setting this flag prevents the automatic start timer.
pub const RX_NO_AUTOTIMER: u32 = 0x0080;
/// Also refers to the timeout supervision of the RX_SETUP management.
/// By setting this flag, when an RX timeout occurs, an RX_CHANGED will be
/// generated when the (cyclic) reception restarts. This happens even if the
/// user data have not changed.
pub const RX_ANNOUNCE_RESUM: u32 = 0x0100;
/// forces a reset of the index counter from the update to be sent by multiplex
/// message even if it would not be necessary because of the length.
pub const TX_RESET_MULTI_ID: u32 = 0x0200;
/// the filter passed is used as CAN message to be sent when receiving an RTR frame.
pub const RX_RTR_FRAME: u32 = 0x0400;
pub const CAN_FD_FRAME: u32 = 0x0800;
/// BcmMsgHead
///
/// Head of messages to and from the broadcast manager
#[repr(C)]
pub struct BcmMsgHead {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// TODO: figure out why C adds padding here
#[cfg(all(target_pointer_width = "32"))]
_pad: u32,
// TODO figure out how to allocate only nframes instead of MAX_NFRAMES
/// buffer of CAN frames
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl fmt::Debug for BcmMsgHead {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BcmMsgHead {{ _opcode: {}, _flags: {}, _count: {}, _ival1: {:?}, _ival2: {:?}, _can_id: {}, _nframes: {}}}", self._opcode, self._flags, self._count, self._ival1.tv_sec, self._ival2.tv_sec, self._can_id, self._nframes)
}
}
/// BcmMsgHeadFrameLess
///
/// Head of messages to and from the broadcast manager see _pad fields for differences
/// to BcmMsgHead
#[repr(C)]
pub struct BcmMsgHeadFrameLess {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// Workaround: in Rust a zero-length frames array is a ZST with size 0, while
// the C BcmMsgHead struct contains an array that, although it has a length
// of zero, still takes n (4) bytes.
#[cfg(all(target_pointer_width = "32"))]
_pad: usize,
}
#[repr(C)]
pub struct TxMsg {
_msg_head: BcmMsgHeadFrameLess,
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl BcmMsgHead {
pub fn can_id(&self) -> u32 {
self._can_id
}
#[inline]
pub fn frames(&self) -> &[CanFrame] {
return unsafe { slice::from_raw_parts(self._frames.as_ptr(), self._nframes as usize) };
}
}
/// A socket for a CAN device, specifically for broadcast manager operations.
#[derive(Debug)]
pub struct CanBCMSocket {
pub fd: c_int,
}
pub struct BcmFrameStream {
io: PollEvented<CanBCMSocket>,
frame_buffer: VecDeque<CanFrame>,
}
impl BcmFrameStream {
pub fn new(socket: CanBCMSocket) -> BcmFrameStream {
BcmFrameStream {
io: PollEvented::new(socket),
frame_buffer: VecDeque::new(),
}
}
}
impl Stream for BcmFrameStream {
type Item = io::Result<CanFrame>;
fn poll_next(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
// Frames may still be buffered from a previous message.
// In testing, read_msg practically never delivers more than one message at
// a time, so this buffer is almost never filled.
if let Some(frame) = self.frame_buffer.pop_front() {
return Poll::Ready(Some(Ok(frame)));
}
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => {
let mut frames = n.frames().to_vec();
if let Some(frame) = frames.pop() {
if !frames.is_empty() {
for frame in n.frames() {
self.frame_buffer.push_back(*frame)
}
}
Poll::Ready(Some(Ok(frame)))
} else {
// This happens e.g. when a timed out msg is received
self.io.clear_read_ready(lw)?;
Poll::Pending
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
impl mio::Evented for BcmFrameStream {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
self.io.get_ref().deregister(poll)
}
}
impl CanBCMSocket {
/// Open a named CAN device non blocking.
///
/// Usually the more common case, opens a socket can device by name, such
/// as "vcan0" or "socan0".
pub fn open_nb(ifname: &str) -> Result<CanBCMSocket, CanSocketOpenError> {
let if_index = if_nametoindex(ifname)?;
CanBCMSocket::open_if_nb(if_index)
}
/// Open CAN device by interface number non blocking.
///
/// Opens a CAN device by kernel interface number.
pub fn open_if_nb(if_index: c_uint) -> Result<CanBCMSocket, CanSocketOpenError> {
// open socket
let sock_fd;
unsafe {
sock_fd = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
}
if sock_fd == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let fcntl_resp = unsafe { fcntl(sock_fd, F_SETFL, O_NONBLOCK) };
if fcntl_resp == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let addr = CanAddr {
_af_can: AF_CAN as c_short,
if_index: if_index as c_int,
rx_id: 0, //?
tx_id: 0, //?
};
let sockaddr_ptr = &addr as *const CanAddr;
let connect_res;
unsafe {
connect_res = connect(
sock_fd,
sockaddr_ptr as *const sockaddr,
size_of::<CanAddr>() as u32,
);
}
if connect_res != 0 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
Ok(CanBCMSocket { fd: sock_fd })
}
fn close(&mut self) -> io::Result<()> {
unsafe {
let rv = close(self.fd);
// close(2) returns -1 on failure.
if rv == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
/// Create a content filter subscription, filtering can frames by can_id.
pub fn filter_id(
&self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<()> {
let _ival1 = c_timeval_new(ival1);
let _ival2 = c_timeval_new(ival2);
let frames = [CanFrame::new(CanMessageId::SFF(0u16), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = BcmMsgHeadFrameLess {
_opcode: RX_SETUP,
_flags: SETTIMER | RX_FILTER_ID,
_count: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_ival1: _ival1,
_ival2: _ival2,
_can_id: can_id.with_eff_bit(),
_nframes: 0,
};
let tx_msg = &TxMsg {
_msg_head: msg,
_frames: frames,
};
let tx_msg_ptr = tx_msg as *const TxMsg;
let write_rv = unsafe {
write(self.fd, tx_msg_ptr as *const c_void, size_of::<TxMsg>())
};
if write_rv < 0 {
return Err(Error::new(ErrorKind::WriteZero, io::Error::last_os_error()));
}
Ok(())
}
///
/// Combination of `CanBCMSocket::filter_id` and `CanBCMSocket::incoming_frames`.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let ival = time::Duration::from_millis(1);
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let f = socket.filter_id_incoming_frames(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap()
/// .map_err(|_| ())
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn filter_id_incoming_frames(
self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<BcmFrameStream> {
self.filter_id(can_id, ival1, ival2)?;
Ok(self.incoming_frames())
}
///
/// Stream of incoming BcmMsgHeads that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_msg()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|bcm_msg_head| {
/// println!("BcmMsgHead {:?}", bcm_msg_head);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_msg(self) -> BcmStream {
BcmStream::from(self)
}
///
/// Stream of incoming frames that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_frames()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_frames(self) -> BcmFrameStream {
// let stream = BcmStream::from(self);
// stream
// .map(move |bcm_msg_head| {
// let v: Vec<CanFrame> = bcm_msg_head.frames().to_owned();
// futures::stream::iter_ok::<_, io::Error>(v)
// })
// .flatten()
BcmFrameStream::new(self)
}
/// Remove a content filter subscription.
pub fn filter_delete(&self, can_id: CanMessageId) -> io::Result<()> {
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = &BcmMsgHead {
_opcode: RX_DELETE,
_flags: 0,
_count: 0,
_ival1: c_timeval_new(time::Duration::new(0, 0)),
_ival2: c_timeval_new(time::Duration::new(0, 0)),
_can_id: can_id.with_eff_bit(),
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = msg as *const BcmMsgHead;
let write_rv = unsafe {
write(self.fd, msg_ptr as *const c_void, size_of::<BcmMsgHead>())
};
let expected_size = size_of::<BcmMsgHead>() - size_of::<[CanFrame; MAX_NFRAMES as usize]>();
if write_rv as usize != expected_size {
let msg = format!("Wrote {} but expected {}", write_rv, expected_size);
return Err(Error::new(ErrorKind::WriteZero, msg));
}
Ok(())
}
/// Read a single BCM message head, including any appended CAN frames.
pub fn read_msg(&self) -> io::Result<BcmMsgHead> {
let ival1 = c_timeval_new(time::Duration::from_millis(0));
let ival2 = c_timeval_new(time::Duration::from_millis(0));
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let mut msg = BcmMsgHead {
_opcode: 0,
_flags: 0,
_count: 0,
_ival1: ival1,
_ival2: ival2,
_can_id: 0,
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = &mut msg as *mut BcmMsgHead;
let count = unsafe {
read(
self.fd,
msg_ptr as *mut c_void,
size_of::<BcmMsgHead>(),
)
};
let last_error = io::Error::last_os_error();
if count < 0 {
Err(last_error)
} else {
Ok(msg)
}
}
}
impl mio::Evented for CanBCMSocket {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).deregister(poll)
}
}
impl Drop for CanBCMSocket {
fn drop(&mut self) {
self.close().ok(); // ignore result
}
}
pub struct BcmStream {
io: PollEvented<CanBCMSocket>,
}
pub trait IntoBcmStream {
type Stream: futures::stream::Stream;
type Error;
fn into_bcm(self) -> Result<Self::Stream, Self::Error>;
}
impl BcmStream {
pub fn from(bcm_socket: CanBCMSocket) -> BcmStream
|
}
impl Stream for BcmStream {
type Item = io::Result<BcmMsgHead>;
fn poll_next(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => Poll::Ready(Some(Ok(n))),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
|
{
let io = PollEvented::new(bcm_socket);
BcmStream { io: io }
}
|
identifier_body
|
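The record above fills in BcmStream::from; the related BcmFrameStream::poll_next drains a VecDeque of frames before asking the socket for another message. That buffer-then-refill shape can be sketched without any async machinery as below; BatchSource and next_batch are hypothetical stand-ins for the socket and read_msg, not part of the crate.

use std::collections::VecDeque;

/// Hypothetical stand-in for a source that yields whole batches, the way
/// read_msg yields a BcmMsgHead carrying several frames.
struct BatchSource {
    batches: Vec<Vec<u32>>,
}

impl BatchSource {
    fn next_batch(&mut self) -> Option<Vec<u32>> {
        if self.batches.is_empty() {
            None
        } else {
            Some(self.batches.remove(0))
        }
    }
}

/// Buffer-then-refill iterator: pop from the queue first and only hit the
/// source once the queue is empty. BcmFrameStream::poll_next follows the
/// same shape, returning Poll::Pending instead of None while it waits.
struct Frames {
    source: BatchSource,
    buffer: VecDeque<u32>,
}

impl Iterator for Frames {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        if let Some(frame) = self.buffer.pop_front() {
            return Some(frame);
        }
        let batch = self.source.next_batch()?;
        self.buffer.extend(batch);
        self.buffer.pop_front()
    }
}

fn main() {
    let frames = Frames {
        source: BatchSource { batches: vec![vec![1, 2, 3], vec![4]] },
        buffer: VecDeque::new(),
    };
    assert_eq!(frames.collect::<Vec<u32>>(), vec![1, 2, 3, 4]);
}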
bcm.rs
|
use libc::{
c_int, c_short, c_uint, c_void, close, connect, fcntl, read, sockaddr, socket, timeval, write,
F_SETFL, O_NONBLOCK,
};
use futures;
// use mio::{Evented, PollOpt, Ready, Token};
use nix::net::if_::if_nametoindex;
use std::collections::VecDeque;
use std::fmt;
use std::io::{Error, ErrorKind};
use std::mem::size_of;
use std::{io, slice, time};
use romio::PollEvented;
use std::pin::Pin;
use futures::stream::Stream;
use futures::task::LocalWaker;
use futures::{ready, Poll};
use mio;
use socketcan::{
c_timeval_new, CanMessageId, CanAddr, CanFrame, CanSocketOpenError, FrameFlags, AF_CAN, CAN_BCM, PF_CAN,
SOCK_DGRAM,
};
pub const MAX_NFRAMES: u32 = 256;
/// OpCodes
///
/// create (cyclic) transmission task
pub const TX_SETUP: u32 = 1;
/// remove (cyclic) transmission task
pub const TX_DELETE: u32 = 2;
/// read properties of (cyclic) transmission task
pub const TX_READ: u32 = 3;
/// send one CAN frame
pub const TX_SEND: u32 = 4;
/// create RX content filter subscription
pub const RX_SETUP: u32 = 5;
/// remove RX content filter subscription
pub const RX_DELETE: u32 = 6;
/// read properties of RX content filter subscription
pub const RX_READ: u32 = 7;
/// reply to TX_READ request
pub const TX_STATUS: u32 = 8;
/// notification on performed transmissions (count=0)
pub const TX_EXPIRED: u32 = 9;
/// reply to RX_READ request
pub const RX_STATUS: u32 = 10;
/// cyclic message is absent
pub const RX_TIMEOUT: u32 = 11;
/// sent if the first or a revised CAN message was received
pub const RX_CHANGED: u32 = 12;
/// Flags
///
/// set the value of ival1, ival2 and count
pub const SETTIMER: u32 = 0x0001;
/// start the timer with the actual value of ival1, ival2 and count.
/// Starting the timer leads simultaneously to emit a can_frame.
pub const STARTTIMER: u32 = 0x0002;
/// create the message TX_EXPIRED when count expires
pub const TX_COUNTEVT: u32 = 0x0004;
/// A change of data by the process is emitted immediately.
/// (Requirement of 'Changing Now' - BAES)
pub const TX_ANNOUNCE: u32 = 0x0008;
/// Copies the can_id from the message header to each subsequent frame
/// in frames. This is intended only as usage simplification.
pub const TX_CP_CAN_ID: u32 = 0x0010;
/// Filter by can_id alone, no frames required (nframes=0)
pub const RX_FILTER_ID: u32 = 0x0020;
/// A change of the DLC leads to an RX_CHANGED.
pub const RX_CHECK_DLC: u32 = 0x0040;
/// If the timer ival1 in the RX_SETUP has been set equal to zero, on receipt
/// of the CAN message the timer for the timeout monitoring is automatically
/// started. Setting this flag prevents the automatic start timer.
pub const RX_NO_AUTOTIMER: u32 = 0x0080;
/// Also refers to the timeout supervision of the RX_SETUP management.
/// By setting this flag, when an RX timeout occurs, an RX_CHANGED will be
/// generated when the (cyclic) reception restarts. This happens even if the
/// user data have not changed.
pub const RX_ANNOUNCE_RESUM: u32 = 0x0100;
/// forces a reset of the index counter from the update to be sent by multiplex
/// message even if it would not be necessary because of the length.
pub const TX_RESET_MULTI_ID: u32 = 0x0200;
/// the filter passed is used as CAN message to be sent when receiving an RTR frame.
pub const RX_RTR_FRAME: u32 = 0x0400;
pub const CAN_FD_FRAME: u32 = 0x0800;
/// BcmMsgHead
///
/// Head of messages to and from the broadcast manager
#[repr(C)]
pub struct BcmMsgHead {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// TODO: figure out why C adds padding here
#[cfg(all(target_pointer_width = "32"))]
_pad: u32,
// TODO figure out how to allocate only nframes instead of MAX_NFRAMES
/// buffer of CAN frames
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl fmt::Debug for BcmMsgHead {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BcmMsgHead {{ _opcode: {}, _flags: {}, _count: {}, _ival1: {:?}, _ival2: {:?}, _can_id: {}, _nframes: {}}}", self._opcode, self._flags, self._count, self._ival1.tv_sec, self._ival2.tv_sec, self._can_id, self._nframes)
}
}
/// BcmMsgHeadFrameLess
///
/// Head of messages to and from the broadcast manager see _pad fields for differences
/// to BcmMsgHead
#[repr(C)]
pub struct BcmMsgHeadFrameLess {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// Workaround: in Rust a zero-length frames array is a ZST with size 0, while
// the C BcmMsgHead struct contains an array that, although it has a length
// of zero, still takes n (4) bytes.
#[cfg(all(target_pointer_width = "32"))]
_pad: usize,
}
#[repr(C)]
pub struct TxMsg {
_msg_head: BcmMsgHeadFrameLess,
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl BcmMsgHead {
pub fn can_id(&self) -> u32 {
self._can_id
}
|
return unsafe { slice::from_raw_parts(self._frames.as_ptr(), self._nframes as usize) };
}
}
/// A socket for a CAN device, specifically for broadcast manager operations.
#[derive(Debug)]
pub struct CanBCMSocket {
pub fd: c_int,
}
pub struct BcmFrameStream {
io: PollEvented<CanBCMSocket>,
frame_buffer: VecDeque<CanFrame>,
}
impl BcmFrameStream {
pub fn new(socket: CanBCMSocket) -> BcmFrameStream {
BcmFrameStream {
io: PollEvented::new(socket),
frame_buffer: VecDeque::new(),
}
}
}
impl Stream for BcmFrameStream {
type Item = io::Result<CanFrame>;
fn poll_next(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
// Frames may still be buffered from a previous message.
// In testing, read_msg practically never delivers more than one message at
// a time, so this buffer is almost never filled.
if let Some(frame) = self.frame_buffer.pop_front() {
return Poll::Ready(Some(Ok(frame)));
}
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => {
let mut frames = n.frames().to_vec();
if let Some(frame) = frames.pop() {
if !frames.is_empty() {
for frame in n.frames() {
self.frame_buffer.push_back(*frame)
}
}
Poll::Ready(Some(Ok(frame)))
} else {
// This happens e.g. when a timed out msg is received
self.io.clear_read_ready(lw)?;
Poll::Pending
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
impl mio::Evented for BcmFrameStream {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
self.io.get_ref().deregister(poll)
}
}
impl CanBCMSocket {
/// Open a named CAN device non blocking.
///
/// Usually the more common case, opens a socket can device by name, such
/// as "vcan0" or "socan0".
pub fn open_nb(ifname: &str) -> Result<CanBCMSocket, CanSocketOpenError> {
let if_index = if_nametoindex(ifname)?;
CanBCMSocket::open_if_nb(if_index)
}
/// Open CAN device by interface number non blocking.
///
/// Opens a CAN device by kernel interface number.
pub fn open_if_nb(if_index: c_uint) -> Result<CanBCMSocket, CanSocketOpenError> {
// open socket
let sock_fd;
unsafe {
sock_fd = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
}
if sock_fd == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let fcntl_resp = unsafe { fcntl(sock_fd, F_SETFL, O_NONBLOCK) };
if fcntl_resp == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let addr = CanAddr {
_af_can: AF_CAN as c_short,
if_index: if_index as c_int,
rx_id: 0, //?
tx_id: 0, //?
};
let sockaddr_ptr = &addr as *const CanAddr;
let connect_res;
unsafe {
connect_res = connect(
sock_fd,
sockaddr_ptr as *const sockaddr,
size_of::<CanAddr>() as u32,
);
}
if connect_res != 0 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
Ok(CanBCMSocket { fd: sock_fd })
}
fn close(&mut self) -> io::Result<()> {
unsafe {
let rv = close(self.fd);
// close(2) returns -1 on failure.
if rv == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
/// Create a content filter subscription, filtering can frames by can_id.
pub fn filter_id(
&self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<()> {
let _ival1 = c_timeval_new(ival1);
let _ival2 = c_timeval_new(ival2);
let frames = [CanFrame::new(CanMessageId::SFF(0u16), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = BcmMsgHeadFrameLess {
_opcode: RX_SETUP,
_flags: SETTIMER | RX_FILTER_ID,
_count: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_ival1: _ival1,
_ival2: _ival2,
_can_id: can_id.with_eff_bit(),
_nframes: 0,
};
let tx_msg = &TxMsg {
_msg_head: msg,
_frames: frames,
};
let tx_msg_ptr = tx_msg as *const TxMsg;
let write_rv = unsafe {
write(self.fd, tx_msg_ptr as *const c_void, size_of::<TxMsg>())
};
if write_rv < 0 {
return Err(Error::new(ErrorKind::WriteZero, io::Error::last_os_error()));
}
Ok(())
}
///
/// Combination of `CanBCMSocket::filter_id` and `CanBCMSocket::incoming_frames`.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let ival = time::Duration::from_millis(1);
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let f = socket.filter_id_incoming_frames(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap()
/// .map_err(|_| ())
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn filter_id_incoming_frames(
self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<BcmFrameStream> {
self.filter_id(can_id, ival1, ival2)?;
Ok(self.incoming_frames())
}
///
/// Stream of incoming BcmMsgHeads that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_msg()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|bcm_msg_head| {
/// println!("BcmMsgHead {:?}", bcm_msg_head);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_msg(self) -> BcmStream {
BcmStream::from(self)
}
///
/// Stream of incoming frames that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_frames()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_frames(self) -> BcmFrameStream {
// let stream = BcmStream::from(self);
// stream
// .map(move |bcm_msg_head| {
// let v: Vec<CanFrame> = bcm_msg_head.frames().to_owned();
// futures::stream::iter_ok::<_, io::Error>(v)
// })
// .flatten()
BcmFrameStream::new(self)
}
/// Remove a content filter subscription.
pub fn filter_delete(&self, can_id: CanMessageId) -> io::Result<()> {
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = &BcmMsgHead {
_opcode: RX_DELETE,
_flags: 0,
_count: 0,
_ival1: c_timeval_new(time::Duration::new(0, 0)),
_ival2: c_timeval_new(time::Duration::new(0, 0)),
_can_id: can_id.with_eff_bit(),
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = msg as *const BcmMsgHead;
let write_rv = unsafe {
write(self.fd, msg_ptr as *const c_void, size_of::<BcmMsgHead>())
};
let expected_size = size_of::<BcmMsgHead>() - size_of::<[CanFrame; MAX_NFRAMES as usize]>();
if write_rv as usize != expected_size {
let msg = format!("Wrote {} but expected {}", write_rv, expected_size);
return Err(Error::new(ErrorKind::WriteZero, msg));
}
Ok(())
}
/// Read a single BCM message head, including any appended CAN frames.
pub fn read_msg(&self) -> io::Result<BcmMsgHead> {
let ival1 = c_timeval_new(time::Duration::from_millis(0));
let ival2 = c_timeval_new(time::Duration::from_millis(0));
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let mut msg = BcmMsgHead {
_opcode: 0,
_flags: 0,
_count: 0,
_ival1: ival1,
_ival2: ival2,
_can_id: 0,
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = &mut msg as *mut BcmMsgHead;
let count = unsafe {
read(
self.fd,
msg_ptr as *mut c_void,
size_of::<BcmMsgHead>(),
)
};
let last_error = io::Error::last_os_error();
if count < 0 {
Err(last_error)
} else {
Ok(msg)
}
}
}
impl mio::Evented for CanBCMSocket {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).deregister(poll)
}
}
impl Drop for CanBCMSocket {
fn drop(&mut self) {
self.close().ok(); // ignore result
}
}
pub struct BcmStream {
io: PollEvented<CanBCMSocket>,
}
pub trait IntoBcmStream {
type Stream: futures::stream::Stream;
type Error;
fn into_bcm(self) -> Result<Self::Stream, Self::Error>;
}
impl BcmStream {
pub fn from(bcm_socket: CanBCMSocket) -> BcmStream {
let io = PollEvented::new(bcm_socket);
BcmStream { io: io }
}
}
impl Stream for BcmStream {
type Item = io::Result<BcmMsgHead>;
fn poll_next(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => Poll::Ready(Some(Ok(n))),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
|
#[inline]
pub fn frames(&self) -> &[CanFrame] {
|
random_line_split
|
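The hole in the record above is the frames() accessor, which turns the fixed MAX_NFRAMES buffer plus the kernel-reported _nframes count into a slice with slice::from_raw_parts. When both the buffer and the count live in safe Rust, the same accessor needs no unsafe; the sketch below uses placeholder names (Msg, nframes) and only illustrates the pattern.

/// Hypothetical message with a fixed buffer plus a count of valid elements,
/// mirroring the shape of BcmMsgHead (all names here are placeholders).
struct Msg {
    nframes: usize,
    frames: [u8; 8],
}

impl Msg {
    /// Safe counterpart of the slice::from_raw_parts accessor: expose only
    /// the first `nframes` elements of the fixed buffer.
    fn frames(&self) -> &[u8] {
        &self.frames[..self.nframes]
    }
}

fn main() {
    let msg = Msg { nframes: 3, frames: [1, 2, 3, 0, 0, 0, 0, 0] };
    assert_eq!(msg.frames(), &[1, 2, 3]);
    // The BCM code reaches for raw parts because _nframes comes back from the
    // kernel; clamping it to the buffer length guards against a bogus count.
    let clamped = &msg.frames[..msg.nframes.min(msg.frames.len())];
    assert_eq!(clamped, &[1, 2, 3]);
}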
bcm.rs
|
use libc::{
c_int, c_short, c_uint, c_void, close, connect, fcntl, read, sockaddr, socket, timeval, write,
F_SETFL, O_NONBLOCK,
};
use futures;
// use mio::{Evented, PollOpt, Ready, Token};
use nix::net::if_::if_nametoindex;
use std::collections::VecDeque;
use std::fmt;
use std::io::{Error, ErrorKind};
use std::mem::size_of;
use std::{io, slice, time};
use romio::PollEvented;
use std::pin::Pin;
use futures::stream::Stream;
use futures::task::LocalWaker;
use futures::{ready, Poll};
use mio;
use socketcan::{
c_timeval_new, CanMessageId, CanAddr, CanFrame, CanSocketOpenError, FrameFlags, AF_CAN, CAN_BCM, PF_CAN,
SOCK_DGRAM,
};
pub const MAX_NFRAMES: u32 = 256;
/// OpCodes
///
/// create (cyclic) transmission task
pub const TX_SETUP: u32 = 1;
/// remove (cyclic) transmission task
pub const TX_DELETE: u32 = 2;
/// read properties of (cyclic) transmission task
pub const TX_READ: u32 = 3;
/// send one CAN frame
pub const TX_SEND: u32 = 4;
/// create RX content filter subscription
pub const RX_SETUP: u32 = 5;
/// remove RX content filter subscription
pub const RX_DELETE: u32 = 6;
/// read properties of RX content filter subscription
pub const RX_READ: u32 = 7;
/// reply to TX_READ request
pub const TX_STATUS: u32 = 8;
/// notification on performed transmissions (count=0)
pub const TX_EXPIRED: u32 = 9;
/// reply to RX_READ request
pub const RX_STATUS: u32 = 10;
/// cyclic message is absent
pub const RX_TIMEOUT: u32 = 11;
/// sent if the first or a revised CAN message was received
pub const RX_CHANGED: u32 = 12;
/// Flags
///
/// set the value of ival1, ival2 and count
pub const SETTIMER: u32 = 0x0001;
/// start the timer with the actual value of ival1, ival2 and count.
/// Starting the timer leads simultaneously to emit a can_frame.
pub const STARTTIMER: u32 = 0x0002;
/// create the message TX_EXPIRED when count expires
pub const TX_COUNTEVT: u32 = 0x0004;
/// A change of data by the process is emitted immediately.
/// (Requirement of 'Changing Now' - BAES)
pub const TX_ANNOUNCE: u32 = 0x0008;
/// Copies the can_id from the message header to each subsequent frame
/// in frames. This is intended only as usage simplification.
pub const TX_CP_CAN_ID: u32 = 0x0010;
/// Filter by can_id alone, no frames required (nframes=0)
pub const RX_FILTER_ID: u32 = 0x0020;
/// A change of the DLC leads to an RX_CHANGED.
pub const RX_CHECK_DLC: u32 = 0x0040;
/// If the timer ival1 in the RX_SETUP has been set equal to zero, on receipt
/// of the CAN message the timer for the timeout monitoring is automatically
/// started. Setting this flag prevents the automatic start timer.
pub const RX_NO_AUTOTIMER: u32 = 0x0080;
/// Also refers to the timeout supervision of the RX_SETUP management.
/// By setting this flag, when an RX timeout occurs, an RX_CHANGED will be
/// generated when the (cyclic) reception restarts. This happens even if the
/// user data have not changed.
pub const RX_ANNOUNCE_RESUM: u32 = 0x0100;
/// forces a reset of the index counter from the update to be sent by multiplex
/// message even if it would not be necessary because of the length.
pub const TX_RESET_MULTI_ID: u32 = 0x0200;
/// the filter passed is used as CAN message to be sent when receiving an RTR frame.
pub const RX_RTR_FRAME: u32 = 0x0400;
pub const CAN_FD_FRAME: u32 = 0x0800;
/// BcmMsgHead
///
/// Head of messages to and from the broadcast manager
#[repr(C)]
pub struct BcmMsgHead {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// TODO: figure out why C adds padding here
#[cfg(all(target_pointer_width = "32"))]
_pad: u32,
// TODO figure out how to allocate only nframes instead of MAX_NFRAMES
/// buffer of CAN frames
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl fmt::Debug for BcmMsgHead {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "BcmMsgHead {{ _opcode: {}, _flags: {}, _count: {}, _ival1: {:?}, _ival2: {:?}, _can_id: {}, _nframes: {}}}", self._opcode, self._flags, self._count, self._ival1.tv_sec, self._ival2.tv_sec, self._can_id, self._nframes)
}
}
/// BcmMsgHeadFrameLess
///
/// Head of messages to and from the broadcast manager see _pad fields for differences
/// to BcmMsgHead
#[repr(C)]
pub struct BcmMsgHeadFrameLess {
_opcode: u32,
_flags: u32,
/// number of frames to send before changing interval
_count: u32,
/// interval for the first count frames
_ival1: timeval,
/// interval for the following frames
_ival2: timeval,
_can_id: u32,
/// number of can frames appended to the message head
_nframes: u32,
// Workaround: in Rust a zero-length frames array is a ZST with size 0, while
// the C BcmMsgHead struct contains an array that, although it has a length
// of zero, still takes n (4) bytes.
#[cfg(all(target_pointer_width = "32"))]
_pad: usize,
}
#[repr(C)]
pub struct TxMsg {
_msg_head: BcmMsgHeadFrameLess,
_frames: [CanFrame; MAX_NFRAMES as usize],
}
impl BcmMsgHead {
pub fn can_id(&self) -> u32 {
self._can_id
}
#[inline]
pub fn frames(&self) -> &[CanFrame] {
return unsafe { slice::from_raw_parts(self._frames.as_ptr(), self._nframes as usize) };
}
}
/// A socket for a CAN device, specifically for broadcast manager operations.
#[derive(Debug)]
pub struct CanBCMSocket {
pub fd: c_int,
}
pub struct BcmFrameStream {
io: PollEvented<CanBCMSocket>,
frame_buffer: VecDeque<CanFrame>,
}
impl BcmFrameStream {
pub fn new(socket: CanBCMSocket) -> BcmFrameStream {
BcmFrameStream {
io: PollEvented::new(socket),
frame_buffer: VecDeque::new(),
}
}
}
impl Stream for BcmFrameStream {
type Item = io::Result<CanFrame>;
fn poll_next(mut self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
// Frames may still be buffered from a previous message.
// In testing, read_msg practically never delivers more than one message at
// a time, so this buffer is almost never filled.
if let Some(frame) = self.frame_buffer.pop_front() {
return Poll::Ready(Some(Ok(frame)));
}
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => {
let mut frames = n.frames().to_vec();
if let Some(frame) = frames.pop() {
if !frames.is_empty() {
for frame in n.frames() {
self.frame_buffer.push_back(*frame)
}
}
Poll::Ready(Some(Ok(frame)))
} else {
// This happens e.g. when a timed out msg is received
self.io.clear_read_ready(lw)?;
Poll::Pending
}
}
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
impl mio::Evented for BcmFrameStream {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
self.io.get_ref().reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
self.io.get_ref().deregister(poll)
}
}
impl CanBCMSocket {
/// Open a named CAN device non blocking.
///
/// Usually the more common case, opens a socket can device by name, such
/// as "vcan0" or "socan0".
pub fn open_nb(ifname: &str) -> Result<CanBCMSocket, CanSocketOpenError> {
let if_index = if_nametoindex(ifname)?;
CanBCMSocket::open_if_nb(if_index)
}
/// Open CAN device by interface number non blocking.
///
/// Opens a CAN device by kernel interface number.
pub fn open_if_nb(if_index: c_uint) -> Result<CanBCMSocket, CanSocketOpenError> {
// open socket
let sock_fd;
unsafe {
sock_fd = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
}
if sock_fd == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let fcntl_resp = unsafe { fcntl(sock_fd, F_SETFL, O_NONBLOCK) };
if fcntl_resp == -1 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
let addr = CanAddr {
_af_can: AF_CAN as c_short,
if_index: if_index as c_int,
rx_id: 0, //?
tx_id: 0, //?
};
let sockaddr_ptr = &addr as *const CanAddr;
let connect_res;
unsafe {
connect_res = connect(
sock_fd,
sockaddr_ptr as *const sockaddr,
size_of::<CanAddr>() as u32,
);
}
if connect_res != 0 {
return Err(CanSocketOpenError::from(io::Error::last_os_error()));
}
Ok(CanBCMSocket { fd: sock_fd })
}
fn close(&mut self) -> io::Result<()> {
unsafe {
let rv = close(self.fd);
// close(2) returns -1 on failure.
if rv == -1 {
return Err(io::Error::last_os_error());
}
}
Ok(())
}
/// Create a content filter subscription, filtering can frames by can_id.
pub fn filter_id(
&self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<()> {
let _ival1 = c_timeval_new(ival1);
let _ival2 = c_timeval_new(ival2);
let frames = [CanFrame::new(CanMessageId::SFF(0u16), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = BcmMsgHeadFrameLess {
_opcode: RX_SETUP,
_flags: SETTIMER | RX_FILTER_ID,
_count: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_ival1: _ival1,
_ival2: _ival2,
_can_id: can_id.with_eff_bit(),
_nframes: 0,
};
let tx_msg = &TxMsg {
_msg_head: msg,
_frames: frames,
};
let tx_msg_ptr = tx_msg as *const TxMsg;
let write_rv = unsafe {
write(self.fd, tx_msg_ptr as *const c_void, size_of::<TxMsg>())
};
if write_rv < 0 {
return Err(Error::new(ErrorKind::WriteZero, io::Error::last_os_error()));
}
Ok(())
}
///
/// Combination of `CanBCMSocket::filter_id` and `CanBCMSocket::incoming_frames`.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let ival = time::Duration::from_millis(1);
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let f = socket.filter_id_incoming_frames(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap()
/// .map_err(|_| ())
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn filter_id_incoming_frames(
self,
can_id: CanMessageId,
ival1: time::Duration,
ival2: time::Duration,
) -> io::Result<BcmFrameStream> {
self.filter_id(can_id, ival1, ival2)?;
Ok(self.incoming_frames())
}
///
/// Stream of incoming BcmMsgHeads that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_msg()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|bcm_msg_head| {
/// println!("BcmMsgHead {:?}", bcm_msg_head);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_msg(self) -> BcmStream {
BcmStream::from(self)
}
///
/// Stream of incoming frames that apply to the filter criteria.
/// ```
/// extern crate futures;
/// extern crate tokio;
/// extern crate socketcan;
///
/// use futures::stream::Stream;
/// use tokio::prelude::*;
/// use std::time;
/// use socketcan::FrameFlags;
/// use socketcan_tokio::bcm::*;
///
/// let socket = CanBCMSocket::open_nb("vcan0").unwrap();
/// let ival = time::Duration::from_millis(1);
/// socket.filter_id(0x123, ival, ival, FrameFlags::EFF_FLAG).unwrap();
/// let f = socket.incoming_frames()
/// .map_err(|err| {
/// eprintln!("IO error {:?}", err)
/// })
/// .for_each(|frame| {
/// println!("Frame {:?}", frame);
/// Ok(())
/// });
/// tokio::run(f);
/// ```
///
pub fn incoming_frames(self) -> BcmFrameStream {
// let stream = BcmStream::from(self);
// stream
// .map(move |bcm_msg_head| {
// let v: Vec<CanFrame> = bcm_msg_head.frames().to_owned();
// futures::stream::iter_ok::<_, io::Error>(v)
// })
// .flatten()
BcmFrameStream::new(self)
}
/// Remove a content filter subscription.
pub fn
|
(&self, can_id: CanMessageId) -> io::Result<()> {
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let msg = &BcmMsgHead {
_opcode: RX_DELETE,
_flags: 0,
_count: 0,
_ival1: c_timeval_new(time::Duration::new(0, 0)),
_ival2: c_timeval_new(time::Duration::new(0, 0)),
_can_id: can_id.with_eff_bit(),
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = msg as *const BcmMsgHead;
let write_rv = unsafe {
write(self.fd, msg_ptr as *const c_void, size_of::<BcmMsgHead>())
};
let expected_size = size_of::<BcmMsgHead>() - size_of::<[CanFrame; MAX_NFRAMES as usize]>();
if write_rv as usize != expected_size {
let msg = format!("Wrote {} but expected {}", write_rv, expected_size);
return Err(Error::new(ErrorKind::WriteZero, msg));
}
Ok(())
}
/// Read a single BCM message head, including any appended CAN frames.
pub fn read_msg(&self) -> io::Result<BcmMsgHead> {
let ival1 = c_timeval_new(time::Duration::from_millis(0));
let ival2 = c_timeval_new(time::Duration::from_millis(0));
let frames = [CanFrame::new(CanMessageId::EFF(0x0), &[], false, false).unwrap(); MAX_NFRAMES as usize];
let mut msg = BcmMsgHead {
_opcode: 0,
_flags: 0,
_count: 0,
_ival1: ival1,
_ival2: ival2,
_can_id: 0,
_nframes: 0,
#[cfg(all(target_pointer_width = "32"))]
_pad: 0,
_frames: frames,
};
let msg_ptr = &mut msg as *mut BcmMsgHead;
let count = unsafe {
read(
self.fd,
msg_ptr as *mut c_void,
size_of::<BcmMsgHead>(),
)
};
let last_error = io::Error::last_os_error();
if count < 0 {
Err(last_error)
} else {
Ok(msg)
}
}
}
impl mio::Evented for CanBCMSocket {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).register(poll, token, interest, opts)
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).reregister(poll, token, interest, opts)
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
mio::unix::EventedFd(&self.fd).deregister(poll)
}
}
impl Drop for CanBCMSocket {
fn drop(&mut self) {
self.close().ok(); // ignore result
}
}
pub struct BcmStream {
io: PollEvented<CanBCMSocket>,
}
pub trait IntoBcmStream {
type Stream: futures::stream::Stream;
type Error;
fn into_bcm(self) -> Result<Self::Stream, Self::Error>;
}
impl BcmStream {
pub fn from(bcm_socket: CanBCMSocket) -> BcmStream {
let io = PollEvented::new(bcm_socket);
BcmStream { io: io }
}
}
impl Stream for BcmStream {
type Item = io::Result<BcmMsgHead>;
fn poll_next(self: Pin<&mut Self>, lw: &LocalWaker) -> Poll<Option<Self::Item>> {
ready!(self.io.poll_read_ready(lw)?);
match self.io.get_ref().read_msg() {
Ok(n) => Poll::Ready(Some(Ok(n))),
Err(e) => {
if e.kind() == io::ErrorKind::WouldBlock {
self.io.clear_read_ready(lw)?;
return Poll::Pending;
}
return Poll::Ready(Some(Err(e)));
}
}
}
}
|
filter_delete
|
identifier_name
|
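The masked identifier in the record above is filter_delete; elsewhere in the same impl, CanBCMSocket closes its descriptor in Drop through the private close() method. Since close(2) signals failure by returning -1, the error branch has to test rv == -1, as in the corrected listings above. A minimal sketch of that owned-descriptor pattern, assuming only libc and using a hypothetical OwnedFd type, is below.

extern crate libc;

use std::io;
use std::os::unix::io::RawFd;

/// Minimal owned-descriptor sketch (a hypothetical type, not the crate's):
/// the descriptor is closed explicitly or, at the latest, on drop.
struct OwnedFd {
    fd: RawFd,
}

impl OwnedFd {
    fn close(&mut self) -> io::Result<()> {
        let rv = unsafe { libc::close(self.fd) };
        // close(2) returns -1 on failure and 0 on success.
        if rv == -1 {
            return Err(io::Error::last_os_error());
        }
        Ok(())
    }
}

impl Drop for OwnedFd {
    fn drop(&mut self) {
        self.close().ok(); // ignore the result, as CanBCMSocket does
    }
}

fn main() {
    // Closing an invalid descriptor fails; the explicit call reports the
    // error, while drop would silently swallow it.
    let mut bogus = OwnedFd { fd: -1 };
    assert!(bogus.close().is_err());
    // Skip drop here so the demo does not close the same fd twice.
    std::mem::forget(bogus);
}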
f02-thread-id.rs
|
/// Figure 11.2 Printing thread IDs
///
/// $ f02-thread-id | sed 's/[0-9]//g; s/x[a-f]*//g'
/// main thread: pid tid ()
/// new thread: pid tid ()
extern crate libc;
extern crate apue;
use libc::{c_void, pthread_t};
use libc::{pthread_create, getpid, pthread_self, usleep};
use std::ptr::null_mut;
use apue::LibcResult;
fn printids(s: &str)
|
extern "C" fn thr_fn(_: *mut c_void) -> *mut c_void {
printids("new thread:");
0 as _
}
fn main() {
unsafe {
let mut ntid: pthread_t = std::mem::zeroed();
pthread_create(&mut ntid, null_mut(), thr_fn as _, null_mut()).check_zero()
.expect("can't create thread");
printids("main thread:");
usleep(100);
}
}
|
{
unsafe {
let pid = getpid();
let tid = pthread_self();
println!("{} pid {} tid {} (0x{:x})", s, pid, tid, tid);
}
}
|
identifier_body
|
f02-thread-id.rs
|
/// Figure 11.2 Printing thread IDs
///
/// $ f02-thread-id | sed 's/[0-9]//g; s/x[a-f]*//g'
/// main thread: pid tid ()
/// new thread: pid tid ()
extern crate libc;
extern crate apue;
use libc::{c_void, pthread_t};
use libc::{pthread_create, getpid, pthread_self, usleep};
use std::ptr::null_mut;
use apue::LibcResult;
fn printids(s: &str) {
unsafe {
let pid = getpid();
let tid = pthread_self();
println!("{} pid {} tid {} (0x{:x})", s, pid, tid, tid);
}
}
extern "C" fn
|
(_: *mut c_void) -> *mut c_void {
printids("new thread:");
0 as _
}
fn main() {
unsafe {
let mut ntid: pthread_t = std::mem::zeroed();
pthread_create(&mut ntid, null_mut(), thr_fn as _, null_mut()).check_zero()
.expect("can't create thread");
printids("main thread:");
usleep(100);
}
}
|
thr_fn
|
identifier_name
|
f02-thread-id.rs
|
/// Figure 11.2 Printing thread IDs
///
/// $ f02-thread-id | sed 's/[0-9]//g; s/x[a-f]*//g'
/// main thread: pid tid ()
/// new thread: pid tid ()
|
use std::ptr::null_mut;
use apue::LibcResult;
fn printids(s: &str) {
unsafe {
let pid = getpid();
let tid = pthread_self();
println!("{} pid {} tid {} (0x{:x})", s, pid, tid, tid);
}
}
extern "C" fn thr_fn(_: *mut c_void) -> *mut c_void {
printids("new thread:");
0 as _
}
fn main() {
unsafe {
let mut ntid: pthread_t = std::mem::zeroed();
pthread_create(&mut ntid, null_mut(), thr_fn as _, null_mut()).check_zero()
.expect("can't create thread");
printids("main thread:");
usleep(100);
}
}
|
extern crate libc;
extern crate apue;
use libc::{c_void, pthread_t};
use libc::{pthread_create, getpid, pthread_self, usleep};
|
random_line_split
|
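The f02-thread-id records above print thread IDs through raw pthread_create/pthread_self calls. A safe-Rust sketch of the same figure, using std::thread instead of libc, might look like this; ThreadId is opaque on stable Rust, so it is printed with Debug formatting rather than as a numeric TID.

use std::thread;

fn printids(s: &str) {
    // process::id gives the PID; ThreadId has no stable numeric accessor,
    // so it is printed with {:?} rather than as a pthread_t value.
    println!("{} pid {} tid {:?}", s, std::process::id(), thread::current().id());
}

fn main() {
    let handle = thread::spawn(|| printids("new thread:"));
    printids("main thread:");
    // Joining replaces the usleep(100) used in the libc version to keep the
    // process alive long enough for the new thread to print.
    handle.join().unwrap();
}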
type-sizes.rs
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem::size_of;
struct t {a: u8, b: i8}
struct u {a: u8, b: i8, c: u8}
struct v {a: u8, b: i8, c: v2, d: u32}
struct v2 {u: char, v: u8}
struct w {a: isize, b: ()}
struct x {a: isize, b: (), c: ()}
struct y {x: isize}
enum e1 {
a(u8, u32), b(u32), c
}
enum e2 {
a(u32), b
}
enum e3 {
a([u16; 0], u8), b
}
pub fn main() {
assert_eq!(size_of::<u8>(), 1 as usize);
assert_eq!(size_of::<u32>(), 4 as usize);
assert_eq!(size_of::<char>(), 4 as usize);
assert_eq!(size_of::<i8>(), 1 as usize);
assert_eq!(size_of::<i32>(), 4 as usize);
assert_eq!(size_of::<t>(), 2 as usize);
assert_eq!(size_of::<u>(), 3 as usize);
// Alignment causes padding before the char and the u32.
assert_eq!(size_of::<v>(),
16 as usize);
assert_eq!(size_of::<isize>(), size_of::<usize>());
assert_eq!(size_of::<w>(), size_of::<isize>());
assert_eq!(size_of::<x>(), size_of::<isize>());
assert_eq!(size_of::<isize>(), size_of::<y>());
// Make sure enum types are the appropriate size, mostly
// around ensuring alignment is handled properly
assert_eq!(size_of::<e1>(), 8 as usize);
assert_eq!(size_of::<e2>(), 8 as usize);
assert_eq!(size_of::<e3>(), 4 as usize);
}
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
|
random_line_split
|
|
type-sizes.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::mem::size_of;
struct t {a: u8, b: i8}
struct u {a: u8, b: i8, c: u8}
struct v {a: u8, b: i8, c: v2, d: u32}
struct v2 {u: char, v: u8}
struct
|
{a: isize, b: ()}
struct x {a: isize, b: (), c: ()}
struct y {x: isize}
enum e1 {
a(u8, u32), b(u32), c
}
enum e2 {
a(u32), b
}
enum e3 {
a([u16; 0], u8), b
}
pub fn main() {
assert_eq!(size_of::<u8>(), 1 as usize);
assert_eq!(size_of::<u32>(), 4 as usize);
assert_eq!(size_of::<char>(), 4 as usize);
assert_eq!(size_of::<i8>(), 1 as usize);
assert_eq!(size_of::<i32>(), 4 as usize);
assert_eq!(size_of::<t>(), 2 as usize);
assert_eq!(size_of::<u>(), 3 as usize);
// Alignment causes padding before the char and the u32.
assert_eq!(size_of::<v>(),
16 as usize);
assert_eq!(size_of::<isize>(), size_of::<usize>());
assert_eq!(size_of::<w>(), size_of::<isize>());
assert_eq!(size_of::<x>(), size_of::<isize>());
assert_eq!(size_of::<isize>(), size_of::<y>());
// Make sure enum types are the appropriate size, mostly
// around ensuring alignment is handled properly
assert_eq!(size_of::<e1>(), 8 as usize);
assert_eq!(size_of::<e2>(), 8 as usize);
assert_eq!(size_of::<e3>(), 4 as usize);
}
|
w
|
identifier_name
|
test_fcntl.rs
|
use nix::fcntl::{openat, open, OFlag, O_RDONLY, readlink, readlinkat};
use nix::sys::stat::Mode;
use nix::unistd::{close, read};
use tempdir::TempDir;
use tempfile::NamedTempFile;
use std::io::prelude::*;
use std::os::unix::fs;
#[test]
fn test_openat()
|
#[test]
fn test_readlink() {
let tempdir = TempDir::new("nix-test_readdir")
.unwrap_or_else(|e| panic!("tempdir failed: {}", e));
let src = tempdir.path().join("a");
let dst = tempdir.path().join("b");
println!("a: {:?}, b: {:?}", &src, &dst);
fs::symlink(&src.as_path(), &dst.as_path()).unwrap();
let dirfd = open(tempdir.path(),
OFlag::empty(),
Mode::empty()).unwrap();
let mut buf = vec![0; src.to_str().unwrap().len() + 1];
assert_eq!(readlink(&dst, &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
assert_eq!(readlinkat(dirfd, "b", &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
}
#[cfg(any(target_os = "linux", target_os = "android"))]
mod linux_android {
use std::io::prelude::*;
use std::os::unix::prelude::*;
use libc::loff_t;
use nix::fcntl::{SpliceFFlags, splice, tee, vmsplice};
use nix::sys::uio::IoVec;
use nix::unistd::{close, pipe, read, write};
use tempfile::tempfile;
#[test]
fn test_splice() {
const CONTENTS: &'static [u8] = b"abcdef123456";
let mut tmp = tempfile().unwrap();
tmp.write(CONTENTS).unwrap();
let (rd, wr) = pipe().unwrap();
let mut offset: loff_t = 5;
let res = splice(tmp.as_raw_fd(), Some(&mut offset),
wr, None, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
assert_eq!(2, read(rd, &mut buf).unwrap());
assert_eq!(b"f1", &buf[0..2]);
assert_eq!(7, offset);
close(rd).unwrap();
close(wr).unwrap();
}
#[test]
fn test_tee() {
let (rd1, wr1) = pipe().unwrap();
let (rd2, wr2) = pipe().unwrap();
write(wr1, b"abc").unwrap();
let res = tee(rd1, wr2, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
// Check the tee'd bytes are at rd2.
assert_eq!(2, read(rd2, &mut buf).unwrap());
assert_eq!(b"ab", &buf[0..2]);
// Check all the bytes are still at rd1.
assert_eq!(3, read(rd1, &mut buf).unwrap());
assert_eq!(b"abc", &buf[0..3]);
close(rd1).unwrap();
close(wr1).unwrap();
close(rd2).unwrap();
close(wr2).unwrap();
}
#[test]
fn test_vmsplice() {
let (rd, wr) = pipe().unwrap();
let buf1 = b"abcdef";
let buf2 = b"defghi";
let mut iovecs = Vec::with_capacity(2);
iovecs.push(IoVec::from_slice(&buf1[0..3]));
iovecs.push(IoVec::from_slice(&buf2[0..3]));
let res = vmsplice(wr, &iovecs[..], SpliceFFlags::empty()).unwrap();
assert_eq!(6, res);
// Check the bytes can be read at rd.
let mut buf = [0u8; 32];
assert_eq!(6, read(rd, &mut buf).unwrap());
assert_eq!(b"abcdef", &buf[0..6]);
close(rd).unwrap();
close(wr).unwrap();
}
}
|
{
const CONTENTS: &'static [u8] = b"abcd";
let mut tmp = NamedTempFile::new().unwrap();
tmp.write(CONTENTS).unwrap();
let dirfd = open(tmp.path().parent().unwrap(),
OFlag::empty(),
Mode::empty()).unwrap();
let fd = openat(dirfd,
tmp.path().file_name().unwrap(),
O_RDONLY,
Mode::empty()).unwrap();
let mut buf = [0u8; 1024];
assert_eq!(4, read(fd, &mut buf).unwrap());
assert_eq!(CONTENTS, &buf[0..4]);
close(fd).unwrap();
close(dirfd).unwrap();
}
|
identifier_body
|
test_fcntl.rs
|
use nix::fcntl::{openat, open, OFlag, O_RDONLY, readlink, readlinkat};
use nix::sys::stat::Mode;
use nix::unistd::{close, read};
use tempdir::TempDir;
use tempfile::NamedTempFile;
use std::io::prelude::*;
use std::os::unix::fs;
#[test]
fn test_openat() {
const CONTENTS: &'static [u8] = b"abcd";
let mut tmp = NamedTempFile::new().unwrap();
tmp.write(CONTENTS).unwrap();
let dirfd = open(tmp.path().parent().unwrap(),
OFlag::empty(),
Mode::empty()).unwrap();
let fd = openat(dirfd,
tmp.path().file_name().unwrap(),
O_RDONLY,
Mode::empty()).unwrap();
let mut buf = [0u8; 1024];
assert_eq!(4, read(fd, &mut buf).unwrap());
assert_eq!(CONTENTS, &buf[0..4]);
close(fd).unwrap();
close(dirfd).unwrap();
}
#[test]
fn test_readlink() {
let tempdir = TempDir::new("nix-test_readdir")
.unwrap_or_else(|e| panic!("tempdir failed: {}", e));
let src = tempdir.path().join("a");
let dst = tempdir.path().join("b");
println!("a: {:?}, b: {:?}", &src, &dst);
fs::symlink(&src.as_path(), &dst.as_path()).unwrap();
let dirfd = open(tempdir.path(),
OFlag::empty(),
Mode::empty()).unwrap();
let mut buf = vec![0; src.to_str().unwrap().len() + 1];
assert_eq!(readlink(&dst, &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
assert_eq!(readlinkat(dirfd, "b", &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
}
#[cfg(any(target_os = "linux", target_os = "android"))]
mod linux_android {
use std::io::prelude::*;
use std::os::unix::prelude::*;
use libc::loff_t;
use nix::fcntl::{SpliceFFlags, splice, tee, vmsplice};
use nix::sys::uio::IoVec;
use nix::unistd::{close, pipe, read, write};
use tempfile::tempfile;
#[test]
fn test_splice() {
const CONTENTS: &'static [u8] = b"abcdef123456";
let mut tmp = tempfile().unwrap();
tmp.write(CONTENTS).unwrap();
let (rd, wr) = pipe().unwrap();
let mut offset: loff_t = 5;
let res = splice(tmp.as_raw_fd(), Some(&mut offset),
wr, None, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
assert_eq!(2, read(rd, &mut buf).unwrap());
assert_eq!(b"f1", &buf[0..2]);
assert_eq!(7, offset);
close(rd).unwrap();
close(wr).unwrap();
}
#[test]
fn test_tee() {
let (rd1, wr1) = pipe().unwrap();
let (rd2, wr2) = pipe().unwrap();
write(wr1, b"abc").unwrap();
let res = tee(rd1, wr2, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
// Check the tee'd bytes are at rd2.
assert_eq!(2, read(rd2, &mut buf).unwrap());
assert_eq!(b"ab", &buf[0..2]);
// Check all the bytes are still at rd1.
assert_eq!(3, read(rd1, &mut buf).unwrap());
assert_eq!(b"abc", &buf[0..3]);
close(rd1).unwrap();
close(wr1).unwrap();
close(rd2).unwrap();
close(wr2).unwrap();
}
#[test]
fn
|
() {
let (rd, wr) = pipe().unwrap();
let buf1 = b"abcdef";
let buf2 = b"defghi";
let mut iovecs = Vec::with_capacity(2);
iovecs.push(IoVec::from_slice(&buf1[0..3]));
iovecs.push(IoVec::from_slice(&buf2[0..3]));
let res = vmsplice(wr, &iovecs[..], SpliceFFlags::empty()).unwrap();
assert_eq!(6, res);
// Check the bytes can be read at rd.
let mut buf = [0u8; 32];
assert_eq!(6, read(rd, &mut buf).unwrap());
assert_eq!(b"abcdef", &buf[0..6]);
close(rd).unwrap();
close(wr).unwrap();
}
}
|
test_vmsplice
|
identifier_name
|
test_fcntl.rs
|
use nix::fcntl::{openat, open, OFlag, O_RDONLY, readlink, readlinkat};
use nix::sys::stat::Mode;
use nix::unistd::{close, read};
use tempdir::TempDir;
use tempfile::NamedTempFile;
use std::io::prelude::*;
use std::os::unix::fs;
#[test]
fn test_openat() {
const CONTENTS: &'static [u8] = b"abcd";
let mut tmp = NamedTempFile::new().unwrap();
tmp.write(CONTENTS).unwrap();
let dirfd = open(tmp.path().parent().unwrap(),
OFlag::empty(),
Mode::empty()).unwrap();
let fd = openat(dirfd,
tmp.path().file_name().unwrap(),
O_RDONLY,
Mode::empty()).unwrap();
let mut buf = [0u8; 1024];
assert_eq!(4, read(fd, &mut buf).unwrap());
assert_eq!(CONTENTS, &buf[0..4]);
close(fd).unwrap();
close(dirfd).unwrap();
}
#[test]
fn test_readlink() {
let tempdir = TempDir::new("nix-test_readdir")
.unwrap_or_else(|e| panic!("tempdir failed: {}", e));
let src = tempdir.path().join("a");
let dst = tempdir.path().join("b");
println!("a: {:?}, b: {:?}", &src, &dst);
fs::symlink(&src.as_path(), &dst.as_path()).unwrap();
let dirfd = open(tempdir.path(),
OFlag::empty(),
Mode::empty()).unwrap();
let mut buf = vec![0; src.to_str().unwrap().len() + 1];
assert_eq!(readlink(&dst, &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
assert_eq!(readlinkat(dirfd, "b", &mut buf).unwrap().to_str().unwrap(),
src.to_str().unwrap());
}
#[cfg(any(target_os = "linux", target_os = "android"))]
mod linux_android {
use std::io::prelude::*;
use std::os::unix::prelude::*;
use libc::loff_t;
use nix::fcntl::{SpliceFFlags, splice, tee, vmsplice};
use nix::sys::uio::IoVec;
use nix::unistd::{close, pipe, read, write};
use tempfile::tempfile;
#[test]
fn test_splice() {
const CONTENTS: &'static [u8] = b"abcdef123456";
let mut tmp = tempfile().unwrap();
tmp.write(CONTENTS).unwrap();
let (rd, wr) = pipe().unwrap();
let mut offset: loff_t = 5;
let res = splice(tmp.as_raw_fd(), Some(&mut offset),
wr, None, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
assert_eq!(2, read(rd, &mut buf).unwrap());
assert_eq!(b"f1", &buf[0..2]);
assert_eq!(7, offset);
close(rd).unwrap();
close(wr).unwrap();
}
#[test]
fn test_tee() {
let (rd1, wr1) = pipe().unwrap();
let (rd2, wr2) = pipe().unwrap();
write(wr1, b"abc").unwrap();
let res = tee(rd1, wr2, 2, SpliceFFlags::empty()).unwrap();
assert_eq!(2, res);
let mut buf = [0u8; 1024];
// Check the tee'd bytes are at rd2.
assert_eq!(2, read(rd2, &mut buf).unwrap());
assert_eq!(b"ab", &buf[0..2]);
// Check all the bytes are still at rd1.
assert_eq!(3, read(rd1, &mut buf).unwrap());
assert_eq!(b"abc", &buf[0..3]);
close(rd1).unwrap();
close(wr1).unwrap();
close(rd2).unwrap();
close(wr2).unwrap();
}
#[test]
fn test_vmsplice() {
|
let mut iovecs = Vec::with_capacity(2);
iovecs.push(IoVec::from_slice(&buf1[0..3]));
iovecs.push(IoVec::from_slice(&buf2[0..3]));
let res = vmsplice(wr, &iovecs[..], SpliceFFlags::empty()).unwrap();
assert_eq!(6, res);
// Check the bytes can be read at rd.
let mut buf = [0u8; 32];
assert_eq!(6, read(rd, &mut buf).unwrap());
assert_eq!(b"abcdef", &buf[0..6]);
close(rd).unwrap();
close(wr).unwrap();
}
}
|
let (rd, wr) = pipe().unwrap();
let buf1 = b"abcdef";
let buf2 = b"defghi";
|
random_line_split
|
cleanup-arm-conditional.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that cleanup scope for temporaries created in a match
// arm is confined to the match arm itself.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax, os)]
use std::os;
struct Test { x: isize }
impl Test {
fn get_x(&self) -> Option<Box<isize>>
|
}
fn do_something(t: &Test) -> isize {
// The cleanup scope for the result of `t.get_x()` should be the
// arm itself and not the match, otherwise we'll (potentially) get
// a crash trying to free an uninitialized stack slot.
match t {
&Test { x: 2 } if t.get_x().is_some() => {
t.x * 2
}
_ => { 22 }
}
}
pub fn main() {
let t = Test { x: 1 };
do_something(&t);
}
|
{
Some(box self.x)
}
|
identifier_body
|
cleanup-arm-conditional.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that cleanup scope for temporaries created in a match
// arm is confined to the match arm itself.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax, os)]
use std::os;
struct Test { x: isize }
impl Test {
fn get_x(&self) -> Option<Box<isize>> {
Some(box self.x)
}
}
fn do_something(t: &Test) -> isize {
// The cleanup scope for the result of `t.get_x()` should be the
// arm itself and not the match, otherwise we'll (potentially) get
// a crash trying to free an uninitialized stack slot.
match t {
&Test { x: 2 } if t.get_x().is_some() => {
t.x * 2
}
_ => { 22 }
}
}
pub fn main() {
let t = Test { x: 1 };
do_something(&t);
}
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
random_line_split
|
cleanup-arm-conditional.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test that cleanup scope for temporaries created in a match
// arm is confined to the match arm itself.
// pretty-expanded FIXME #23616
#![allow(unknown_features)]
#![feature(box_syntax, os)]
use std::os;
struct Test { x: isize }
impl Test {
fn get_x(&self) -> Option<Box<isize>> {
Some(box self.x)
}
}
fn do_something(t: &Test) -> isize {
// The cleanup scope for the result of `t.get_x()` should be the
// arm itself and not the match, otherwise we'll (potentially) get
// a crash trying to free an uninitialized stack slot.
match t {
&Test { x: 2 } if t.get_x().is_some() => {
t.x * 2
}
_ => { 22 }
}
}
pub fn
|
() {
let t = Test { x: 1 };
do_something(&t);
}
|
main
|
identifier_name
|
mod.rs
|
//! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surface objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! manner: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients,
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simply use the [`wl_shell_init`] function provided in this module.
//! You need to provide a closure that will be invoked whenever some action is required from you;
//! these actions are represented by the [`ShellRequest`] enum.
//!
//! ```no_run
//! # extern crate wayland_server;
//! #
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellRequest};
//!
//! # let mut display = wayland_server::Display::new();
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // your implementation
//! |event: ShellRequest, dispatch_data| { /* handle the shell requests here */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::{
utils::{Logical, Point, Size},
wayland::{compositor, Serial},
};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
DispatchData, Display, Filter, Global,
};
use super::PingError;
/// The role of a wl_shell_surface.
pub const WL_SHELL_SURFACE_ROLE: &str = "wl_shell_surface";
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
#[derive(Debug)]
pub struct ShellSurfaceAttributes {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: Option<Serial>,
}
/// A handle to a shell surface
#[derive(Debug, Clone)]
pub struct ShellSurface {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
}
impl std::cmp::PartialEq for ShellSurface {
fn eq(&self, other: &Self) -> bool {
self.shell_surface == other.shell_surface
}
}
impl ShellSurface {
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> {
if self.alive() {
Some(&self.wl_surface)
} else {
None
}
}
/// Send a ping request to this shell surface
///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: Serial) -> Result<(), PingError> {
if !self.alive() {
return Err(PingError::DeadSurface);
}
compositor::with_states(&self.wl_surface, |states| {
let mut data = states
.data_map
.get::<Mutex<ShellSurfaceAttributes>>()
.unwrap()
.lock()
.unwrap();
if let Some(pending_ping) = data.pending_ping {
return Err(PingError::PingAlreadyPending(pending_ping));
}
data.pending_ping = Some(serial);
Ok(())
})
.unwrap()?;
self.shell_surface.ping(serial.into());
Ok(())
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: Size<i32, Logical>, edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.w, size.h)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
#[derive(Debug)]
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible when their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: Point<i32, Logical>,
/// Whether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred to as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: Serial,
/// Whether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: Point<i32, Logical>,
/// Seat associated with the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
#[derive(Debug)]
pub enum ShellRequest {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
|
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface,
/// Serial of the implicit grab that initiated the move
serial: Serial,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface,
/// Serial of the implicit grab that initiated the resize
serial: Serial,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
#[derive(Debug)]
pub struct ShellState {
known_surfaces: Vec<ShellSurface>,
}
impl ShellState {
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface] {
&self.known_surfaces[..]
}
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<L, Impl>(
display: &mut Display,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState>>, Global<wl_shell::WlShell>)
where
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest, DispatchData<'_>) + 'static,
{
let _log = crate::slog_or_fallback(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(
1,
Filter::new(move |(shell, _version), _, _data| {
self::wl_handlers::implement_shell(shell, implementation.clone(), state2.clone());
}),
);
(state, global)
}
|
surface: ShellSurface,
|
random_line_split
|
mod.rs
|
//! Utilities for handling shell surfaces with the `wl_shell` protocol
//!
//! This module provides automatic handling of shell surface objects, by being registered
//! as a global handler for `wl_shell`. This protocol is deprecated in favor of `xdg_shell`,
//! thus this module is provided as a compatibility layer with older clients. As a consequence,
//! you can as a compositor-writer decide to only support its functionality in a best-effort
//! manner: as this global is part of the core protocol, you are still required to provide
//! some support for it.
//!
//! ## Why use this implementation
//!
//! This implementation can track for you the various shell surfaces defined by the
//! clients by handling the `wl_shell` protocol.
//!
//! It allows you to easily access a list of all shell surfaces defined by your clients,
//! access their associated metadata and underlying `wl_surface`s.
//!
//! This handler only handles the protocol exchanges with the client to present you the
//! information in a coherent and relatively easy to use manner. All the actual drawing
//! and positioning logic of windows is out of its scope.
//!
//! ## How to use it
//!
//! ### Initialization
//!
//! To initialize this handler, simply use the [`wl_shell_init`] function provided in this module.
//! You need to provide a closure that will be invoked whenever some action is required from you;
//! these actions are represented by the [`ShellRequest`] enum.
//!
//! ```no_run
//! # extern crate wayland_server;
//! #
//! use smithay::wayland::shell::legacy::{wl_shell_init, ShellRequest};
//!
//! # let mut display = wayland_server::Display::new();
//! let (shell_state, _) = wl_shell_init(
//! &mut display,
//! // your implementation
//! |event: ShellRequest, dispatch_data| { /* handle the shell requests here */ },
//! None // put a logger if you want
//! );
//!
//! // You're now ready to go!
//! ```
use std::{
cell::RefCell,
rc::Rc,
sync::{Arc, Mutex},
};
use crate::{
utils::{Logical, Point, Size},
wayland::{compositor, Serial},
};
use wayland_server::{
protocol::{wl_output, wl_seat, wl_shell, wl_shell_surface, wl_surface},
DispatchData, Display, Filter, Global,
};
use super::PingError;
/// The role of a wl_shell_surface.
pub const WL_SHELL_SURFACE_ROLE: &str = "wl_shell_surface";
mod wl_handlers;
/// Metadata associated with the `wl_surface` role
#[derive(Debug)]
pub struct ShellSurfaceAttributes {
/// Title of the surface
pub title: String,
/// Class of the surface
pub class: String,
pending_ping: Option<Serial>,
}
/// A handle to a shell surface
#[derive(Debug, Clone)]
pub struct ShellSurface {
wl_surface: wl_surface::WlSurface,
shell_surface: wl_shell_surface::WlShellSurface,
}
impl std::cmp::PartialEq for ShellSurface {
fn eq(&self, other: &Self) -> bool {
self.shell_surface == other.shell_surface
}
}
impl ShellSurface {
/// Is the shell surface referred by this handle still alive?
pub fn alive(&self) -> bool {
self.shell_surface.as_ref().is_alive() && self.wl_surface.as_ref().is_alive()
}
/// Access the underlying `wl_surface` of this toplevel surface
///
/// Returns `None` if the toplevel surface actually no longer exists.
pub fn get_surface(&self) -> Option<&wl_surface::WlSurface> {
if self.alive() {
Some(&self.wl_surface)
} else {
None
}
}
/// Send a ping request to this shell surface
///
/// You'll receive the reply as a [`ShellRequest::Pong`] request
///
/// A typical use is to start a timer at the same time you send this ping
/// request, and cancel it when you receive the pong. If the timer runs
/// down to 0 before a pong is received, mark the client as unresponsive.
///
/// Fails if this shell client already has a pending ping or is already dead.
pub fn send_ping(&self, serial: Serial) -> Result<(), PingError> {
if !self.alive() {
return Err(PingError::DeadSurface);
}
compositor::with_states(&self.wl_surface, |states| {
let mut data = states
.data_map
.get::<Mutex<ShellSurfaceAttributes>>()
.unwrap()
.lock()
.unwrap();
if let Some(pending_ping) = data.pending_ping {
return Err(PingError::PingAlreadyPending(pending_ping));
}
data.pending_ping = Some(serial);
Ok(())
})
.unwrap()?;
self.shell_surface.ping(serial.into());
Ok(())
}
/// Send a configure event to this toplevel surface to suggest it a new configuration
pub fn send_configure(&self, size: Size<i32, Logical>, edges: wl_shell_surface::Resize) {
self.shell_surface.configure(edges, size.w, size.h)
}
/// Signal a popup surface that it has lost focus
pub fn send_popup_done(&self) {
self.shell_surface.popup_done()
}
}
/// Possible kinds of shell surface of the `wl_shell` protocol
#[derive(Debug)]
pub enum ShellSurfaceKind {
/// Toplevel, a regular window displayed somewhere in the compositor space
Toplevel,
/// Transient, this surface has a parent surface
///
/// These are sub-windows of an application (for example a configuration window),
/// and as such should only be visible when their parent window is, and on top of it.
Transient {
/// The surface considered as parent
parent: wl_surface::WlSurface,
/// Location relative to the parent
location: Point<i32, Logical>,
/// Whether this window should be marked as inactive
inactive: bool,
},
/// Fullscreen surface, covering an entire output
Fullscreen {
/// Method used for fullscreen
method: wl_shell_surface::FullscreenMethod,
/// Framerate (relevant only for driver fullscreen)
framerate: u32,
/// Requested output if any
output: Option<wl_output::WlOutput>,
},
/// A popup surface
///
/// Short-lived surface, typically referred to as "tooltips" in many
/// contexts.
Popup {
/// The parent surface of this popup
parent: wl_surface::WlSurface,
/// The serial of the input event triggering the creation of this
/// popup
serial: Serial,
/// Whether this popup should be marked as inactive
inactive: bool,
/// Location of the popup relative to its parent
location: Point<i32, Logical>,
/// Seat associated with the input that triggered the creation of the
/// popup. Used to define when the "popup done" event is sent.
seat: wl_seat::WlSeat,
},
/// A maximized surface
///
/// Like a toplevel surface, but as big as possible on a single output
/// while keeping any relevant desktop-environment interface visible.
Maximized {
/// Requested output for maximization
output: Option<wl_output::WlOutput>,
},
}
/// A request triggered by a `wl_shell_surface`
#[derive(Debug)]
pub enum ShellRequest {
/// A new shell surface was created
///
/// by default it has no kind and this should not be displayed
NewShellSurface {
/// The created surface
surface: ShellSurface,
},
/// A pong event
///
/// The surface responded to its pending ping. If you receive this
/// event, smithay has already checked that the responded serial was valid.
Pong {
/// The surface that sent the pong
surface: ShellSurface,
},
/// Start of an interactive move
///
/// The surface requests that an interactive move is started on it
Move {
/// The surface requesting the move
surface: ShellSurface,
/// Serial of the implicit grab that initiated the move
serial: Serial,
/// Seat associated with the move
seat: wl_seat::WlSeat,
},
/// Start of an interactive resize
///
/// The surface requests that an interactive resize is started on it
Resize {
/// The surface requesting the resize
surface: ShellSurface,
/// Serial of the implicit grab that initiated the resize
serial: Serial,
/// Seat associated with the resize
seat: wl_seat::WlSeat,
/// Direction of the resize
edges: wl_shell_surface::Resize,
},
/// The surface changed its kind
SetKind {
/// The surface
surface: ShellSurface,
/// Its new kind
kind: ShellSurfaceKind,
},
}
/// Shell global state
///
/// This state allows you to retrieve a list of surfaces
/// currently known to the shell global.
#[derive(Debug)]
pub struct ShellState {
known_surfaces: Vec<ShellSurface>,
}
impl ShellState {
/// Cleans the internal surface storage by removing all dead surfaces
pub(crate) fn cleanup_surfaces(&mut self) {
self.known_surfaces.retain(|s| s.alive());
}
/// Access all the shell surfaces known by this handler
pub fn surfaces(&self) -> &[ShellSurface]
|
}
/// Create a new `wl_shell` global
pub fn wl_shell_init<L, Impl>(
display: &mut Display,
implementation: Impl,
logger: L,
) -> (Arc<Mutex<ShellState>>, Global<wl_shell::WlShell>)
where
L: Into<Option<::slog::Logger>>,
Impl: FnMut(ShellRequest, DispatchData<'_>) + 'static,
{
let _log = crate::slog_or_fallback(logger);
let implementation = Rc::new(RefCell::new(implementation));
let state = Arc::new(Mutex::new(ShellState {
known_surfaces: Vec::new(),
}));
let state2 = state.clone();
let global = display.create_global(
1,
Filter::new(move |(shell, _version), _, _data| {
self::wl_handlers::implement_shell(shell, implementation.clone(), state2.clone());
}),
);
(state, global)
}
|
{
&self.known_surfaces[..]
}
|
identifier_body
|