file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(dead_code)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(unused_variables)]
#![deny(unsafe_code)]
#[macro_use]
extern crate serde;
use style::properties::ComputedValues;
use style::values::computed::{Length, LengthOrAuto};
use style::Zero;
pub mod context;
pub mod data;
mod dom_traversal;
mod element_data;
mod flow;
mod fragments;
mod geom;
mod opaque_node;
mod positioned;
pub mod query;
mod replaced;
mod style_ext;
pub mod traversal;
pub mod wrapper;
pub use flow::BoxTreeRoot;
use crate::dom_traversal::{Contents, NodeExt};
use crate::flow::{BlockFormattingContext, FlowChildren};
use crate::geom::flow_relative::Vec2;
use crate::positioned::AbsolutelyPositionedFragment;
use crate::replaced::ReplacedContent;
use crate::style_ext::{ComputedValuesExt, Direction, Position, WritingMode};
use servo_arc::Arc;
use std::convert::TryInto;
use style::context::SharedStyleContext;
use style::values::specified::box_::DisplayInside;
/// https://drafts.csswg.org/css-display/#independent-formatting-context
#[derive(Debug)]
enum IndependentFormattingContext {
Flow(BlockFormattingContext),
// Not called FC in specs, but behaves close enough
Replaced(ReplacedContent),
// Other layout modes go here
}
enum NonReplacedIFC<'a> {
Flow(&'a BlockFormattingContext),
}
impl IndependentFormattingContext {
fn construct<'dom,'style>(
context: &SharedStyleContext<'style>,
style: &Arc<ComputedValues>,
display_inside: DisplayInside,
contents: Contents<impl NodeExt<'dom>>,
) -> Self {
match contents.try_into() {
Ok(non_replaced) => match display_inside {
DisplayInside::Flow | DisplayInside::FlowRoot => {
IndependentFormattingContext::Flow(BlockFormattingContext::construct(
context,
style,
non_replaced,
))
},
DisplayInside::None | DisplayInside::Contents => panic!(":("),
},
Err(replaced) => IndependentFormattingContext::Replaced(replaced),
}
}
fn as_replaced(&self) -> Result<&ReplacedContent, NonReplacedIFC>
|
fn layout<'a>(
&'a self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self.as_replaced() {
Ok(replaced) => match *replaced {},
Err(ifc) => ifc.layout(containing_block, tree_rank, absolutely_positioned_fragments),
}
}
}
impl<'a> NonReplacedIFC<'a> {
fn layout(
&self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self {
NonReplacedIFC::Flow(bfc) => {
bfc.layout(containing_block, tree_rank, absolutely_positioned_fragments)
},
}
}
}
struct ContainingBlock {
inline_size: Length,
block_size: LengthOrAuto,
mode: (WritingMode, Direction),
}
struct DefiniteContainingBlock {
size: Vec2<Length>,
mode: (WritingMode, Direction),
}
/// https://drafts.csswg.org/css2/visuren.html#relative-positioning
fn relative_adjustement(
style: &ComputedValues,
inline_size: Length,
block_size: LengthOrAuto,
) -> Vec2<Length> {
if style.get_box().position!= Position::Relative {
return Vec2::zero();
}
fn adjust(start: LengthOrAuto, end: LengthOrAuto) -> Length {
match (start, end) {
(LengthOrAuto::Auto, LengthOrAuto::Auto) => Length::zero(),
(LengthOrAuto::Auto, LengthOrAuto::LengthPercentage(end)) => -end,
(LengthOrAuto::LengthPercentage(start), _) => start,
}
}
let block_size = block_size.auto_is(Length::zero);
let box_offsets = style.box_offsets().map_inline_and_block_axes(
|v| v.percentage_relative_to(inline_size),
|v| v.percentage_relative_to(block_size),
);
Vec2 {
inline: adjust(box_offsets.inline_start, box_offsets.inline_end),
block: adjust(box_offsets.block_start, box_offsets.block_end),
}
}
// FIXME: use std::mem::take when it’s stable.
// https://github.com/rust-lang/rust/issues/61129
fn take<T>(x: &mut T) -> T
where
T: Default,
{
std::mem::replace(x, Default::default())
}
|
{
match self {
IndependentFormattingContext::Replaced(r) => Ok(r),
IndependentFormattingContext::Flow(f) => Err(NonReplacedIFC::Flow(f)),
}
}
|
identifier_body
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(dead_code)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(unused_variables)]
#![deny(unsafe_code)]
#[macro_use]
extern crate serde;
use style::properties::ComputedValues;
use style::values::computed::{Length, LengthOrAuto};
use style::Zero;
pub mod context;
pub mod data;
mod dom_traversal;
mod element_data;
mod flow;
mod fragments;
mod geom;
mod opaque_node;
mod positioned;
pub mod query;
mod replaced;
mod style_ext;
pub mod traversal;
pub mod wrapper;
pub use flow::BoxTreeRoot;
use crate::dom_traversal::{Contents, NodeExt};
use crate::flow::{BlockFormattingContext, FlowChildren};
use crate::geom::flow_relative::Vec2;
use crate::positioned::AbsolutelyPositionedFragment;
use crate::replaced::ReplacedContent;
use crate::style_ext::{ComputedValuesExt, Direction, Position, WritingMode};
use servo_arc::Arc;
use std::convert::TryInto;
use style::context::SharedStyleContext;
use style::values::specified::box_::DisplayInside;
/// https://drafts.csswg.org/css-display/#independent-formatting-context
#[derive(Debug)]
enum IndependentFormattingContext {
Flow(BlockFormattingContext),
// Not called FC in specs, but behaves close enough
Replaced(ReplacedContent),
// Other layout modes go here
}
enum NonReplacedIFC<'a> {
Flow(&'a BlockFormattingContext),
}
impl IndependentFormattingContext {
fn construct<'dom,'style>(
context: &SharedStyleContext<'style>,
style: &Arc<ComputedValues>,
display_inside: DisplayInside,
contents: Contents<impl NodeExt<'dom>>,
) -> Self {
match contents.try_into() {
Ok(non_replaced) => match display_inside {
DisplayInside::Flow | DisplayInside::FlowRoot => {
IndependentFormattingContext::Flow(BlockFormattingContext::construct(
context,
style,
non_replaced,
))
},
DisplayInside::None | DisplayInside::Contents => panic!(":("),
},
Err(replaced) => IndependentFormattingContext::Replaced(replaced),
}
}
fn as_replaced(&self) -> Result<&ReplacedContent, NonReplacedIFC> {
match self {
IndependentFormattingContext::Replaced(r) => Ok(r),
IndependentFormattingContext::Flow(f) => Err(NonReplacedIFC::Flow(f)),
}
}
fn layout<'a>(
&'a self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self.as_replaced() {
Ok(replaced) => match *replaced {},
Err(ifc) => ifc.layout(containing_block, tree_rank, absolutely_positioned_fragments),
}
}
}
impl<'a> NonReplacedIFC<'a> {
fn layout(
&self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self {
NonReplacedIFC::Flow(bfc) => {
bfc.layout(containing_block, tree_rank, absolutely_positioned_fragments)
},
}
}
}
struct
|
{
inline_size: Length,
block_size: LengthOrAuto,
mode: (WritingMode, Direction),
}
struct DefiniteContainingBlock {
size: Vec2<Length>,
mode: (WritingMode, Direction),
}
/// https://drafts.csswg.org/css2/visuren.html#relative-positioning
fn relative_adjustement(
style: &ComputedValues,
inline_size: Length,
block_size: LengthOrAuto,
) -> Vec2<Length> {
if style.get_box().position!= Position::Relative {
return Vec2::zero();
}
fn adjust(start: LengthOrAuto, end: LengthOrAuto) -> Length {
match (start, end) {
(LengthOrAuto::Auto, LengthOrAuto::Auto) => Length::zero(),
(LengthOrAuto::Auto, LengthOrAuto::LengthPercentage(end)) => -end,
(LengthOrAuto::LengthPercentage(start), _) => start,
}
}
let block_size = block_size.auto_is(Length::zero);
let box_offsets = style.box_offsets().map_inline_and_block_axes(
|v| v.percentage_relative_to(inline_size),
|v| v.percentage_relative_to(block_size),
);
Vec2 {
inline: adjust(box_offsets.inline_start, box_offsets.inline_end),
block: adjust(box_offsets.block_start, box_offsets.block_end),
}
}
// FIXME: use std::mem::take when it’s stable.
// https://github.com/rust-lang/rust/issues/61129
fn take<T>(x: &mut T) -> T
where
T: Default,
{
std::mem::replace(x, Default::default())
}
|
ContainingBlock
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
#![allow(dead_code)]
#![allow(unreachable_code)]
#![allow(unused_imports)]
#![allow(unused_variables)]
#![deny(unsafe_code)]
#[macro_use]
extern crate serde;
use style::properties::ComputedValues;
use style::values::computed::{Length, LengthOrAuto};
use style::Zero;
pub mod context;
pub mod data;
mod dom_traversal;
mod element_data;
mod flow;
mod fragments;
mod geom;
mod opaque_node;
mod positioned;
pub mod query;
mod replaced;
mod style_ext;
pub mod traversal;
pub mod wrapper;
pub use flow::BoxTreeRoot;
use crate::dom_traversal::{Contents, NodeExt};
use crate::flow::{BlockFormattingContext, FlowChildren};
use crate::geom::flow_relative::Vec2;
use crate::positioned::AbsolutelyPositionedFragment;
use crate::replaced::ReplacedContent;
use crate::style_ext::{ComputedValuesExt, Direction, Position, WritingMode};
use servo_arc::Arc;
use std::convert::TryInto;
use style::context::SharedStyleContext;
use style::values::specified::box_::DisplayInside;
/// https://drafts.csswg.org/css-display/#independent-formatting-context
#[derive(Debug)]
enum IndependentFormattingContext {
Flow(BlockFormattingContext),
// Not called FC in specs, but behaves close enough
Replaced(ReplacedContent),
// Other layout modes go here
}
enum NonReplacedIFC<'a> {
Flow(&'a BlockFormattingContext),
}
impl IndependentFormattingContext {
fn construct<'dom,'style>(
context: &SharedStyleContext<'style>,
style: &Arc<ComputedValues>,
display_inside: DisplayInside,
contents: Contents<impl NodeExt<'dom>>,
) -> Self {
match contents.try_into() {
Ok(non_replaced) => match display_inside {
DisplayInside::Flow | DisplayInside::FlowRoot => {
IndependentFormattingContext::Flow(BlockFormattingContext::construct(
context,
style,
non_replaced,
))
},
DisplayInside::None | DisplayInside::Contents => panic!(":("),
},
Err(replaced) => IndependentFormattingContext::Replaced(replaced),
}
}
fn as_replaced(&self) -> Result<&ReplacedContent, NonReplacedIFC> {
match self {
IndependentFormattingContext::Replaced(r) => Ok(r),
IndependentFormattingContext::Flow(f) => Err(NonReplacedIFC::Flow(f)),
}
}
fn layout<'a>(
&'a self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self.as_replaced() {
Ok(replaced) => match *replaced {},
Err(ifc) => ifc.layout(containing_block, tree_rank, absolutely_positioned_fragments),
}
}
}
impl<'a> NonReplacedIFC<'a> {
fn layout(
&self,
containing_block: &ContainingBlock,
tree_rank: usize,
absolutely_positioned_fragments: &mut Vec<AbsolutelyPositionedFragment<'a>>,
) -> FlowChildren {
match self {
NonReplacedIFC::Flow(bfc) => {
bfc.layout(containing_block, tree_rank, absolutely_positioned_fragments)
},
}
}
}
struct ContainingBlock {
inline_size: Length,
block_size: LengthOrAuto,
mode: (WritingMode, Direction),
}
struct DefiniteContainingBlock {
size: Vec2<Length>,
mode: (WritingMode, Direction),
}
/// https://drafts.csswg.org/css2/visuren.html#relative-positioning
fn relative_adjustement(
style: &ComputedValues,
inline_size: Length,
block_size: LengthOrAuto,
) -> Vec2<Length> {
if style.get_box().position!= Position::Relative {
return Vec2::zero();
}
fn adjust(start: LengthOrAuto, end: LengthOrAuto) -> Length {
match (start, end) {
(LengthOrAuto::Auto, LengthOrAuto::Auto) => Length::zero(),
(LengthOrAuto::Auto, LengthOrAuto::LengthPercentage(end)) => -end,
(LengthOrAuto::LengthPercentage(start), _) => start,
}
}
let block_size = block_size.auto_is(Length::zero);
let box_offsets = style.box_offsets().map_inline_and_block_axes(
|v| v.percentage_relative_to(inline_size),
|v| v.percentage_relative_to(block_size),
);
Vec2 {
inline: adjust(box_offsets.inline_start, box_offsets.inline_end),
|
// https://github.com/rust-lang/rust/issues/61129
fn take<T>(x: &mut T) -> T
where
T: Default,
{
std::mem::replace(x, Default::default())
}
|
block: adjust(box_offsets.block_start, box_offsets.block_end),
}
}
// FIXME: use std::mem::take when it’s stable.
|
random_line_split
|
tree_sortable.rs
|
// Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use SortType;
use ffi;
use glib::object::{Cast, IsA};
use glib::translate::*;
use std::mem::{self, transmute};
use std::cmp::Ordering;
use glib_ffi::gpointer;
use {TreeIter, TreeModel, TreeSortable};
use ffi::{GtkTreeIter, GtkTreeModel};
pub enum SortColumn {
Default,
Index(u32),
}
#[doc(hidden)]
impl ToGlib for SortColumn {
type GlibType = i32;
#[inline]
fn to_glib(&self) -> i32 {
match *self {
SortColumn::Default => ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID,
SortColumn::Index(x) => {
assert!(x <= i32::max_value() as u32, "column index is too big");
x as i32
}
}
}
}
#[doc(hidden)]
impl FromGlib<i32> for SortColumn {
#[inline]
fn from_glib(val: i32) -> SortColumn {
skip_assert_initialized!();
match val {
ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID => SortColumn::Default,
x => {
assert!(x >= 0, "invalid column index");
SortColumn::Index(x as u32)
}
}
}
}
pub trait TreeSortableExtManual:'static {
fn set_default_sort_func<F>(&self, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)>;
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType);
fn set_unsorted(&self);
}
unsafe extern "C" fn
|
<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(this: *mut GtkTreeModel, iter: *mut GtkTreeIter,
iter2: *mut GtkTreeIter, f: gpointer) -> i32
where T: IsA<TreeSortable> {
let f: &F = transmute(f);
f(&TreeModel::from_glib_none(this).unsafe_cast(), &from_glib_borrow(iter),
&from_glib_borrow(iter2)).to_glib()
}
unsafe extern "C" fn destroy_closure<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(ptr: gpointer) {
Box::<F>::from_raw(ptr as *mut _);
}
fn into_raw<F, T>(func: F) -> gpointer
where F: Fn(&T, &TreeIter, &TreeIter) -> Ordering +'static {
skip_assert_initialized!();
let func: Box<F> = Box::new(func);
Box::into_raw(func) as gpointer
}
impl<O: IsA<TreeSortable>> TreeSortableExtManual for O {
#[inline]
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)> {
unsafe {
let mut sort_column_id = mem::uninitialized();
let mut order = mem::uninitialized();
ffi::gtk_tree_sortable_get_sort_column_id(self.as_ref().to_glib_none().0, &mut sort_column_id, &mut order);
if sort_column_id!= ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID {
Some((from_glib(sort_column_id), from_glib(order)))
} else {
None
}
}
}
fn set_default_sort_func<F>(&self, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_default_sort_func(self.as_ref().to_glib_none().0,
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
#[inline]
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0, sort_column_id.to_glib(), order.to_glib());
}
}
fn set_unsorted(&self) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0,
ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID,
SortType::Ascending.to_glib());
}
}
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_sort_func(self.as_ref().to_glib_none().0,
sort_column_id.to_glib(),
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
}
|
trampoline
|
identifier_name
|
tree_sortable.rs
|
// Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use SortType;
use ffi;
use glib::object::{Cast, IsA};
use glib::translate::*;
use std::mem::{self, transmute};
use std::cmp::Ordering;
use glib_ffi::gpointer;
use {TreeIter, TreeModel, TreeSortable};
use ffi::{GtkTreeIter, GtkTreeModel};
pub enum SortColumn {
Default,
Index(u32),
}
#[doc(hidden)]
impl ToGlib for SortColumn {
type GlibType = i32;
#[inline]
fn to_glib(&self) -> i32 {
match *self {
SortColumn::Default => ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID,
SortColumn::Index(x) => {
assert!(x <= i32::max_value() as u32, "column index is too big");
x as i32
}
}
}
}
#[doc(hidden)]
impl FromGlib<i32> for SortColumn {
#[inline]
fn from_glib(val: i32) -> SortColumn {
skip_assert_initialized!();
match val {
ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID => SortColumn::Default,
x => {
assert!(x >= 0, "invalid column index");
SortColumn::Index(x as u32)
}
}
}
}
pub trait TreeSortableExtManual:'static {
fn set_default_sort_func<F>(&self, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)>;
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType);
fn set_unsorted(&self);
}
unsafe extern "C" fn trampoline<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(this: *mut GtkTreeModel, iter: *mut GtkTreeIter,
iter2: *mut GtkTreeIter, f: gpointer) -> i32
where T: IsA<TreeSortable> {
let f: &F = transmute(f);
f(&TreeModel::from_glib_none(this).unsafe_cast(), &from_glib_borrow(iter),
&from_glib_borrow(iter2)).to_glib()
}
unsafe extern "C" fn destroy_closure<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(ptr: gpointer) {
Box::<F>::from_raw(ptr as *mut _);
}
fn into_raw<F, T>(func: F) -> gpointer
where F: Fn(&T, &TreeIter, &TreeIter) -> Ordering +'static {
skip_assert_initialized!();
let func: Box<F> = Box::new(func);
Box::into_raw(func) as gpointer
}
impl<O: IsA<TreeSortable>> TreeSortableExtManual for O {
#[inline]
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)> {
unsafe {
let mut sort_column_id = mem::uninitialized();
let mut order = mem::uninitialized();
ffi::gtk_tree_sortable_get_sort_column_id(self.as_ref().to_glib_none().0, &mut sort_column_id, &mut order);
if sort_column_id!= ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID {
Some((from_glib(sort_column_id), from_glib(order)))
} else {
None
}
}
}
|
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_default_sort_func(self.as_ref().to_glib_none().0,
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
#[inline]
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0, sort_column_id.to_glib(), order.to_glib());
}
}
fn set_unsorted(&self) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0,
ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID,
SortType::Ascending.to_glib());
}
}
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_sort_func(self.as_ref().to_glib_none().0,
sort_column_id.to_glib(),
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
}
|
fn set_default_sort_func<F>(&self, sort_func: F)
|
random_line_split
|
tree_sortable.rs
|
// Copyright 2013-2015, The Gtk-rs Project Developers.
// See the COPYRIGHT file at the top-level directory of this distribution.
// Licensed under the MIT license, see the LICENSE file or <http://opensource.org/licenses/MIT>
use SortType;
use ffi;
use glib::object::{Cast, IsA};
use glib::translate::*;
use std::mem::{self, transmute};
use std::cmp::Ordering;
use glib_ffi::gpointer;
use {TreeIter, TreeModel, TreeSortable};
use ffi::{GtkTreeIter, GtkTreeModel};
pub enum SortColumn {
Default,
Index(u32),
}
#[doc(hidden)]
impl ToGlib for SortColumn {
type GlibType = i32;
#[inline]
fn to_glib(&self) -> i32 {
match *self {
SortColumn::Default => ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID,
SortColumn::Index(x) => {
assert!(x <= i32::max_value() as u32, "column index is too big");
x as i32
}
}
}
}
#[doc(hidden)]
impl FromGlib<i32> for SortColumn {
#[inline]
fn from_glib(val: i32) -> SortColumn {
skip_assert_initialized!();
match val {
ffi::GTK_TREE_SORTABLE_DEFAULT_SORT_COLUMN_ID => SortColumn::Default,
x => {
assert!(x >= 0, "invalid column index");
SortColumn::Index(x as u32)
}
}
}
}
pub trait TreeSortableExtManual:'static {
fn set_default_sort_func<F>(&self, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static;
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)>;
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType);
fn set_unsorted(&self);
}
unsafe extern "C" fn trampoline<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(this: *mut GtkTreeModel, iter: *mut GtkTreeIter,
iter2: *mut GtkTreeIter, f: gpointer) -> i32
where T: IsA<TreeSortable> {
let f: &F = transmute(f);
f(&TreeModel::from_glib_none(this).unsafe_cast(), &from_glib_borrow(iter),
&from_glib_borrow(iter2)).to_glib()
}
unsafe extern "C" fn destroy_closure<T, F: Fn(&T, &TreeIter, &TreeIter) -> Ordering>(ptr: gpointer)
|
fn into_raw<F, T>(func: F) -> gpointer
where F: Fn(&T, &TreeIter, &TreeIter) -> Ordering +'static {
skip_assert_initialized!();
let func: Box<F> = Box::new(func);
Box::into_raw(func) as gpointer
}
impl<O: IsA<TreeSortable>> TreeSortableExtManual for O {
#[inline]
fn get_sort_column_id(&self) -> Option<(SortColumn, SortType)> {
unsafe {
let mut sort_column_id = mem::uninitialized();
let mut order = mem::uninitialized();
ffi::gtk_tree_sortable_get_sort_column_id(self.as_ref().to_glib_none().0, &mut sort_column_id, &mut order);
if sort_column_id!= ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID {
Some((from_glib(sort_column_id), from_glib(order)))
} else {
None
}
}
}
fn set_default_sort_func<F>(&self, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_default_sort_func(self.as_ref().to_glib_none().0,
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
#[inline]
fn set_sort_column_id(&self, sort_column_id: SortColumn, order: SortType) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0, sort_column_id.to_glib(), order.to_glib());
}
}
fn set_unsorted(&self) {
unsafe {
ffi::gtk_tree_sortable_set_sort_column_id(self.as_ref().to_glib_none().0,
ffi::GTK_TREE_SORTABLE_UNSORTED_SORT_COLUMN_ID,
SortType::Ascending.to_glib());
}
}
fn set_sort_func<F>(&self, sort_column_id: SortColumn, sort_func: F)
where F: Fn(&Self, &TreeIter, &TreeIter) -> Ordering +'static {
unsafe {
ffi::gtk_tree_sortable_set_sort_func(self.as_ref().to_glib_none().0,
sort_column_id.to_glib(),
Some(trampoline::<Self, F>),
into_raw(sort_func),
Some(destroy_closure::<Self, F>))
}
}
}
|
{
Box::<F>::from_raw(ptr as *mut _);
}
|
identifier_body
|
lib.rs
|
time::{Duration, Instant},
};
pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;
#[cfg(test)]
mod tests;
use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
let mut opts = match cli::parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => {
eprintln!("error: {}", msg);
process::exit(ERROR_EXIT_CODE);
}
None => return,
};
if let Some(options) = options {
opts.options = options;
}
if opts.list {
if let Err(e) = console::list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
} else {
match console::run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => process::exit(ERROR_EXIT_CODE),
Err(e) => {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
}
}
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, None)
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
// If we're being run in SpawnedSecondary mode, run the test here. run_test
// will then exit the process.
if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
env::remove_var(SECONDARY_TEST_INVOKER_VAR);
let test = tests
.iter()
.filter(|test| test.desc.name.as_slice() == name)
.map(make_owned_test)
.next()
.unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
let TestDescAndFn { desc, testfn } = test;
let testfn = match testfn {
StaticTestFn(f) => f,
_ => panic!("only static tests are supported"),
};
run_test_in_spawned_subprocess(desc, Box::new(testfn));
}
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
match test.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static"),
}
}
/// Invoked when unit tests terminate. Should panic if the unit
/// Tests is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
assert_eq!(
code, 0,
"the test returned a termination value with a non-zero status code ({}) \
which indicates a failure",
code
);
}
pub fn run_tests<F>(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
mut notify_about_test_event: F,
) -> io::Result<()>
where
F: FnMut(TestEvent) -> io::Result<()>,
{
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;
use std::sync::mpsc::RecvTimeoutError;
struct RunningTest {
join_handle: Option<thread::JoinHandle<()>>,
}
// Use a deterministic hasher
type TestMap =
HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
|
id: TestId,
desc: TestDesc,
timeout: Instant,
}
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
if!opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
}
filtered_tests
};
let filtered_out = tests_len - filtered_tests.len();
let event = TestEvent::TeFilteredOut(filtered_out);
notify_about_test_event(event)?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
let event = TestEvent::TeFiltered(filtered_descs);
notify_about_test_event(event)?;
let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
.into_iter()
.enumerate()
.map(|(i, e)| (TestId(i), e))
.partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
let run_strategy = if opts.options.panic_abort &&!opts.force_run_in_process {
RunStrategy::SpawnPrimary
} else {
RunStrategy::InProcess
};
let mut running_tests: TestMap = HashMap::default();
let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();
fn get_timed_out_tests(
running_tests: &TestMap,
timeout_queue: &mut VecDeque<TimeoutEntry>,
) -> Vec<TestDesc> {
let now = Instant::now();
let mut timed_out = Vec::new();
while let Some(timeout_entry) = timeout_queue.front() {
if now < timeout_entry.timeout {
break;
}
let timeout_entry = timeout_queue.pop_front().unwrap();
if running_tests.contains_key(&timeout_entry.id) {
timed_out.push(timeout_entry.desc);
}
}
timed_out
}
fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout,.. }| {
let now = Instant::now();
if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
})
}
if concurrency == 1 {
while!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
let join_handle =
run_test(opts,!opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
assert!(join_handle.is_none());
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
} else {
while pending > 0 ||!remaining.is_empty() {
while pending < concurrency &&!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let timeout = time::get_default_test_timeout();
let desc = test.desc.clone();
let event = TestEvent::TeWait(desc.clone());
notify_about_test_event(event)?; //here no pad
let join_handle = run_test(
opts,
!opts.run_tests,
id,
test,
run_strategy,
tx.clone(),
Concurrent::Yes,
);
running_tests.insert(id, RunningTest { join_handle });
timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
pending += 1;
}
let mut res;
loop {
if let Some(timeout) = calc_timeout(&timeout_queue) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
let event = TestEvent::TeTimeout(test);
notify_about_test_event(event)?;
}
match res {
Err(RecvTimeoutError::Timeout) => {
// Result is not yet ready, continue waiting.
}
_ => {
// We've got a result, stop the loop.
break;
}
}
} else {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
break;
}
}
let mut completed_test = res.unwrap();
let running_test = running_tests.remove(&completed_test.id).unwrap();
if let Some(join_handle) = running_test.join_handle {
if let Err(_) = join_handle.join() {
if let TrOk = completed_test.result {
completed_test.result =
TrFailedMsg("panicked after reporting success".to_string());
}
}
}
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
pending -= 1;
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for (id, b) in filtered_benchs {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
}
Ok(())
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
let matches_filter = |test: &TestDescAndFn, filter: &str| {
let test_name = test.desc.name.as_slice();
match opts.filter_exact {
true => test_name == filter,
false => test_name.contains(filter),
}
};
// Remove tests that don't match the test filter
if!opts.filters.is_empty() {
filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
}
// Skip tests that match any of the skip filters
filtered.retain(|test|!opts.skip.iter().any(|sf| matches_filter(test, sf)));
// Excludes #[should_panic] tests
if opts.exclude_should_panic {
filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
}
// maybe unignore tests
match opts.run_ignored {
RunIgnored::Yes => {
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::Only => {
filtered.retain(|test| test.desc.ignore);
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::No => {}
}
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
tests
.into_iter()
.map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
})),
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
})),
f => f,
};
TestDescAndFn { desc: x.desc, testfn }
})
.collect()
}
pub fn run_test(
opts: &TestOpts,
force_ignore: bool,
id: TestId,
test: TestDescAndFn,
strategy: RunStrategy,
monitor_ch: Sender<CompletedTest>,
concurrency: Concurrent,
) -> Option<thread::JoinHandle<()>> {
let TestDescAndFn { desc, testfn } = test;
// Emscripten can catch panics but other wasm targets cannot
let ignore_because_no_process_support = desc.should_panic!= ShouldPanic::No
&& cfg!(target_arch = "wasm32")
&&!cfg!(target_os = "emscripten");
if force_ignore || desc.ignore || ignore_because_no_process_support {
let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
monitor_ch.send(message).unwrap();
return None;
}
struct TestRunOpts {
pub strategy: RunStrategy,
pub nocapture: bool,
pub concurrency: Concurrent,
pub time: Option<time::TestTimeOptions>,
}
fn run_test_inner(
id: TestId,
desc: TestDesc,
monitor_ch: Sender<CompletedTest>,
testfn: Box<dyn FnOnce() + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
let concurrency = opts.concurrency;
let name = desc.name.clone();
let runtest = move || match opts.strategy {
RunStrategy::InProcess => run_test_in_process(
id,
desc,
opts.nocapture,
opts.time.is_some(),
testfn,
monitor_ch,
opts.time,
),
RunStrategy::SpawnPrimary => spawn_test_subprocess(
id,
desc,
opts.nocapture,
opts.time.is_some(),
monitor_ch,
opts.time,
),
};
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
// level.
let supports_threads =!cfg!(target_os = "emscripten") &&!cfg!(target_arch = "wasm32");
if concurrency == Concurrent::Yes && supports_threads {
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
let mut runtest = Arc::new(Mutex::new(Some(runtest)));
let runtest2 = runtest.clone();
match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
Ok(handle) => Some(handle),
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
// `ErrorKind::WouldBlock` means hitting the thread limit on some
// platforms, so run the test synchronously here instead.
Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
None
}
Err(e) => panic!("failed to spawn thread to run test: {}", e),
}
} else {
runtest();
None
}
}
let test_run_opts =
TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
match testfn {
DynBenchFn(bencher) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
});
None
}
StaticBenchFn(benchfn) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
None
}
DynTestFn(f) => {
match strategy {
RunStrategy::InProcess => (),
_ => panic!("Cannot run dynamic test fn out-of-process"),
};
run_test_inner(
id,
desc,
monitor_ch,
Box::new(move || __rust_begin_short_backtrace(f)),
test_run_opts,
)
}
StaticTestFn(f) => run_test_inner(
id,
desc,
|
struct TimeoutEntry {
|
random_line_split
|
lib.rs
|
::{Duration, Instant},
};
pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;
#[cfg(test)]
mod tests;
use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
let mut opts = match cli::parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => {
eprintln!("error: {}", msg);
process::exit(ERROR_EXIT_CODE);
}
None => return,
};
if let Some(options) = options {
opts.options = options;
}
if opts.list {
if let Err(e) = console::list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
} else {
match console::run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => process::exit(ERROR_EXIT_CODE),
Err(e) => {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
}
}
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, None)
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
// If we're being run in SpawnedSecondary mode, run the test here. run_test
// will then exit the process.
if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
env::remove_var(SECONDARY_TEST_INVOKER_VAR);
let test = tests
.iter()
.filter(|test| test.desc.name.as_slice() == name)
.map(make_owned_test)
.next()
.unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
let TestDescAndFn { desc, testfn } = test;
let testfn = match testfn {
StaticTestFn(f) => f,
_ => panic!("only static tests are supported"),
};
run_test_in_spawned_subprocess(desc, Box::new(testfn));
}
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
match test.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static"),
}
}
/// Invoked when unit tests terminate. Should panic if the unit
/// Tests is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
assert_eq!(
code, 0,
"the test returned a termination value with a non-zero status code ({}) \
which indicates a failure",
code
);
}
pub fn run_tests<F>(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
mut notify_about_test_event: F,
) -> io::Result<()>
where
F: FnMut(TestEvent) -> io::Result<()>,
{
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;
use std::sync::mpsc::RecvTimeoutError;
struct RunningTest {
join_handle: Option<thread::JoinHandle<()>>,
}
// Use a deterministic hasher
type TestMap =
HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
struct TimeoutEntry {
id: TestId,
desc: TestDesc,
timeout: Instant,
}
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
if!opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
}
filtered_tests
};
let filtered_out = tests_len - filtered_tests.len();
let event = TestEvent::TeFilteredOut(filtered_out);
notify_about_test_event(event)?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
let event = TestEvent::TeFiltered(filtered_descs);
notify_about_test_event(event)?;
let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
.into_iter()
.enumerate()
.map(|(i, e)| (TestId(i), e))
.partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
let run_strategy = if opts.options.panic_abort &&!opts.force_run_in_process {
RunStrategy::SpawnPrimary
} else {
RunStrategy::InProcess
};
let mut running_tests: TestMap = HashMap::default();
let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();
fn get_timed_out_tests(
running_tests: &TestMap,
timeout_queue: &mut VecDeque<TimeoutEntry>,
) -> Vec<TestDesc> {
let now = Instant::now();
let mut timed_out = Vec::new();
while let Some(timeout_entry) = timeout_queue.front() {
if now < timeout_entry.timeout {
break;
}
let timeout_entry = timeout_queue.pop_front().unwrap();
if running_tests.contains_key(&timeout_entry.id) {
timed_out.push(timeout_entry.desc);
}
}
timed_out
}
fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout,.. }| {
let now = Instant::now();
if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
})
}
if concurrency == 1 {
while!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
let join_handle =
run_test(opts,!opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
assert!(join_handle.is_none());
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
} else {
while pending > 0 ||!remaining.is_empty() {
while pending < concurrency &&!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let timeout = time::get_default_test_timeout();
let desc = test.desc.clone();
let event = TestEvent::TeWait(desc.clone());
notify_about_test_event(event)?; //here no pad
let join_handle = run_test(
opts,
!opts.run_tests,
id,
test,
run_strategy,
tx.clone(),
Concurrent::Yes,
);
running_tests.insert(id, RunningTest { join_handle });
timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
pending += 1;
}
let mut res;
loop {
if let Some(timeout) = calc_timeout(&timeout_queue) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
let event = TestEvent::TeTimeout(test);
notify_about_test_event(event)?;
}
match res {
Err(RecvTimeoutError::Timeout) => {
// Result is not yet ready, continue waiting.
}
_ => {
// We've got a result, stop the loop.
break;
}
}
} else {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
break;
}
}
let mut completed_test = res.unwrap();
let running_test = running_tests.remove(&completed_test.id).unwrap();
if let Some(join_handle) = running_test.join_handle {
if let Err(_) = join_handle.join() {
if let TrOk = completed_test.result {
completed_test.result =
TrFailedMsg("panicked after reporting success".to_string());
}
}
}
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
pending -= 1;
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for (id, b) in filtered_benchs {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
}
Ok(())
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
let matches_filter = |test: &TestDescAndFn, filter: &str| {
let test_name = test.desc.name.as_slice();
match opts.filter_exact {
true => test_name == filter,
false => test_name.contains(filter),
}
};
// Remove tests that don't match the test filter
if!opts.filters.is_empty() {
filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
}
// Skip tests that match any of the skip filters
filtered.retain(|test|!opts.skip.iter().any(|sf| matches_filter(test, sf)));
// Excludes #[should_panic] tests
if opts.exclude_should_panic {
filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
}
// maybe unignore tests
match opts.run_ignored {
RunIgnored::Yes => {
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::Only => {
filtered.retain(|test| test.desc.ignore);
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::No => {}
}
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
tests
.into_iter()
.map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
})),
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
})),
f => f,
};
TestDescAndFn { desc: x.desc, testfn }
})
.collect()
}
pub fn run_test(
opts: &TestOpts,
force_ignore: bool,
id: TestId,
test: TestDescAndFn,
strategy: RunStrategy,
monitor_ch: Sender<CompletedTest>,
concurrency: Concurrent,
) -> Option<thread::JoinHandle<()>>
|
fn run_test_inner(
id: TestId,
desc: TestDesc,
monitor_ch: Sender<CompletedTest>,
testfn: Box<dyn FnOnce() + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
let concurrency = opts.concurrency;
let name = desc.name.clone();
let runtest = move || match opts.strategy {
RunStrategy::InProcess => run_test_in_process(
id,
desc,
opts.nocapture,
opts.time.is_some(),
testfn,
monitor_ch,
opts.time,
),
RunStrategy::SpawnPrimary => spawn_test_subprocess(
id,
desc,
opts.nocapture,
opts.time.is_some(),
monitor_ch,
opts.time,
),
};
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
// level.
let supports_threads =!cfg!(target_os = "emscripten") &&!cfg!(target_arch = "wasm32");
if concurrency == Concurrent::Yes && supports_threads {
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
let mut runtest = Arc::new(Mutex::new(Some(runtest)));
let runtest2 = runtest.clone();
match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
Ok(handle) => Some(handle),
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
// `ErrorKind::WouldBlock` means hitting the thread limit on some
// platforms, so run the test synchronously here instead.
Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
None
}
Err(e) => panic!("failed to spawn thread to run test: {}", e),
}
} else {
runtest();
None
}
}
let test_run_opts =
TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
match testfn {
DynBenchFn(bencher) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
});
None
}
StaticBenchFn(benchfn) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
None
}
DynTestFn(f) => {
match strategy {
RunStrategy::InProcess => (),
_ => panic!("Cannot run dynamic test fn out-of-process"),
};
run_test_inner(
id,
desc,
monitor_ch,
Box::new(move || __rust_begin_short_backtrace(f)),
test_run_opts,
)
}
StaticTestFn(f) => run_test_inner(
id,
desc,
|
{
let TestDescAndFn { desc, testfn } = test;
// Emscripten can catch panics but other wasm targets cannot
let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
&& cfg!(target_arch = "wasm32")
&& !cfg!(target_os = "emscripten");
if force_ignore || desc.ignore || ignore_because_no_process_support {
let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
monitor_ch.send(message).unwrap();
return None;
}
struct TestRunOpts {
pub strategy: RunStrategy,
pub nocapture: bool,
pub concurrency: Concurrent,
pub time: Option<time::TestTimeOptions>,
}
|
identifier_body
|
lib.rs
|
::{Duration, Instant},
};
pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;
#[cfg(test)]
mod tests;
use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
let mut opts = match cli::parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => {
eprintln!("error: {}", msg);
process::exit(ERROR_EXIT_CODE);
}
None => return,
};
if let Some(options) = options {
opts.options = options;
}
if opts.list {
if let Err(e) = console::list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
} else {
match console::run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => process::exit(ERROR_EXIT_CODE),
Err(e) => {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
}
}
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, None)
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
// If we're being run in SpawnedSecondary mode, run the test here. run_test
// will then exit the process.
if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
env::remove_var(SECONDARY_TEST_INVOKER_VAR);
let test = tests
.iter()
.filter(|test| test.desc.name.as_slice() == name)
.map(make_owned_test)
.next()
.unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
let TestDescAndFn { desc, testfn } = test;
let testfn = match testfn {
StaticTestFn(f) => f,
_ => panic!("only static tests are supported"),
};
run_test_in_spawned_subprocess(desc, Box::new(testfn));
}
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
match test.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static"),
}
}
/// Invoked when unit tests terminate. Should panic if the unit
/// Tests is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
assert_eq!(
code, 0,
"the test returned a termination value with a non-zero status code ({}) \
which indicates a failure",
code
);
}
pub fn run_tests<F>(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
mut notify_about_test_event: F,
) -> io::Result<()>
where
F: FnMut(TestEvent) -> io::Result<()>,
{
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;
use std::sync::mpsc::RecvTimeoutError;
struct RunningTest {
join_handle: Option<thread::JoinHandle<()>>,
}
// Use a deterministic hasher
type TestMap =
HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
struct TimeoutEntry {
id: TestId,
desc: TestDesc,
timeout: Instant,
}
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
if!opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
}
filtered_tests
};
let filtered_out = tests_len - filtered_tests.len();
let event = TestEvent::TeFilteredOut(filtered_out);
notify_about_test_event(event)?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
let event = TestEvent::TeFiltered(filtered_descs);
notify_about_test_event(event)?;
let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
.into_iter()
.enumerate()
.map(|(i, e)| (TestId(i), e))
.partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
let run_strategy = if opts.options.panic_abort &&!opts.force_run_in_process {
RunStrategy::SpawnPrimary
} else {
RunStrategy::InProcess
};
let mut running_tests: TestMap = HashMap::default();
let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();
fn get_timed_out_tests(
running_tests: &TestMap,
timeout_queue: &mut VecDeque<TimeoutEntry>,
) -> Vec<TestDesc> {
let now = Instant::now();
let mut timed_out = Vec::new();
while let Some(timeout_entry) = timeout_queue.front() {
if now < timeout_entry.timeout {
break;
}
let timeout_entry = timeout_queue.pop_front().unwrap();
if running_tests.contains_key(&timeout_entry.id) {
timed_out.push(timeout_entry.desc);
}
}
timed_out
}
fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout,.. }| {
let now = Instant::now();
if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
})
}
if concurrency == 1 {
while!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
let join_handle =
run_test(opts,!opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
assert!(join_handle.is_none());
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
} else
|
pending += 1;
}
let mut res;
loop {
if let Some(timeout) = calc_timeout(&timeout_queue) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
let event = TestEvent::TeTimeout(test);
notify_about_test_event(event)?;
}
match res {
Err(RecvTimeoutError::Timeout) => {
// Result is not yet ready, continue waiting.
}
_ => {
// We've got a result, stop the loop.
break;
}
}
} else {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
break;
}
}
let mut completed_test = res.unwrap();
let running_test = running_tests.remove(&completed_test.id).unwrap();
if let Some(join_handle) = running_test.join_handle {
if let Err(_) = join_handle.join() {
if let TrOk = completed_test.result {
completed_test.result =
TrFailedMsg("panicked after reporting success".to_string());
}
}
}
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
pending -= 1;
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for (id, b) in filtered_benchs {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
}
Ok(())
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
let matches_filter = |test: &TestDescAndFn, filter: &str| {
let test_name = test.desc.name.as_slice();
match opts.filter_exact {
true => test_name == filter,
false => test_name.contains(filter),
}
};
// Remove tests that don't match the test filter
if!opts.filters.is_empty() {
filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
}
// Skip tests that match any of the skip filters
filtered.retain(|test|!opts.skip.iter().any(|sf| matches_filter(test, sf)));
// Excludes #[should_panic] tests
if opts.exclude_should_panic {
filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
}
// maybe unignore tests
match opts.run_ignored {
RunIgnored::Yes => {
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::Only => {
filtered.retain(|test| test.desc.ignore);
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::No => {}
}
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
tests
.into_iter()
.map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
})),
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
})),
f => f,
};
TestDescAndFn { desc: x.desc, testfn }
})
.collect()
}
pub fn run_test(
opts: &TestOpts,
force_ignore: bool,
id: TestId,
test: TestDescAndFn,
strategy: RunStrategy,
monitor_ch: Sender<CompletedTest>,
concurrency: Concurrent,
) -> Option<thread::JoinHandle<()>> {
let TestDescAndFn { desc, testfn } = test;
// Emscripten can catch panics but other wasm targets cannot
let ignore_because_no_process_support = desc.should_panic!= ShouldPanic::No
&& cfg!(target_arch = "wasm32")
&&!cfg!(target_os = "emscripten");
if force_ignore || desc.ignore || ignore_because_no_process_support {
let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
monitor_ch.send(message).unwrap();
return None;
}
struct TestRunOpts {
pub strategy: RunStrategy,
pub nocapture: bool,
pub concurrency: Concurrent,
pub time: Option<time::TestTimeOptions>,
}
fn run_test_inner(
id: TestId,
desc: TestDesc,
monitor_ch: Sender<CompletedTest>,
testfn: Box<dyn FnOnce() + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
let concurrency = opts.concurrency;
let name = desc.name.clone();
let runtest = move || match opts.strategy {
RunStrategy::InProcess => run_test_in_process(
id,
desc,
opts.nocapture,
opts.time.is_some(),
testfn,
monitor_ch,
opts.time,
),
RunStrategy::SpawnPrimary => spawn_test_subprocess(
id,
desc,
opts.nocapture,
opts.time.is_some(),
monitor_ch,
opts.time,
),
};
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
// level.
let supports_threads =!cfg!(target_os = "emscripten") &&!cfg!(target_arch = "wasm32");
if concurrency == Concurrent::Yes && supports_threads {
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
let mut runtest = Arc::new(Mutex::new(Some(runtest)));
let runtest2 = runtest.clone();
match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
Ok(handle) => Some(handle),
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
// `ErrorKind::WouldBlock` means hitting the thread limit on some
// platforms, so run the test synchronously here instead.
Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
None
}
Err(e) => panic!("failed to spawn thread to run test: {}", e),
}
} else {
runtest();
None
}
}
let test_run_opts =
TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
match testfn {
DynBenchFn(bencher) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
});
None
}
StaticBenchFn(benchfn) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
None
}
DynTestFn(f) => {
match strategy {
RunStrategy::InProcess => (),
_ => panic!("Cannot run dynamic test fn out-of-process"),
};
run_test_inner(
id,
desc,
monitor_ch,
Box::new(move || __rust_begin_short_backtrace(f)),
test_run_opts,
)
}
StaticTestFn(f) => run_test_inner(
id,
desc,
|
{
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let timeout = time::get_default_test_timeout();
let desc = test.desc.clone();
let event = TestEvent::TeWait(desc.clone());
notify_about_test_event(event)?; //here no pad
let join_handle = run_test(
opts,
!opts.run_tests,
id,
test,
run_strategy,
tx.clone(),
Concurrent::Yes,
);
running_tests.insert(id, RunningTest { join_handle });
timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
|
conditional_block
|
lib.rs
|
::{Duration, Instant},
};
pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod term;
mod test_result;
mod time;
mod types;
#[cfg(test)]
mod tests;
use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;
// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
let mut opts = match cli::parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => {
eprintln!("error: {}", msg);
process::exit(ERROR_EXIT_CODE);
}
None => return,
};
if let Some(options) = options {
opts.options = options;
}
if opts.list {
if let Err(e) = console::list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
} else {
match console::run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => process::exit(ERROR_EXIT_CODE),
Err(e) => {
eprintln!("error: io error when listing tests: {:?}", e);
process::exit(ERROR_EXIT_CODE);
}
}
}
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, None)
}
/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
// If we're being run in SpawnedSecondary mode, run the test here. run_test
// will then exit the process.
if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
env::remove_var(SECONDARY_TEST_INVOKER_VAR);
let test = tests
.iter()
.filter(|test| test.desc.name.as_slice() == name)
.map(make_owned_test)
.next()
.unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
let TestDescAndFn { desc, testfn } = test;
let testfn = match testfn {
StaticTestFn(f) => f,
_ => panic!("only static tests are supported"),
};
run_test_in_spawned_subprocess(desc, Box::new(testfn));
}
let args = env::args().collect::<Vec<_>>();
let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
match test.testfn {
StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
_ => panic!("non-static tests passed to test::test_main_static"),
}
}
/// Invoked when unit tests terminate. Should panic if the unit
/// Tests is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
assert_eq!(
code, 0,
"the test returned a termination value with a non-zero status code ({}) \
which indicates a failure",
code
);
}
pub fn run_tests<F>(
opts: &TestOpts,
tests: Vec<TestDescAndFn>,
mut notify_about_test_event: F,
) -> io::Result<()>
where
F: FnMut(TestEvent) -> io::Result<()>,
{
use std::collections::{self, HashMap};
use std::hash::BuildHasherDefault;
use std::sync::mpsc::RecvTimeoutError;
struct RunningTest {
join_handle: Option<thread::JoinHandle<()>>,
}
// Use a deterministic hasher
type TestMap =
HashMap<TestId, RunningTest, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
struct TimeoutEntry {
id: TestId,
desc: TestDesc,
timeout: Instant,
}
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
if!opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
}
filtered_tests
};
let filtered_out = tests_len - filtered_tests.len();
let event = TestEvent::TeFilteredOut(filtered_out);
notify_about_test_event(event)?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
let event = TestEvent::TeFiltered(filtered_descs);
notify_about_test_event(event)?;
let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
.into_iter()
.enumerate()
.map(|(i, e)| (TestId(i), e))
.partition(|(_, e)| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<CompletedTest>();
let run_strategy = if opts.options.panic_abort &&!opts.force_run_in_process {
RunStrategy::SpawnPrimary
} else {
RunStrategy::InProcess
};
let mut running_tests: TestMap = HashMap::default();
let mut timeout_queue: VecDeque<TimeoutEntry> = VecDeque::new();
fn
|
(
running_tests: &TestMap,
timeout_queue: &mut VecDeque<TimeoutEntry>,
) -> Vec<TestDesc> {
let now = Instant::now();
let mut timed_out = Vec::new();
while let Some(timeout_entry) = timeout_queue.front() {
if now < timeout_entry.timeout {
break;
}
let timeout_entry = timeout_queue.pop_front().unwrap();
if running_tests.contains_key(&timeout_entry.id) {
timed_out.push(timeout_entry.desc);
}
}
timed_out
}
fn calc_timeout(timeout_queue: &VecDeque<TimeoutEntry>) -> Option<Duration> {
timeout_queue.front().map(|&TimeoutEntry { timeout: next_timeout,.. }| {
let now = Instant::now();
if next_timeout >= now { next_timeout - now } else { Duration::new(0, 0) }
})
}
if concurrency == 1 {
while!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let event = TestEvent::TeWait(test.desc.clone());
notify_about_test_event(event)?;
let join_handle =
run_test(opts,!opts.run_tests, id, test, run_strategy, tx.clone(), Concurrent::No);
assert!(join_handle.is_none());
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
} else {
while pending > 0 ||!remaining.is_empty() {
while pending < concurrency &&!remaining.is_empty() {
let (id, test) = remaining.pop().unwrap();
let timeout = time::get_default_test_timeout();
let desc = test.desc.clone();
let event = TestEvent::TeWait(desc.clone());
notify_about_test_event(event)?; //here no pad
let join_handle = run_test(
opts,
!opts.run_tests,
id,
test,
run_strategy,
tx.clone(),
Concurrent::Yes,
);
running_tests.insert(id, RunningTest { join_handle });
timeout_queue.push_back(TimeoutEntry { id, desc, timeout });
pending += 1;
}
let mut res;
loop {
if let Some(timeout) = calc_timeout(&timeout_queue) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&running_tests, &mut timeout_queue) {
let event = TestEvent::TeTimeout(test);
notify_about_test_event(event)?;
}
match res {
Err(RecvTimeoutError::Timeout) => {
// Result is not yet ready, continue waiting.
}
_ => {
// We've got a result, stop the loop.
break;
}
}
} else {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
break;
}
}
let mut completed_test = res.unwrap();
let running_test = running_tests.remove(&completed_test.id).unwrap();
if let Some(join_handle) = running_test.join_handle {
if let Err(_) = join_handle.join() {
if let TrOk = completed_test.result {
completed_test.result =
TrFailedMsg("panicked after reporting success".to_string());
}
}
}
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
pending -= 1;
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for (id, b) in filtered_benchs {
let event = TestEvent::TeWait(b.desc.clone());
notify_about_test_event(event)?;
run_test(opts, false, id, b, run_strategy, tx.clone(), Concurrent::No);
let completed_test = rx.recv().unwrap();
let event = TestEvent::TeResult(completed_test);
notify_about_test_event(event)?;
}
}
Ok(())
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
let matches_filter = |test: &TestDescAndFn, filter: &str| {
let test_name = test.desc.name.as_slice();
match opts.filter_exact {
true => test_name == filter,
false => test_name.contains(filter),
}
};
// Remove tests that don't match the test filter
if!opts.filters.is_empty() {
filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter)));
}
// Skip tests that match any of the skip filters
filtered.retain(|test|!opts.skip.iter().any(|sf| matches_filter(test, sf)));
// Excludes #[should_panic] tests
if opts.exclude_should_panic {
filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
}
// maybe unignore tests
match opts.run_ignored {
RunIgnored::Yes => {
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::Only => {
filtered.retain(|test| test.desc.ignore);
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
}
RunIgnored::No => {}
}
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
tests
.into_iter()
.map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
})),
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
})),
f => f,
};
TestDescAndFn { desc: x.desc, testfn }
})
.collect()
}
pub fn run_test(
opts: &TestOpts,
force_ignore: bool,
id: TestId,
test: TestDescAndFn,
strategy: RunStrategy,
monitor_ch: Sender<CompletedTest>,
concurrency: Concurrent,
) -> Option<thread::JoinHandle<()>> {
let TestDescAndFn { desc, testfn } = test;
// Emscripten can catch panics but other wasm targets cannot
let ignore_because_no_process_support = desc.should_panic!= ShouldPanic::No
&& cfg!(target_arch = "wasm32")
&&!cfg!(target_os = "emscripten");
if force_ignore || desc.ignore || ignore_because_no_process_support {
let message = CompletedTest::new(id, desc, TrIgnored, None, Vec::new());
monitor_ch.send(message).unwrap();
return None;
}
struct TestRunOpts {
pub strategy: RunStrategy,
pub nocapture: bool,
pub concurrency: Concurrent,
pub time: Option<time::TestTimeOptions>,
}
fn run_test_inner(
id: TestId,
desc: TestDesc,
monitor_ch: Sender<CompletedTest>,
testfn: Box<dyn FnOnce() + Send>,
opts: TestRunOpts,
) -> Option<thread::JoinHandle<()>> {
let concurrency = opts.concurrency;
let name = desc.name.clone();
let runtest = move || match opts.strategy {
RunStrategy::InProcess => run_test_in_process(
id,
desc,
opts.nocapture,
opts.time.is_some(),
testfn,
monitor_ch,
opts.time,
),
RunStrategy::SpawnPrimary => spawn_test_subprocess(
id,
desc,
opts.nocapture,
opts.time.is_some(),
monitor_ch,
opts.time,
),
};
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
// level.
let supports_threads =!cfg!(target_os = "emscripten") &&!cfg!(target_arch = "wasm32");
if concurrency == Concurrent::Yes && supports_threads {
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
let mut runtest = Arc::new(Mutex::new(Some(runtest)));
let runtest2 = runtest.clone();
match cfg.spawn(move || runtest2.lock().unwrap().take().unwrap()()) {
Ok(handle) => Some(handle),
Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
// `ErrorKind::WouldBlock` means hitting the thread limit on some
// platforms, so run the test synchronously here instead.
Arc::get_mut(&mut runtest).unwrap().get_mut().unwrap().take().unwrap()();
None
}
Err(e) => panic!("failed to spawn thread to run test: {}", e),
}
} else {
runtest();
None
}
}
let test_run_opts =
TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };
match testfn {
DynBenchFn(bencher) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
});
None
}
StaticBenchFn(benchfn) => {
// Benchmarks aren't expected to panic, so we run them all in-process.
crate::bench::benchmark(id, desc, monitor_ch, opts.nocapture, benchfn);
None
}
DynTestFn(f) => {
match strategy {
RunStrategy::InProcess => (),
_ => panic!("Cannot run dynamic test fn out-of-process"),
};
run_test_inner(
id,
desc,
monitor_ch,
Box::new(move || __rust_begin_short_backtrace(f)),
test_run_opts,
)
}
StaticTestFn(f) => run_test_inner(
id,
desc,
|
get_timed_out_tests
|
identifier_name
|
mark_set.rs
|
// Copyright 2021-2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing permissions and
// limitations under the License.
use core::any::Any;
use core::any::TypeId;
use std::collections::HashMap;
use std::rc::Rc;
/// A set of reference-counted "mark" values, containing up to one value per value type.
///
/// # Example
///
/// ```rust
/// use notemaps_text::MarkSet;
/// use std::rc::Rc;
///
/// let mut marks = MarkSet::new();
/// let thing_one: Rc<String> = "thing one".to_string().into();
/// let thing_two: Rc<String> = "thing two".to_string().into();
///
/// marks.push(thing_one.clone());
/// assert!(marks.contains(thing_one.as_ref()));
/// assert_eq!(marks.get::<String>(), Some(thing_one.as_ref()));
///
/// marks.push(thing_two.clone());
/// assert_eq!(marks.get::<String>(), Some(thing_two.as_ref()));
/// ```
#[derive(Clone, Default, Debug)]
pub struct MarkSet {
map: HashMap<TypeId, Rc<dyn Any>>,
}
impl MarkSet {
/// Create a new, empty [MarkSet].
pub fn new() -> Self {
Default::default()
}
/// Create a new [MarkSet] containing one mark, `m`.
pub fn new_with<T: Any>(m: Rc<T>) -> Self {
let mut self_ = Self::new();
self_.push(m);
self_
}
/// Return `true` if and only if `m` was the last mark of type `T` pushed into this [MarkSet]
/// and it has not yet been removed.
pub fn contains<T: Any + PartialEq>(&self, m: &T) -> bool {
self.get::<T>() == Some(m)
}
/// Return `true` if and only if a mark of type `T` was pushed into this [MarkSet] and has not
/// yet been removed.
pub fn contains_any<T: Any>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>())
}
/// Return a reference to the mark of type `T` that was last pushed into this [MarkSet] if that
/// value has not yet been removed.
pub fn get<T: Any>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.map(|rc| rc.as_ref().downcast_ref().expect(""))
}
/// Add mark `m` of type `T` into this [MarkSet]. Return the mark of type `T` that was already
/// present, if any.
pub fn push<T: Any>(&mut self, m: Rc<T>) -> Option<Rc<T>> {
self.map
.insert((&*m).type_id(), m)
.map(|v| v.downcast().expect(""))
}
/// Remove from this [MarkSet] the mark of type `T`, if any.
pub fn take_any<T: Any>(&mut self) -> Option<Rc<T>> {
self.map
.remove(&TypeId::of::<T>())
.map(|rc| rc.downcast().expect(""))
}
/// Add to this [MarkSet] all marks from `other`. Marks of the same type in `self` will be
/// discarded.
pub fn push_all(&mut self, other: &Self) {
other.map.iter().for_each(|(type_id, rc)| {
self.map.insert(type_id.clone(), rc.clone());
});
}
}
impl<M: Any> From<Rc<M>> for MarkSet {
fn from(mark: Rc<M>) -> Self {
MarkSet::new_with(mark)
}
}
#[cfg(test)]
mod a_bag {
use super::*;
#[test]
fn pushes_new_items() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
}
#[test]
fn confirms_push_with_contains() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains(&*one));
assert!(bag.contains(&*three));
}
#[test]
fn
|
() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(bag.contains_any::<i64>());
}
#[test]
fn confirms_push_with_get() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.get::<i8>(), None);
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(one), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(three), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get(), Some(&3i64));
}
#[test]
fn pops_old_items_when_new_are_pushed() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let two = Rc::new(2i8);
let three = Rc::new(3i64);
let four = Rc::new(4i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(two.clone()), Some(one));
assert!(!bag.contains(&1i8));
assert!(bag.contains(&2i8));
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.push(four.clone()), Some(three));
assert!(!bag.contains(&3i64));
assert!(bag.contains(&4i64));
}
#[test]
fn pushes_all_items() {
let mut bag0 = MarkSet::new();
bag0.push(1i8.into());
let mut bag1 = MarkSet::new();
bag1.push(2i8.into());
bag1.push(3i64.into());
bag0.push_all(&bag1);
assert!(!bag0.contains(&1i8));
assert!(bag0.contains(&2i8));
assert!(bag0.contains(&3i64));
}
#[test]
fn removes_items_by_type() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.contains_any::<i8>(), true);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(one));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(three));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), false);
}
}
|
confirms_push_with_contains_any
|
identifier_name
|
mark_set.rs
|
// Copyright 2021-2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing permissions and
// limitations under the License.
use core::any::Any;
use core::any::TypeId;
use std::collections::HashMap;
use std::rc::Rc;
/// A set of reference-counted "mark" values, containing up to one value per value type.
///
/// # Example
///
/// ```rust
/// use notemaps_text::MarkSet;
/// use std::rc::Rc;
///
/// let mut marks = MarkSet::new();
/// let thing_one: Rc<String> = "thing one".to_string().into();
/// let thing_two: Rc<String> = "thing two".to_string().into();
///
/// marks.push(thing_one.clone());
/// assert!(marks.contains(thing_one.as_ref()));
/// assert_eq!(marks.get::<String>(), Some(thing_one.as_ref()));
///
/// marks.push(thing_two.clone());
/// assert_eq!(marks.get::<String>(), Some(thing_two.as_ref()));
/// ```
#[derive(Clone, Default, Debug)]
pub struct MarkSet {
map: HashMap<TypeId, Rc<dyn Any>>,
}
impl MarkSet {
/// Create a new, empty [MarkSet].
pub fn new() -> Self {
Default::default()
}
/// Create a new [MarkSet] containing one mark, `m`.
pub fn new_with<T: Any>(m: Rc<T>) -> Self {
let mut self_ = Self::new();
self_.push(m);
self_
}
/// Return `true` if and only if `m` was the last mark of type `T` pushed into this [MarkSet]
/// and it has not yet been removed.
pub fn contains<T: Any + PartialEq>(&self, m: &T) -> bool
|
/// Return `true` if and only if a mark of type `T` was pushed into this [MarkSet] and has not
/// yet been removed.
pub fn contains_any<T: Any>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>())
}
/// Return a reference to the mark of type `T` that was last pushed into this [MarkSet] if that
/// value has not yet been removed.
pub fn get<T: Any>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.map(|rc| rc.as_ref().downcast_ref().expect(""))
}
/// Add mark `m` of type `T` into this [MarkSet]. Return the mark of type `T` that was already
/// present, if any.
pub fn push<T: Any>(&mut self, m: Rc<T>) -> Option<Rc<T>> {
self.map
.insert((&*m).type_id(), m)
.map(|v| v.downcast().expect(""))
}
/// Remove from this [MarkSet] the mark of type `T`, if any.
pub fn take_any<T: Any>(&mut self) -> Option<Rc<T>> {
self.map
.remove(&TypeId::of::<T>())
.map(|rc| rc.downcast().expect(""))
}
/// Add to this [MarkSet] all marks from `other`. Marks of the same type in `self` will be
/// discarded.
pub fn push_all(&mut self, other: &Self) {
other.map.iter().for_each(|(type_id, rc)| {
self.map.insert(type_id.clone(), rc.clone());
});
}
}
impl<M: Any> From<Rc<M>> for MarkSet {
fn from(mark: Rc<M>) -> Self {
MarkSet::new_with(mark)
}
}
#[cfg(test)]
mod a_bag {
use super::*;
#[test]
fn pushes_new_items() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
}
#[test]
fn confirms_push_with_contains() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains(&*one));
assert!(bag.contains(&*three));
}
#[test]
fn confirms_push_with_contains_any() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(bag.contains_any::<i64>());
}
#[test]
fn confirms_push_with_get() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.get::<i8>(), None);
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(one), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(three), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get(), Some(&3i64));
}
#[test]
fn pops_old_items_when_new_are_pushed() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let two = Rc::new(2i8);
let three = Rc::new(3i64);
let four = Rc::new(4i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(two.clone()), Some(one));
assert!(!bag.contains(&1i8));
assert!(bag.contains(&2i8));
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.push(four.clone()), Some(three));
assert!(!bag.contains(&3i64));
assert!(bag.contains(&4i64));
}
#[test]
fn pushes_all_items() {
let mut bag0 = MarkSet::new();
bag0.push(1i8.into());
let mut bag1 = MarkSet::new();
bag1.push(2i8.into());
bag1.push(3i64.into());
bag0.push_all(&bag1);
assert!(!bag0.contains(&1i8));
assert!(bag0.contains(&2i8));
assert!(bag0.contains(&3i64));
}
#[test]
fn removes_items_by_type() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.contains_any::<i8>(), true);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(one));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(three));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), false);
}
}
|
{
self.get::<T>() == Some(m)
}
|
identifier_body
|
mark_set.rs
|
// Copyright 2021-2022 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing permissions and
// limitations under the License.
use core::any::Any;
use core::any::TypeId;
use std::collections::HashMap;
use std::rc::Rc;
/// A set of reference-counted "mark" values, containing up to one value per value type.
///
/// # Example
///
/// ```rust
/// use notemaps_text::MarkSet;
/// use std::rc::Rc;
///
/// let mut marks = MarkSet::new();
/// let thing_one: Rc<String> = "thing one".to_string().into();
/// let thing_two: Rc<String> = "thing two".to_string().into();
///
/// marks.push(thing_one.clone());
/// assert!(marks.contains(thing_one.as_ref()));
/// assert_eq!(marks.get::<String>(), Some(thing_one.as_ref()));
///
/// marks.push(thing_two.clone());
/// assert_eq!(marks.get::<String>(), Some(thing_two.as_ref()));
/// ```
#[derive(Clone, Default, Debug)]
pub struct MarkSet {
map: HashMap<TypeId, Rc<dyn Any>>,
}
impl MarkSet {
/// Create a new, empty [MarkSet].
pub fn new() -> Self {
Default::default()
}
/// Create a new [MarkSet] containing one mark, `m`.
pub fn new_with<T: Any>(m: Rc<T>) -> Self {
let mut self_ = Self::new();
self_.push(m);
self_
}
/// Return `true` if and only if `m` was the last mark of type `T` pushed into this [MarkSet]
/// and it has not yet been removed.
pub fn contains<T: Any + PartialEq>(&self, m: &T) -> bool {
self.get::<T>() == Some(m)
}
/// Return `true` if and only if a mark of type `T` was pushed into this [MarkSet] and has not
/// yet been removed.
pub fn contains_any<T: Any>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>())
}
/// Return a reference to the mark of type `T` that was last pushed into this [MarkSet] if that
/// value has not yet been removed.
pub fn get<T: Any>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.map(|rc| rc.as_ref().downcast_ref().expect(""))
}
/// Add mark `m` of type `T` into this [MarkSet]. Return the mark of type `T` that was already
/// present, if any.
pub fn push<T: Any>(&mut self, m: Rc<T>) -> Option<Rc<T>> {
self.map
.insert((&*m).type_id(), m)
.map(|v| v.downcast().expect(""))
}
/// Remove from this [MarkSet] the mark of type `T`, if any.
pub fn take_any<T: Any>(&mut self) -> Option<Rc<T>> {
self.map
.remove(&TypeId::of::<T>())
.map(|rc| rc.downcast().expect(""))
}
/// Add to this [MarkSet] all marks from `other`. Marks of the same type in `self` will be
/// discarded.
pub fn push_all(&mut self, other: &Self) {
other.map.iter().for_each(|(type_id, rc)| {
self.map.insert(type_id.clone(), rc.clone());
});
}
}
impl<M: Any> From<Rc<M>> for MarkSet {
fn from(mark: Rc<M>) -> Self {
MarkSet::new_with(mark)
}
}
#[cfg(test)]
mod a_bag {
use super::*;
#[test]
fn pushes_new_items() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
}
#[test]
fn confirms_push_with_contains() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains(&*one));
assert!(!bag.contains(&*three));
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains(&*one));
assert!(bag.contains(&*three));
}
#[test]
fn confirms_push_with_contains_any() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert!(!bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(one.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(!bag.contains_any::<i64>());
assert_eq!(bag.push(three.clone()), None);
assert!(bag.contains_any::<i8>());
assert!(bag.contains_any::<i64>());
}
#[test]
fn confirms_push_with_get() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.get::<i8>(), None);
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(one), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get::<i64>(), None);
assert_eq!(bag.push(three), None);
assert_eq!(bag.get(), Some(&1i8));
assert_eq!(bag.get(), Some(&3i64));
}
|
fn pops_old_items_when_new_are_pushed() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let two = Rc::new(2i8);
let three = Rc::new(3i64);
let four = Rc::new(4i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(two.clone()), Some(one));
assert!(!bag.contains(&1i8));
assert!(bag.contains(&2i8));
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.push(four.clone()), Some(three));
assert!(!bag.contains(&3i64));
assert!(bag.contains(&4i64));
}
#[test]
fn pushes_all_items() {
let mut bag0 = MarkSet::new();
bag0.push(1i8.into());
let mut bag1 = MarkSet::new();
bag1.push(2i8.into());
bag1.push(3i64.into());
bag0.push_all(&bag1);
assert!(!bag0.contains(&1i8));
assert!(bag0.contains(&2i8));
assert!(bag0.contains(&3i64));
}
#[test]
fn removes_items_by_type() {
let mut bag = MarkSet::new();
let one = Rc::new(1i8);
let three = Rc::new(3i64);
assert_eq!(bag.push(one.clone()), None);
assert_eq!(bag.push(three.clone()), None);
assert_eq!(bag.contains_any::<i8>(), true);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(one));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), true);
assert_eq!(bag.take_any(), Some(three));
assert_eq!(bag.contains_any::<i8>(), false);
assert_eq!(bag.contains_any::<i64>(), false);
}
}
|
#[test]
|
random_line_split
|
digest.rs
|
use std::io::{self, Write, Read};
use std::fmt::Display;
use sha2::Sha256;
use sha2::Digest as DigestTrait;
pub struct Digest(Sha256);
impl Digest {
pub fn new() -> Digest {
Digest(Sha256::new())
}
pub fn result_str(&mut self) -> String {
return self.0.result_str();
}
pub fn input<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
}
pub fn item<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn text<K: AsRef<[u8]>, V: Display>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(format!("{}", value).as_bytes());
self.0.input(b"\0");
}
pub fn opt_field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self,
key: K, value: &Option<V>)
{
if let Some(ref val) = *value {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(val.as_ref());
self.0.input(b"\0");
}
}
pub fn bool<K: AsRef<[u8]>>(&mut self, key: K, value: bool)
{
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(if value { b"0" } else { b"1" });
}
pub fn sequence<K, I: IntoIterator>(&mut self, key: K, seq: I)
where K: AsRef<[u8]>, I::Item: AsRef<[u8]>
{
self.0.input(key.as_ref());
self.0.input(b"\0");
for value in seq {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
}
pub fn stream(&mut self, reader: &mut Read)
-> Result<(), io::Error>
{
let mut buf = [0u8; 8*1024];
loop {
let len = match reader.read(&mut buf[..]) {
Ok(0) => break,
Ok(len) => len,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
|
};
self.0.input(&buf[..len]);
}
Ok(())
}
}
impl Write for Digest {
fn write(&mut self, chunk: &[u8]) -> io::Result<usize> {
self.0.input(chunk);
Ok(chunk.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
|
Err(e) => return Err(e),
|
random_line_split
|
digest.rs
|
use std::io::{self, Write, Read};
use std::fmt::Display;
use sha2::Sha256;
use sha2::Digest as DigestTrait;
pub struct Digest(Sha256);
impl Digest {
pub fn new() -> Digest {
Digest(Sha256::new())
}
pub fn result_str(&mut self) -> String {
return self.0.result_str();
}
pub fn input<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
}
pub fn item<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn text<K: AsRef<[u8]>, V: Display>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(format!("{}", value).as_bytes());
self.0.input(b"\0");
}
pub fn opt_field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self,
key: K, value: &Option<V>)
{
if let Some(ref val) = *value {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(val.as_ref());
self.0.input(b"\0");
}
}
pub fn bool<K: AsRef<[u8]>>(&mut self, key: K, value: bool)
{
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(if value { b"0" } else { b"1" });
}
pub fn sequence<K, I: IntoIterator>(&mut self, key: K, seq: I)
where K: AsRef<[u8]>, I::Item: AsRef<[u8]>
{
self.0.input(key.as_ref());
self.0.input(b"\0");
for value in seq {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
}
pub fn stream(&mut self, reader: &mut Read)
-> Result<(), io::Error>
{
let mut buf = [0u8; 8*1024];
loop {
let len = match reader.read(&mut buf[..]) {
Ok(0) => break,
Ok(len) => len,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
self.0.input(&buf[..len]);
}
Ok(())
}
}
impl Write for Digest {
fn write(&mut self, chunk: &[u8]) -> io::Result<usize>
|
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
|
{
self.0.input(chunk);
Ok(chunk.len())
}
|
identifier_body
|
digest.rs
|
use std::io::{self, Write, Read};
use std::fmt::Display;
use sha2::Sha256;
use sha2::Digest as DigestTrait;
pub struct Digest(Sha256);
impl Digest {
pub fn new() -> Digest {
Digest(Sha256::new())
}
pub fn result_str(&mut self) -> String {
return self.0.result_str();
}
pub fn input<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
}
pub fn item<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn text<K: AsRef<[u8]>, V: Display>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(format!("{}", value).as_bytes());
self.0.input(b"\0");
}
pub fn opt_field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self,
key: K, value: &Option<V>)
{
if let Some(ref val) = *value
|
}
pub fn bool<K: AsRef<[u8]>>(&mut self, key: K, value: bool)
{
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(if value { b"0" } else { b"1" });
}
pub fn sequence<K, I: IntoIterator>(&mut self, key: K, seq: I)
where K: AsRef<[u8]>, I::Item: AsRef<[u8]>
{
self.0.input(key.as_ref());
self.0.input(b"\0");
for value in seq {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
}
pub fn stream(&mut self, reader: &mut Read)
-> Result<(), io::Error>
{
let mut buf = [0u8; 8*1024];
loop {
let len = match reader.read(&mut buf[..]) {
Ok(0) => break,
Ok(len) => len,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
self.0.input(&buf[..len]);
}
Ok(())
}
}
impl Write for Digest {
fn write(&mut self, chunk: &[u8]) -> io::Result<usize> {
self.0.input(chunk);
Ok(chunk.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
|
{
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(val.as_ref());
self.0.input(b"\0");
}
|
conditional_block
|
digest.rs
|
use std::io::{self, Write, Read};
use std::fmt::Display;
use sha2::Sha256;
use sha2::Digest as DigestTrait;
pub struct
|
(Sha256);
impl Digest {
pub fn new() -> Digest {
Digest(Sha256::new())
}
pub fn result_str(&mut self) -> String {
return self.0.result_str();
}
pub fn input<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
}
pub fn item<V: AsRef<[u8]>>(&mut self, value: V) {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(value.as_ref());
self.0.input(b"\0");
}
pub fn text<K: AsRef<[u8]>, V: Display>(&mut self, key: K, value: V) {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(format!("{}", value).as_bytes());
self.0.input(b"\0");
}
pub fn opt_field<K: AsRef<[u8]>, V: AsRef<[u8]>>(&mut self,
key: K, value: &Option<V>)
{
if let Some(ref val) = *value {
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(val.as_ref());
self.0.input(b"\0");
}
}
pub fn bool<K: AsRef<[u8]>>(&mut self, key: K, value: bool)
{
self.0.input(key.as_ref());
self.0.input(b"\0");
self.0.input(if value { b"0" } else { b"1" });
}
pub fn sequence<K, I: IntoIterator>(&mut self, key: K, seq: I)
where K: AsRef<[u8]>, I::Item: AsRef<[u8]>
{
self.0.input(key.as_ref());
self.0.input(b"\0");
for value in seq {
self.0.input(value.as_ref());
self.0.input(b"\0");
}
}
pub fn stream(&mut self, reader: &mut Read)
-> Result<(), io::Error>
{
let mut buf = [0u8; 8*1024];
loop {
let len = match reader.read(&mut buf[..]) {
Ok(0) => break,
Ok(len) => len,
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => return Err(e),
};
self.0.input(&buf[..len]);
}
Ok(())
}
}
impl Write for Digest {
fn write(&mut self, chunk: &[u8]) -> io::Result<usize> {
self.0.input(chunk);
Ok(chunk.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
|
Digest
|
identifier_name
|
counter_style_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// TODO(emilio): unify this, components/style/counter_style.rs, and
// components/style/gecko/rules.rs
#![allow(missing_docs)]
#[cfg(feature = "servo")]
pub use counter_style::CounterStyleRuleData as CounterStyleRule;
#[cfg(feature = "gecko")]
pub use gecko::rules::CounterStyleRule;
impl CounterStyleRule {
#[cfg(feature = "servo")]
pub fn clone_conditionally_gecko_or_servo(&self) -> CounterStyleRule {
self.clone()
}
#[cfg(feature = "gecko")]
pub fn
|
(&self) -> CounterStyleRule {
self.deep_clone_from_gecko()
}
}
|
clone_conditionally_gecko_or_servo
|
identifier_name
|
counter_style_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// TODO(emilio): unify this, components/style/counter_style.rs, and
// components/style/gecko/rules.rs
#![allow(missing_docs)]
#[cfg(feature = "servo")]
pub use counter_style::CounterStyleRuleData as CounterStyleRule;
#[cfg(feature = "gecko")]
pub use gecko::rules::CounterStyleRule;
impl CounterStyleRule {
#[cfg(feature = "servo")]
pub fn clone_conditionally_gecko_or_servo(&self) -> CounterStyleRule
|
#[cfg(feature = "gecko")]
pub fn clone_conditionally_gecko_or_servo(&self) -> CounterStyleRule {
self.deep_clone_from_gecko()
}
}
|
{
self.clone()
}
|
identifier_body
|
counter_style_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// TODO(emilio): unify this, components/style/counter_style.rs, and
// components/style/gecko/rules.rs
#![allow(missing_docs)]
|
pub use counter_style::CounterStyleRuleData as CounterStyleRule;
#[cfg(feature = "gecko")]
pub use gecko::rules::CounterStyleRule;
impl CounterStyleRule {
#[cfg(feature = "servo")]
pub fn clone_conditionally_gecko_or_servo(&self) -> CounterStyleRule {
self.clone()
}
#[cfg(feature = "gecko")]
pub fn clone_conditionally_gecko_or_servo(&self) -> CounterStyleRule {
self.deep_clone_from_gecko()
}
}
|
#[cfg(feature = "servo")]
|
random_line_split
|
headerbar.rs
|
use gtk::{Inhibit};
use gtk::Orientation::{Vertical};
use gtk::prelude::*;
use relm_derive::{Msg, widget};
use relm::{Component, Widget, init};
use self::HeaderMsg::*;
use self::WinMsg::*;
#[derive(Msg)]
pub enum HeaderMsg {
Add,
Remove,
}
#[widget]
impl Widget for Header {
fn model() -> () {
}
fn update(&mut self, event: HeaderMsg) {
match event {
Add => println!("Add"),
Remove => println!("Remove"),
}
}
view! {
#[name="titlebar"]
gtk::HeaderBar {
title: Some("Title"),
|
show_close_button: true,
#[name="add_button"]
gtk::Button {
clicked => Add,
label: "Add",
},
#[name="remove_button"]
gtk::Button {
clicked => Remove,
label: "Remove",
},
}
}
}
pub struct Model {
header: Component<Header>,
}
#[derive(Msg)]
pub enum WinMsg {
Quit,
}
#[widget]
impl Widget for Win {
fn model() -> Model {
let header = init::<Header>(()).expect("Header");
Model {
header
}
}
fn update(&mut self, event: WinMsg) {
match event {
Quit => gtk::main_quit(),
}
}
view! {
#[name="window"]
gtk::Window {
titlebar: Some(self.model.header.widget()),
#[name="app"]
gtk::Box {
orientation: Vertical
},
delete_event(_, _) => (Quit, Inhibit(false)),
}
}
}
fn main() {
Win::run(()).expect("Window::run");
}
|
random_line_split
|
|
headerbar.rs
|
use gtk::{Inhibit};
use gtk::Orientation::{Vertical};
use gtk::prelude::*;
use relm_derive::{Msg, widget};
use relm::{Component, Widget, init};
use self::HeaderMsg::*;
use self::WinMsg::*;
#[derive(Msg)]
pub enum HeaderMsg {
Add,
Remove,
}
#[widget]
impl Widget for Header {
fn model() -> () {
}
fn update(&mut self, event: HeaderMsg) {
match event {
Add => println!("Add"),
Remove => println!("Remove"),
}
}
view! {
#[name="titlebar"]
gtk::HeaderBar {
title: Some("Title"),
show_close_button: true,
#[name="add_button"]
gtk::Button {
clicked => Add,
label: "Add",
},
#[name="remove_button"]
gtk::Button {
clicked => Remove,
label: "Remove",
},
}
}
}
pub struct Model {
header: Component<Header>,
}
#[derive(Msg)]
pub enum WinMsg {
Quit,
}
#[widget]
impl Widget for Win {
fn
|
() -> Model {
let header = init::<Header>(()).expect("Header");
Model {
header
}
}
fn update(&mut self, event: WinMsg) {
match event {
Quit => gtk::main_quit(),
}
}
view! {
#[name="window"]
gtk::Window {
titlebar: Some(self.model.header.widget()),
#[name="app"]
gtk::Box {
orientation: Vertical
},
delete_event(_, _) => (Quit, Inhibit(false)),
}
}
}
fn main() {
Win::run(()).expect("Window::run");
}
|
model
|
identifier_name
|
lib.rs
|
#![crate_name="rustspec"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private, collections, core, convert)]
extern crate syntax;
extern crate core;
extern crate rustc;
extern crate rustspec_assertions;
pub use rustspec_assertions::{expect, eq, be_gt, be_ge, be_lt, be_le, contain, be_false, be_true, be_some, be_none};
use macro_result::MacroResult;
use test_context_node::TestContextNode;
use test_case_node::TestCaseNode;
use test_node::TestNode;
use self::core::ops::Deref;
use rustc::plugin::Registry;
use syntax::ext::base::{ExtCtxt, MacResult};
use syntax::ext::quote::rt::ToTokens;
use syntax::codemap::Span;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::{token, tts_to_parser};
use syntax::parse::parser::Parser;
mod macro_result;
mod test_context_node;
mod test_case_node;
mod test_node;
#[plugin_registrar]
pub fn plugin_registrar(registry: &mut Registry) {
registry.register_macro("scenario", macro_scenario);
}
fn is_skippable(token: syntax::parse::token::Token) -> bool {
token == token::OpenDelim(token::Brace) ||
token == token::CloseDelim(token::Brace) ||
token == token::OpenDelim(token::Paren) ||
token == token::CloseDelim(token::Paren) ||
token == token::Comma || token == token::Semi
}
#[allow(unused_must_use)]
fn extract_test_node_data(parser: &mut Parser) -> (String, P<ast::Block>) {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block = parser.parse_block().ok().unwrap();
(name.deref().to_string(), block)
}
#[allow(unused_must_use)]
fn parse_test_node(parser: &mut Parser) -> Box<TestCaseNode> {
let mut should_fail = false;
let mut should_be_ignored = false;
if parser.token == token::Dot
|
let (name, block) = extract_test_node_data(parser);
TestCaseNode::new(name, block, should_fail, should_be_ignored)
}
#[allow(unused_must_use)]
fn parse_node(cx: &mut ExtCtxt, parser: &mut Parser) -> (Option<P<ast::Block>>, Vec<Box<TestNode +'static>>) {
let mut nodes: Vec<Box<TestNode>> = Vec::new();
let mut before_block = None;
while parser.token!= token::Eof {
if is_skippable(parser.token.clone()) {
parser.bump();
continue;
}
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
match token_str {
"before" => {
if before_block.is_some() {
panic!("More than one before blocks found in the same context.");
}
parser.bump(); // skip (
before_block = Some(parser.parse_block().ok().unwrap());
},
"when" | "context" | "describe" => {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (b, children) = parse_node(cx, &mut block_parser);
let before = if b.is_some() {
Some(P(b.unwrap().deref().clone()))
} else { None };
nodes.push(TestContextNode::new(
name.deref().to_string(),
before,
children
));
},
"it" => {
nodes.push(parse_test_node(parser));
},
other => {
let span = parser.span;
parser.span_fatal(span, format!("Unexpected {}", other).as_ref());
}
}
}
(before_block, nodes)
}
#[allow(unused_must_use)]
pub fn macro_scenario(cx: &mut ExtCtxt, _: Span, tts: &[ast::TokenTree]) -> Box<MacResult +'static> {
let mut parser = cx.new_parser_from_tts(tts);
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump();
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (before, children) = parse_node(cx, &mut block_parser);
let node = TestContextNode::new(name.deref().to_string(), before, children);
MacroResult::new(vec![node.to_item(cx, &mut vec![])])
}
|
{
parser.bump();
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
should_fail = token_str == "fails";
should_be_ignored = token_str == "ignores";
}
|
conditional_block
|
lib.rs
|
#![crate_name="rustspec"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private, collections, core, convert)]
extern crate syntax;
extern crate core;
extern crate rustc;
extern crate rustspec_assertions;
pub use rustspec_assertions::{expect, eq, be_gt, be_ge, be_lt, be_le, contain, be_false, be_true, be_some, be_none};
use macro_result::MacroResult;
use test_context_node::TestContextNode;
use test_case_node::TestCaseNode;
use test_node::TestNode;
use self::core::ops::Deref;
use rustc::plugin::Registry;
use syntax::ext::base::{ExtCtxt, MacResult};
use syntax::ext::quote::rt::ToTokens;
use syntax::codemap::Span;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::{token, tts_to_parser};
use syntax::parse::parser::Parser;
mod macro_result;
mod test_context_node;
mod test_case_node;
mod test_node;
#[plugin_registrar]
pub fn plugin_registrar(registry: &mut Registry) {
registry.register_macro("scenario", macro_scenario);
}
fn
|
(token: syntax::parse::token::Token) -> bool {
token == token::OpenDelim(token::Brace) ||
token == token::CloseDelim(token::Brace) ||
token == token::OpenDelim(token::Paren) ||
token == token::CloseDelim(token::Paren) ||
token == token::Comma || token == token::Semi
}
#[allow(unused_must_use)]
fn extract_test_node_data(parser: &mut Parser) -> (String, P<ast::Block>) {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block = parser.parse_block().ok().unwrap();
(name.deref().to_string(), block)
}
#[allow(unused_must_use)]
fn parse_test_node(parser: &mut Parser) -> Box<TestCaseNode> {
let mut should_fail = false;
let mut should_be_ignored = false;
if parser.token == token::Dot {
parser.bump();
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
should_fail = token_str == "fails";
should_be_ignored = token_str == "ignores";
}
let (name, block) = extract_test_node_data(parser);
TestCaseNode::new(name, block, should_fail, should_be_ignored)
}
#[allow(unused_must_use)]
fn parse_node(cx: &mut ExtCtxt, parser: &mut Parser) -> (Option<P<ast::Block>>, Vec<Box<TestNode +'static>>) {
let mut nodes: Vec<Box<TestNode>> = Vec::new();
let mut before_block = None;
while parser.token!= token::Eof {
if is_skippable(parser.token.clone()) {
parser.bump();
continue;
}
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
match token_str {
"before" => {
if before_block.is_some() {
panic!("More than one before blocks found in the same context.");
}
parser.bump(); // skip (
before_block = Some(parser.parse_block().ok().unwrap());
},
"when" | "context" | "describe" => {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (b, children) = parse_node(cx, &mut block_parser);
let before = if b.is_some() {
Some(P(b.unwrap().deref().clone()))
} else { None };
nodes.push(TestContextNode::new(
name.deref().to_string(),
before,
children
));
},
"it" => {
nodes.push(parse_test_node(parser));
},
other => {
let span = parser.span;
parser.span_fatal(span, format!("Unexpected {}", other).as_ref());
}
}
}
(before_block, nodes)
}
#[allow(unused_must_use)]
pub fn macro_scenario(cx: &mut ExtCtxt, _: Span, tts: &[ast::TokenTree]) -> Box<MacResult +'static> {
let mut parser = cx.new_parser_from_tts(tts);
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump();
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (before, children) = parse_node(cx, &mut block_parser);
let node = TestContextNode::new(name.deref().to_string(), before, children);
MacroResult::new(vec![node.to_item(cx, &mut vec![])])
}
|
is_skippable
|
identifier_name
|
lib.rs
|
#![crate_name="rustspec"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private, collections, core, convert)]
extern crate syntax;
extern crate core;
extern crate rustc;
extern crate rustspec_assertions;
pub use rustspec_assertions::{expect, eq, be_gt, be_ge, be_lt, be_le, contain, be_false, be_true, be_some, be_none};
use macro_result::MacroResult;
use test_context_node::TestContextNode;
use test_case_node::TestCaseNode;
use test_node::TestNode;
use self::core::ops::Deref;
use rustc::plugin::Registry;
use syntax::ext::base::{ExtCtxt, MacResult};
use syntax::ext::quote::rt::ToTokens;
use syntax::codemap::Span;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::{token, tts_to_parser};
use syntax::parse::parser::Parser;
mod macro_result;
mod test_context_node;
mod test_case_node;
mod test_node;
#[plugin_registrar]
pub fn plugin_registrar(registry: &mut Registry) {
registry.register_macro("scenario", macro_scenario);
}
fn is_skippable(token: syntax::parse::token::Token) -> bool {
token == token::OpenDelim(token::Brace) ||
token == token::CloseDelim(token::Brace) ||
token == token::OpenDelim(token::Paren) ||
token == token::CloseDelim(token::Paren) ||
token == token::Comma || token == token::Semi
}
#[allow(unused_must_use)]
fn extract_test_node_data(parser: &mut Parser) -> (String, P<ast::Block>) {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block = parser.parse_block().ok().unwrap();
(name.deref().to_string(), block)
}
#[allow(unused_must_use)]
fn parse_test_node(parser: &mut Parser) -> Box<TestCaseNode>
|
#[allow(unused_must_use)]
fn parse_node(cx: &mut ExtCtxt, parser: &mut Parser) -> (Option<P<ast::Block>>, Vec<Box<TestNode +'static>>) {
let mut nodes: Vec<Box<TestNode>> = Vec::new();
let mut before_block = None;
while parser.token!= token::Eof {
if is_skippable(parser.token.clone()) {
parser.bump();
continue;
}
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
match token_str {
"before" => {
if before_block.is_some() {
panic!("More than one before blocks found in the same context.");
}
parser.bump(); // skip (
before_block = Some(parser.parse_block().ok().unwrap());
},
"when" | "context" | "describe" => {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (b, children) = parse_node(cx, &mut block_parser);
let before = if b.is_some() {
Some(P(b.unwrap().deref().clone()))
} else { None };
nodes.push(TestContextNode::new(
name.deref().to_string(),
before,
children
));
},
"it" => {
nodes.push(parse_test_node(parser));
},
other => {
let span = parser.span;
parser.span_fatal(span, format!("Unexpected {}", other).as_ref());
}
}
}
(before_block, nodes)
}
#[allow(unused_must_use)]
pub fn macro_scenario(cx: &mut ExtCtxt, _: Span, tts: &[ast::TokenTree]) -> Box<MacResult +'static> {
let mut parser = cx.new_parser_from_tts(tts);
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump();
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (before, children) = parse_node(cx, &mut block_parser);
let node = TestContextNode::new(name.deref().to_string(), before, children);
MacroResult::new(vec![node.to_item(cx, &mut vec![])])
}
|
{
let mut should_fail = false;
let mut should_be_ignored = false;
if parser.token == token::Dot {
parser.bump();
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
should_fail = token_str == "fails";
should_be_ignored = token_str == "ignores";
}
let (name, block) = extract_test_node_data(parser);
TestCaseNode::new(name, block, should_fail, should_be_ignored)
}
|
identifier_body
|
lib.rs
|
#![crate_name="rustspec"]
#![crate_type="dylib"]
#![feature(plugin_registrar, rustc_private, collections, core, convert)]
extern crate syntax;
extern crate core;
extern crate rustc;
extern crate rustspec_assertions;
pub use rustspec_assertions::{expect, eq, be_gt, be_ge, be_lt, be_le, contain, be_false, be_true, be_some, be_none};
use macro_result::MacroResult;
use test_context_node::TestContextNode;
use test_case_node::TestCaseNode;
use test_node::TestNode;
use self::core::ops::Deref;
use rustc::plugin::Registry;
use syntax::ext::base::{ExtCtxt, MacResult};
use syntax::ext::quote::rt::ToTokens;
use syntax::codemap::Span;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::{token, tts_to_parser};
use syntax::parse::parser::Parser;
mod macro_result;
mod test_context_node;
mod test_case_node;
mod test_node;
#[plugin_registrar]
pub fn plugin_registrar(registry: &mut Registry) {
registry.register_macro("scenario", macro_scenario);
}
fn is_skippable(token: syntax::parse::token::Token) -> bool {
token == token::OpenDelim(token::Brace) ||
token == token::CloseDelim(token::Brace) ||
token == token::OpenDelim(token::Paren) ||
token == token::CloseDelim(token::Paren) ||
token == token::Comma || token == token::Semi
}
#[allow(unused_must_use)]
fn extract_test_node_data(parser: &mut Parser) -> (String, P<ast::Block>) {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block = parser.parse_block().ok().unwrap();
(name.deref().to_string(), block)
}
#[allow(unused_must_use)]
fn parse_test_node(parser: &mut Parser) -> Box<TestCaseNode> {
let mut should_fail = false;
let mut should_be_ignored = false;
if parser.token == token::Dot {
parser.bump();
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
should_fail = token_str == "fails";
should_be_ignored = token_str == "ignores";
}
let (name, block) = extract_test_node_data(parser);
TestCaseNode::new(name, block, should_fail, should_be_ignored)
}
#[allow(unused_must_use)]
fn parse_node(cx: &mut ExtCtxt, parser: &mut Parser) -> (Option<P<ast::Block>>, Vec<Box<TestNode +'static>>) {
let mut nodes: Vec<Box<TestNode>> = Vec::new();
let mut before_block = None;
while parser.token!= token::Eof {
if is_skippable(parser.token.clone()) {
parser.bump();
continue;
}
let ident = parser.parse_ident().ok().unwrap();
let token_str = ident.as_str();
match token_str {
"before" => {
if before_block.is_some() {
panic!("More than one before blocks found in the same context.");
}
parser.bump(); // skip (
before_block = Some(parser.parse_block().ok().unwrap());
},
"when" | "context" | "describe" => {
parser.bump(); // skip (
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump(); // skip,
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (b, children) = parse_node(cx, &mut block_parser);
let before = if b.is_some() {
Some(P(b.unwrap().deref().clone()))
} else { None };
nodes.push(TestContextNode::new(
name.deref().to_string(),
before,
children
));
},
"it" => {
nodes.push(parse_test_node(parser));
},
other => {
let span = parser.span;
parser.span_fatal(span, format!("Unexpected {}", other).as_ref());
}
}
}
(before_block, nodes)
}
|
let (name, _) = parser.parse_str().ok().unwrap();
parser.bump();
let block_tokens = parser.parse_block().ok().unwrap().to_tokens(cx);
let mut block_parser = tts_to_parser(cx.parse_sess(), block_tokens, cx.cfg());
let (before, children) = parse_node(cx, &mut block_parser);
let node = TestContextNode::new(name.deref().to_string(), before, children);
MacroResult::new(vec![node.to_item(cx, &mut vec![])])
}
|
#[allow(unused_must_use)]
pub fn macro_scenario(cx: &mut ExtCtxt, _: Span, tts: &[ast::TokenTree]) -> Box<MacResult + 'static> {
let mut parser = cx.new_parser_from_tts(tts);
|
random_line_split
|
layout_interface.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to layout. Using this abstract
//! interface helps reduce coupling between these two components, and enables
//! the DOM to be placed in a separate crate from layout.
use app_units::Au;
use dom::node::LayoutData;
use euclid::point::Point2D;
use euclid::rect::Rect;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::uintptr_t;
use msg::compositor_msg::Epoch;
use msg::compositor_msg::LayerId;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineExitType, PipelineId};
use msg::constellation_msg::{WindowSizeData};
use net_traits::PendingAsyncLoad;
use net_traits::image_cache_task::ImageCacheTask;
use profile_traits::mem::ReportsChan;
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
use script_traits::{OpaqueScriptLayoutChannel, StylesheetLoadResponder, UntrustedNodeAddress};
use selectors::parser::PseudoElement;
use std::any::Any;
use std::sync::mpsc::{Receiver, Sender, channel};
use string_cache::Atom;
use style::animation::PropertyAnimation;
use style::media_queries::MediaQueryList;
use style::stylesheets::Stylesheet;
use style::viewport::ViewportRule;
use url::Url;
pub use dom::node::TrustedNodeAddress;
/// Asynchronous messages that script can send to layout.
pub enum Msg {
/// Adds the given stylesheet to the document.
AddStylesheet(Stylesheet, MediaQueryList),
/// Adds the given stylesheet to the document.
LoadStylesheet(Url, MediaQueryList, PendingAsyncLoad, Box<StylesheetLoadResponder + Send>),
/// Adds a @viewport rule (translated from a <META name="viewport"> element) to the document.
AddMetaViewport(ViewportRule),
/// Puts a document into quirks mode, causing the quirks mode stylesheet to be loaded.
SetQuirksMode,
/// Requests a reflow.
Reflow(ScriptReflow),
/// Get an RPC interface.
GetRPC(Sender<Box<LayoutRPC + Send>>),
/// Requests that the layout task render the next frame of all animations.
TickAnimations,
/// Requests that the layout task reflow with a newly-loaded Web font.
ReflowWithNewlyLoadedWebFont,
/// Updates the layout visible rects, affecting the area that display lists will be constructed
/// for.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Destroys layout data associated with a DOM node.
///
/// TODO(pcwalton): Maybe think about batching to avoid message traffic.
ReapLayoutData(LayoutData),
/// Requests that the layout task measure its memory usage. The resulting reports are sent back
/// via the supplied channel.
CollectReports(ReportsChan),
/// Requests that the layout task enter a quiescent state in which no more messages are
/// accepted except `ExitMsg`. A response message will be sent on the supplied channel when
/// this happens.
PrepareToExit(Sender<()>),
/// Requests that the layout task immediately shut down. There must be no more nodes left after
/// this, or layout will crash.
ExitNow(PipelineExitType),
/// Get the last epoch counter for this layout task.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks the layout task whether any Web fonts have yet to load (if true, loads are pending;
/// false otherwise).
GetWebFontLoadState(IpcSender<bool>),
/// Creates a new layout task.
///
/// This basically exists to keep the script-layout dependency one-way.
CreateLayoutTask(NewLayoutTaskInfo),
}
/// Synchronous messages that script can send to layout.
///
/// In general, you should use messages to talk to Layout. Use the RPC interface
/// if and only if the work is
///
/// 1) read-only with respect to LayoutTaskData,
/// 2) small,
/// 3) and really needs to be fast.
pub trait LayoutRPC {
/// Requests the dimensions of the content box, as in the `getBoundingClientRect()` call.
fn content_box(&self) -> ContentBoxResponse;
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse;
/// Requests the geometry of this node. Used by APIs such as `clientTop`.
fn node_geometry(&self) -> NodeGeometryResponse;
/// Requests the node containing the point of interest
fn hit_test(&self, node: TrustedNodeAddress, point: Point2D<f32>) -> Result<HitTestResponse, ()>;
fn mouse_over(&self, node: TrustedNodeAddress, point: Point2D<f32>) -> Result<MouseOverResponse, ()>;
/// Query layout for the resolved value of a given CSS property
fn resolved_style(&self) -> ResolvedStyleResponse;
fn offset_parent(&self) -> OffsetParentResponse;
|
pub struct ContentBoxResponse(pub Rect<Au>);
pub struct ContentBoxesResponse(pub Vec<Rect<Au>>);
pub struct NodeGeometryResponse {
pub client_rect: Rect<i32>,
}
pub struct HitTestResponse(pub UntrustedNodeAddress);
pub struct MouseOverResponse(pub Vec<UntrustedNodeAddress>);
pub struct ResolvedStyleResponse(pub Option<String>);
#[derive(Clone)]
pub struct OffsetParentResponse {
pub node_address: Option<UntrustedNodeAddress>,
pub rect: Rect<Au>,
}
impl OffsetParentResponse {
pub fn empty() -> OffsetParentResponse {
OffsetParentResponse {
node_address: None,
rect: Rect::zero(),
}
}
}
/// Why we're doing reflow.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum ReflowGoal {
/// We're reflowing in order to send a display list to the screen.
ForDisplay,
/// We're reflowing in order to satisfy a script query. No display list will be created.
ForScriptQuery,
}
/// Any query to perform with this reflow.
#[derive(PartialEq)]
pub enum ReflowQueryType {
NoQuery,
ContentBoxQuery(TrustedNodeAddress),
ContentBoxesQuery(TrustedNodeAddress),
NodeGeometryQuery(TrustedNodeAddress),
ResolvedStyleQuery(TrustedNodeAddress, Option<PseudoElement>, Atom),
OffsetParentQuery(TrustedNodeAddress),
}
/// Information needed for a reflow.
pub struct Reflow {
/// The goal of reflow: either to render to the screen or to flush layout info for script.
pub goal: ReflowGoal,
/// A clipping rectangle for the page, an enlarged rectangle containing the viewport.
pub page_clip_rect: Rect<Au>,
}
/// Information needed for a script-initiated reflow.
pub struct ScriptReflow {
/// General reflow data.
pub reflow_info: Reflow,
/// The document node.
pub document: TrustedNodeAddress,
/// The current window size.
pub window_size: WindowSizeData,
/// The channel that we send a notification to.
pub script_join_chan: Sender<()>,
/// The type of query if any to perform during this reflow.
pub query_type: ReflowQueryType,
}
/// Encapsulates a channel to the layout task.
#[derive(Clone)]
pub struct LayoutChan(pub Sender<Msg>);
impl LayoutChan {
pub fn new() -> (Receiver<Msg>, LayoutChan) {
let (chan, port) = channel();
(port, LayoutChan(chan))
}
}
/// A trait to manage opaque references to script<->layout channels without needing
/// to expose the message type to crates that don't need to know about them.
pub trait ScriptLayoutChan {
fn new(sender: Sender<Msg>, receiver: Receiver<Msg>) -> Self;
fn sender(&self) -> Sender<Msg>;
fn receiver(self) -> Receiver<Msg>;
}
impl ScriptLayoutChan for OpaqueScriptLayoutChannel {
fn new(sender: Sender<Msg>, receiver: Receiver<Msg>) -> OpaqueScriptLayoutChannel {
let inner = (box sender as Box<Any + Send>, box receiver as Box<Any + Send>);
OpaqueScriptLayoutChannel(inner)
}
fn sender(&self) -> Sender<Msg> {
let &OpaqueScriptLayoutChannel((ref sender, _)) = self;
(*sender.downcast_ref::<Sender<Msg>>().unwrap()).clone()
}
fn receiver(self) -> Receiver<Msg> {
let OpaqueScriptLayoutChannel((_, receiver)) = self;
*receiver.downcast::<Receiver<Msg>>().unwrap()
}
}
/// Type of an opaque node.
pub type OpaqueNode = uintptr_t;
/// State relating to an animation.
#[derive(Clone)]
pub struct Animation {
/// An opaque reference to the DOM node participating in the animation.
pub node: OpaqueNode,
/// A description of the property animation that is occurring.
pub property_animation: PropertyAnimation,
/// The start time of the animation, as returned by `time::precise_time_s()`.
pub start_time: f64,
/// The end time of the animation, as returned by `time::precise_time_s()`.
pub end_time: f64,
}
impl Animation {
/// Returns the duration of this animation in seconds.
#[inline]
pub fn duration(&self) -> f64 {
self.end_time - self.start_time
}
}
pub struct NewLayoutTaskInfo {
pub id: PipelineId,
pub url: Url,
pub is_parent: bool,
pub layout_pair: OpaqueScriptLayoutChannel,
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
pub constellation_chan: ConstellationChan,
pub failure: Failure,
pub script_chan: Sender<ConstellationControlMsg>,
pub image_cache_task: ImageCacheTask,
pub paint_chan: Box<Any + Send>,
pub layout_shutdown_chan: Sender<()>,
}
|
}
|
random_line_split
|
layout_interface.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to layout. Using this abstract
//! interface helps reduce coupling between these two components, and enables
//! the DOM to be placed in a separate crate from layout.
use app_units::Au;
use dom::node::LayoutData;
use euclid::point::Point2D;
use euclid::rect::Rect;
use ipc_channel::ipc::{IpcReceiver, IpcSender};
use libc::uintptr_t;
use msg::compositor_msg::Epoch;
use msg::compositor_msg::LayerId;
use msg::constellation_msg::{ConstellationChan, Failure, PipelineExitType, PipelineId};
use msg::constellation_msg::{WindowSizeData};
use net_traits::PendingAsyncLoad;
use net_traits::image_cache_task::ImageCacheTask;
use profile_traits::mem::ReportsChan;
use script_traits::{ConstellationControlMsg, LayoutControlMsg};
use script_traits::{OpaqueScriptLayoutChannel, StylesheetLoadResponder, UntrustedNodeAddress};
use selectors::parser::PseudoElement;
use std::any::Any;
use std::sync::mpsc::{Receiver, Sender, channel};
use string_cache::Atom;
use style::animation::PropertyAnimation;
use style::media_queries::MediaQueryList;
use style::stylesheets::Stylesheet;
use style::viewport::ViewportRule;
use url::Url;
pub use dom::node::TrustedNodeAddress;
/// Asynchronous messages that script can send to layout.
pub enum Msg {
/// Adds the given stylesheet to the document.
AddStylesheet(Stylesheet, MediaQueryList),
/// Adds the given stylesheet to the document.
LoadStylesheet(Url, MediaQueryList, PendingAsyncLoad, Box<StylesheetLoadResponder + Send>),
/// Adds a @viewport rule (translated from a <META name="viewport"> element) to the document.
AddMetaViewport(ViewportRule),
/// Puts a document into quirks mode, causing the quirks mode stylesheet to be loaded.
SetQuirksMode,
/// Requests a reflow.
Reflow(ScriptReflow),
/// Get an RPC interface.
GetRPC(Sender<Box<LayoutRPC + Send>>),
/// Requests that the layout task render the next frame of all animations.
TickAnimations,
/// Requests that the layout task reflow with a newly-loaded Web font.
ReflowWithNewlyLoadedWebFont,
/// Updates the layout visible rects, affecting the area that display lists will be constructed
/// for.
SetVisibleRects(Vec<(LayerId, Rect<Au>)>),
/// Destroys layout data associated with a DOM node.
///
/// TODO(pcwalton): Maybe think about batching to avoid message traffic.
ReapLayoutData(LayoutData),
/// Requests that the layout task measure its memory usage. The resulting reports are sent back
/// via the supplied channel.
CollectReports(ReportsChan),
/// Requests that the layout task enter a quiescent state in which no more messages are
/// accepted except `ExitMsg`. A response message will be sent on the supplied channel when
/// this happens.
PrepareToExit(Sender<()>),
/// Requests that the layout task immediately shut down. There must be no more nodes left after
/// this, or layout will crash.
ExitNow(PipelineExitType),
/// Get the last epoch counter for this layout task.
GetCurrentEpoch(IpcSender<Epoch>),
/// Asks the layout task whether any Web fonts have yet to load (if true, loads are pending;
/// false otherwise).
GetWebFontLoadState(IpcSender<bool>),
/// Creates a new layout task.
///
/// This basically exists to keep the script-layout dependency one-way.
CreateLayoutTask(NewLayoutTaskInfo),
}
/// Synchronous messages that script can send to layout.
///
/// In general, you should use messages to talk to Layout. Use the RPC interface
/// if and only if the work is
///
/// 1) read-only with respect to LayoutTaskData,
/// 2) small,
/// 3) and really needs to be fast.
pub trait LayoutRPC {
/// Requests the dimensions of the content box, as in the `getBoundingClientRect()` call.
fn content_box(&self) -> ContentBoxResponse;
/// Requests the dimensions of all the content boxes, as in the `getClientRects()` call.
fn content_boxes(&self) -> ContentBoxesResponse;
/// Requests the geometry of this node. Used by APIs such as `clientTop`.
fn node_geometry(&self) -> NodeGeometryResponse;
/// Requests the node containing the point of interest
fn hit_test(&self, node: TrustedNodeAddress, point: Point2D<f32>) -> Result<HitTestResponse, ()>;
fn mouse_over(&self, node: TrustedNodeAddress, point: Point2D<f32>) -> Result<MouseOverResponse, ()>;
/// Query layout for the resolved value of a given CSS property
fn resolved_style(&self) -> ResolvedStyleResponse;
fn offset_parent(&self) -> OffsetParentResponse;
}
pub struct ContentBoxResponse(pub Rect<Au>);
pub struct ContentBoxesResponse(pub Vec<Rect<Au>>);
pub struct NodeGeometryResponse {
pub client_rect: Rect<i32>,
}
pub struct HitTestResponse(pub UntrustedNodeAddress);
pub struct MouseOverResponse(pub Vec<UntrustedNodeAddress>);
pub struct ResolvedStyleResponse(pub Option<String>);
#[derive(Clone)]
pub struct OffsetParentResponse {
pub node_address: Option<UntrustedNodeAddress>,
pub rect: Rect<Au>,
}
impl OffsetParentResponse {
pub fn empty() -> OffsetParentResponse {
OffsetParentResponse {
node_address: None,
rect: Rect::zero(),
}
}
}
/// Why we're doing reflow.
#[derive(PartialEq, Copy, Clone, Debug)]
pub enum ReflowGoal {
/// We're reflowing in order to send a display list to the screen.
ForDisplay,
/// We're reflowing in order to satisfy a script query. No display list will be created.
ForScriptQuery,
}
/// Any query to perform with this reflow.
#[derive(PartialEq)]
pub enum ReflowQueryType {
NoQuery,
ContentBoxQuery(TrustedNodeAddress),
ContentBoxesQuery(TrustedNodeAddress),
NodeGeometryQuery(TrustedNodeAddress),
ResolvedStyleQuery(TrustedNodeAddress, Option<PseudoElement>, Atom),
OffsetParentQuery(TrustedNodeAddress),
}
/// Information needed for a reflow.
pub struct Reflow {
/// The goal of reflow: either to render to the screen or to flush layout info for script.
pub goal: ReflowGoal,
/// A clipping rectangle for the page, an enlarged rectangle containing the viewport.
pub page_clip_rect: Rect<Au>,
}
/// Information needed for a script-initiated reflow.
pub struct ScriptReflow {
/// General reflow data.
pub reflow_info: Reflow,
/// The document node.
pub document: TrustedNodeAddress,
/// The current window size.
pub window_size: WindowSizeData,
/// The channel that we send a notification to.
pub script_join_chan: Sender<()>,
/// The type of query if any to perform during this reflow.
pub query_type: ReflowQueryType,
}
/// Encapsulates a channel to the layout task.
#[derive(Clone)]
pub struct LayoutChan(pub Sender<Msg>);
impl LayoutChan {
pub fn new() -> (Receiver<Msg>, LayoutChan) {
let (chan, port) = channel();
(port, LayoutChan(chan))
}
}
/// A trait to manage opaque references to script<->layout channels without needing
/// to expose the message type to crates that don't need to know about them.
pub trait ScriptLayoutChan {
fn new(sender: Sender<Msg>, receiver: Receiver<Msg>) -> Self;
fn sender(&self) -> Sender<Msg>;
fn receiver(self) -> Receiver<Msg>;
}
impl ScriptLayoutChan for OpaqueScriptLayoutChannel {
fn new(sender: Sender<Msg>, receiver: Receiver<Msg>) -> OpaqueScriptLayoutChannel {
let inner = (box sender as Box<Any + Send>, box receiver as Box<Any + Send>);
OpaqueScriptLayoutChannel(inner)
}
fn sender(&self) -> Sender<Msg> {
let &OpaqueScriptLayoutChannel((ref sender, _)) = self;
(*sender.downcast_ref::<Sender<Msg>>().unwrap()).clone()
}
fn receiver(self) -> Receiver<Msg> {
let OpaqueScriptLayoutChannel((_, receiver)) = self;
*receiver.downcast::<Receiver<Msg>>().unwrap()
}
}
/// Type of an opaque node.
pub type OpaqueNode = uintptr_t;
/// State relating to an animation.
#[derive(Clone)]
pub struct Animation {
/// An opaque reference to the DOM node participating in the animation.
pub node: OpaqueNode,
/// A description of the property animation that is occurring.
pub property_animation: PropertyAnimation,
/// The start time of the animation, as returned by `time::precise_time_s()`.
pub start_time: f64,
/// The end time of the animation, as returned by `time::precise_time_s()`.
pub end_time: f64,
}
impl Animation {
/// Returns the duration of this animation in seconds.
#[inline]
pub fn duration(&self) -> f64 {
self.end_time - self.start_time
}
}
pub struct
|
{
pub id: PipelineId,
pub url: Url,
pub is_parent: bool,
pub layout_pair: OpaqueScriptLayoutChannel,
pub pipeline_port: IpcReceiver<LayoutControlMsg>,
pub constellation_chan: ConstellationChan,
pub failure: Failure,
pub script_chan: Sender<ConstellationControlMsg>,
pub image_cache_task: ImageCacheTask,
pub paint_chan: Box<Any + Send>,
pub layout_shutdown_chan: Sender<()>,
}
|
NewLayoutTaskInfo
|
identifier_name
|
input_layers.rs
|
use std::borrow::Cow;
use std::fmt;
use std::io::BufRead;
use std::result;
use enum_map::{Enum, EnumMap};
use failure::Error;
use tensorflow::Tensor;
use crate::features::addr;
use crate::features::lookup::LookupResult;
use crate::features::parse_addr::parse_addressed_values;
use crate::features::{BoxedLookup, Lookup, LookupType};
use crate::system::ParserState;
/// Multiple addressable parts of the parser state.
///
/// `AddressedValues` represents multiple addresses into the parser state.
/// This can be used to construct feature vectors over the parser state.
pub struct AddressedValues(pub Vec<addr::AddressedValue>);
impl AddressedValues {
/// Read addressed values specification from a text file.
///
/// Such a text file consists of lines with the format
///
/// ~~~text,no_run
/// [address+] layer
/// ~~~
///
/// Multiple addresses are used to e.g. address the left/rightmost
/// dependency of a token on the stack or buffer.
pub fn from_buf_read<R>(mut read: R) -> Result<Self, Error>
where
R: BufRead,
{
let mut data = String::new();
read.read_to_string(&mut data)?;
Ok(AddressedValues(parse_addressed_values(&data)?))
}
}
/// A feature vector.
///
/// `InputVector` instances represent feature vectors, also called
/// input layers in neural networks. The input vector is split in
/// vectors for different layers. In each layer, the feature is encoded
/// as a 32-bit identifier, which is typically the row of the layer
/// value in an embedding matrix.
pub struct InputVector {
pub lookup_layers: EnumMap<Layer, Vec<i32>>,
pub embed_layer: Tensor<f32>,
}
#[derive(Clone, Copy, Debug, Enum, Eq, PartialEq)]
pub enum Layer {
Token,
Tag,
DepRel,
Feature,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> {
let s = match *self {
Layer::Token => "tokens",
Layer::Tag => "tags",
Layer::DepRel => "deprels",
Layer::Feature => "features",
};
f.write_str(s)
}
}
// I am not sure whether I like the use of Borrow here, but is there another
// convenient way to convert from both addr::Layer and &addr::Layer?
impl From<&addr::Layer> for Layer {
fn from(layer: &addr::Layer) -> Self {
match *layer {
addr::Layer::Token => Layer::Token,
addr::Layer::Tag => Layer::Tag,
addr::Layer::DepRel => Layer::DepRel,
addr::Layer::Feature(_) => Layer::Feature,
}
}
}
/// Lookups for layers.
///
/// This data structure bundles lookups for the different layers (tokens,
/// part-of-speech, etc).
#[derive(Default)]
pub struct LayerLookups(EnumMap<Layer, BoxedLookup>);
impl LayerLookups {
pub fn new() -> Self {
LayerLookups(EnumMap::new())
}
pub fn insert<L>(&mut self, layer: Layer, lookup: L)
where
L: Into<Box<dyn Lookup>>,
{
self.0[layer] = BoxedLookup::new(lookup)
}
/// Get the lookup for a layer.
pub fn layer_lookup(&self, layer: Layer) -> Option<&dyn Lookup> {
self.0[layer].as_ref()
}
}
/// Vectorizer for parser states.
///
/// An `InputVectorizer` vectorizes parser states.
pub struct InputVectorizer {
layer_lookups: LayerLookups,
input_layer_addrs: AddressedValues,
}
impl InputVectorizer {
/// Construct an input vectorizer.
///
/// The vectorizer is constructed from the layer lookups and the parser
/// state addresses from which the feature vector should be used. The layer
/// lookups are used to find the indices that represent the features.
pub fn new(layer_lookups: LayerLookups, input_layer_addrs: AddressedValues) -> Self {
InputVectorizer {
layer_lookups,
input_layer_addrs,
}
}
pub fn embedding_layer_size(&self) -> usize {
let mut size = 0;
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(dims) => size += dims,
LookupType::Index => (),
}
}
}
size
}
pub fn layer_addrs(&self) -> &AddressedValues {
&self.input_layer_addrs
}
/// Get the layer lookups.
pub fn layer_lookups(&self) -> &LayerLookups {
&self.layer_lookups
}
pub fn lookup_layer_sizes(&self) -> EnumMap<Layer, usize> {
let mut sizes = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(_) => (),
LookupType::Index => sizes[(&layer.layer).into()] += 1,
}
}
}
sizes
}
/// Vectorize a parser state.
pub fn realize(&self, state: &ParserState<'_>) -> InputVector {
let mut embed_layer = Tensor::new(&[self.embedding_layer_size() as u64]);
let mut lookup_layers = EnumMap::new();
|
self.realize_into(state, &mut embed_layer, &mut lookup_layers);
InputVector {
embed_layer,
lookup_layers,
}
}
/// Vectorize a parser state into the given slices.
pub fn realize_into<S>(
&self,
state: &ParserState<'_>,
embed_layer: &mut [f32],
lookup_slices: &mut EnumMap<Layer, S>,
) where
S: AsMut<[i32]>,
{
let mut embed_offset = 0;
let mut layer_offsets: EnumMap<Layer, usize> = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
let val = layer.get(state);
let offset = &mut layer_offsets[(&layer.layer).into()];
let layer = &layer.layer;
match lookup_value(
self.layer_lookups
.layer_lookup(layer.into())
.unwrap_or_else(|| panic!("Missing layer lookup for: {:?}", layer)),
val,
) {
LookupResult::Embedding(embed) => {
embed_layer[embed_offset..embed_offset + embed.len()]
.copy_from_slice(embed.as_slice().expect("Embedding is not contiguous"));
embed_offset += embed.len();
}
LookupResult::Index(idx) => {
lookup_slices[layer.into()].as_mut()[*offset] = idx as i32;
*offset += 1;
}
}
}
}
}
fn lookup_value<'a>(lookup: &'a dyn Lookup, feature: Option<Cow<'_, str>>) -> LookupResult<'a> {
match feature {
Some(f) => lookup
.lookup(f.as_ref())
.unwrap_or_else(|| lookup.unknown()),
None => lookup.null(),
}
}
|
for (layer, &size) in &self.lookup_layer_sizes() {
lookup_layers[layer] = vec![0; size];
}
|
random_line_split
|
input_layers.rs
|
use std::borrow::Cow;
use std::fmt;
use std::io::BufRead;
use std::result;
use enum_map::{Enum, EnumMap};
use failure::Error;
use tensorflow::Tensor;
use crate::features::addr;
use crate::features::lookup::LookupResult;
use crate::features::parse_addr::parse_addressed_values;
use crate::features::{BoxedLookup, Lookup, LookupType};
use crate::system::ParserState;
/// Multiple addressable parts of the parser state.
///
/// `AddressedValues` represents multiple addresses into the parser state.
/// This can be used to construct feature vectors over the parser state.
pub struct AddressedValues(pub Vec<addr::AddressedValue>);
impl AddressedValues {
/// Read addressed values specification from a text file.
///
/// Such a text file consists of lines with the format
///
/// ~~~text,no_run
/// [address+] layer
/// ~~~
///
/// Multiple addresses are used to e.g. address the left/rightmost
/// dependency of a token on the stack or buffer.
pub fn from_buf_read<R>(mut read: R) -> Result<Self, Error>
where
R: BufRead,
{
let mut data = String::new();
read.read_to_string(&mut data)?;
Ok(AddressedValues(parse_addressed_values(&data)?))
}
}
/// A feature vector.
///
/// `InputVector` instances represent feature vectors, also called
/// input layers in neural networks. The input vector is split in
/// vectors for different layers. In each layer, the feature is encoded
/// as a 32-bit identifier, which is typically the row of the layer
/// value in an embedding matrix.
pub struct InputVector {
pub lookup_layers: EnumMap<Layer, Vec<i32>>,
pub embed_layer: Tensor<f32>,
}
#[derive(Clone, Copy, Debug, Enum, Eq, PartialEq)]
pub enum Layer {
Token,
Tag,
DepRel,
Feature,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> {
let s = match *self {
Layer::Token => "tokens",
Layer::Tag => "tags",
Layer::DepRel => "deprels",
Layer::Feature => "features",
};
f.write_str(s)
}
}
// I am not sure whether I like the use of Borrow here, but is there another
// convenient way to convert from both addr::Layer and &addr::Layer?
impl From<&addr::Layer> for Layer {
fn from(layer: &addr::Layer) -> Self {
match *layer {
addr::Layer::Token => Layer::Token,
addr::Layer::Tag => Layer::Tag,
addr::Layer::DepRel => Layer::DepRel,
addr::Layer::Feature(_) => Layer::Feature,
}
}
}
/// Lookups for layers.
///
/// This data structure bundles lookups for the different layers (tokens,
/// part-of-speech, etc).
#[derive(Default)]
pub struct
|
(EnumMap<Layer, BoxedLookup>);
impl LayerLookups {
pub fn new() -> Self {
LayerLookups(EnumMap::new())
}
pub fn insert<L>(&mut self, layer: Layer, lookup: L)
where
L: Into<Box<dyn Lookup>>,
{
self.0[layer] = BoxedLookup::new(lookup)
}
/// Get the lookup for a layer.
pub fn layer_lookup(&self, layer: Layer) -> Option<&dyn Lookup> {
self.0[layer].as_ref()
}
}
/// Vectorizer for parser states.
///
/// An `InputVectorizer` vectorizes parser states.
pub struct InputVectorizer {
layer_lookups: LayerLookups,
input_layer_addrs: AddressedValues,
}
impl InputVectorizer {
/// Construct an input vectorizer.
///
/// The vectorizer is constructed from the layer lookups and the parser
/// state addresses from which the feature vector should be used. The layer
/// lookups are used to find the indices that represent the features.
pub fn new(layer_lookups: LayerLookups, input_layer_addrs: AddressedValues) -> Self {
InputVectorizer {
layer_lookups,
input_layer_addrs,
}
}
pub fn embedding_layer_size(&self) -> usize {
let mut size = 0;
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(dims) => size += dims,
LookupType::Index => (),
}
}
}
size
}
pub fn layer_addrs(&self) -> &AddressedValues {
&self.input_layer_addrs
}
/// Get the layer lookups.
pub fn layer_lookups(&self) -> &LayerLookups {
&self.layer_lookups
}
pub fn lookup_layer_sizes(&self) -> EnumMap<Layer, usize> {
let mut sizes = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(_) => (),
LookupType::Index => sizes[(&layer.layer).into()] += 1,
}
}
}
sizes
}
/// Vectorize a parser state.
pub fn realize(&self, state: &ParserState<'_>) -> InputVector {
let mut embed_layer = Tensor::new(&[self.embedding_layer_size() as u64]);
let mut lookup_layers = EnumMap::new();
for (layer, &size) in &self.lookup_layer_sizes() {
lookup_layers[layer] = vec![0; size];
}
self.realize_into(state, &mut embed_layer, &mut lookup_layers);
InputVector {
embed_layer,
lookup_layers,
}
}
/// Vectorize a parser state into the given slices.
pub fn realize_into<S>(
&self,
state: &ParserState<'_>,
embed_layer: &mut [f32],
lookup_slices: &mut EnumMap<Layer, S>,
) where
S: AsMut<[i32]>,
{
let mut embed_offset = 0;
let mut layer_offsets: EnumMap<Layer, usize> = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
let val = layer.get(state);
let offset = &mut layer_offsets[(&layer.layer).into()];
let layer = &layer.layer;
match lookup_value(
self.layer_lookups
.layer_lookup(layer.into())
.unwrap_or_else(|| panic!("Missing layer lookup for: {:?}", layer)),
val,
) {
LookupResult::Embedding(embed) => {
embed_layer[embed_offset..embed_offset + embed.len()]
.copy_from_slice(embed.as_slice().expect("Embedding is not contiguous"));
embed_offset += embed.len();
}
LookupResult::Index(idx) => {
lookup_slices[layer.into()].as_mut()[*offset] = idx as i32;
*offset += 1;
}
}
}
}
}
fn lookup_value<'a>(lookup: &'a dyn Lookup, feature: Option<Cow<'_, str>>) -> LookupResult<'a> {
match feature {
Some(f) => lookup
.lookup(f.as_ref())
.unwrap_or_else(|| lookup.unknown()),
None => lookup.null(),
}
}
|
LayerLookups
|
identifier_name
|
input_layers.rs
|
use std::borrow::Cow;
use std::fmt;
use std::io::BufRead;
use std::result;
use enum_map::{Enum, EnumMap};
use failure::Error;
use tensorflow::Tensor;
use crate::features::addr;
use crate::features::lookup::LookupResult;
use crate::features::parse_addr::parse_addressed_values;
use crate::features::{BoxedLookup, Lookup, LookupType};
use crate::system::ParserState;
/// Multiple addressable parts of the parser state.
///
/// `AddressedValues` represents multiple addresses into the parser state.
/// This can be used to construct feature vectors over the parser state.
pub struct AddressedValues(pub Vec<addr::AddressedValue>);
impl AddressedValues {
/// Read addressed values specification from a text file.
///
/// Such a text file consists of lines with the format
///
/// ~~~text,no_run
/// [address+] layer
/// ~~~
///
/// Multiple addresses are used to e.g. address the left/rightmost
/// dependency of a token on the stack or buffer.
pub fn from_buf_read<R>(mut read: R) -> Result<Self, Error>
where
R: BufRead,
{
let mut data = String::new();
read.read_to_string(&mut data)?;
Ok(AddressedValues(parse_addressed_values(&data)?))
}
}
/// A feature vector.
///
/// `InputVector` instances represent feature vectors, also called
/// input layers in neural networks. The input vector is split in
/// vectors for different layers. In each layer, the feature is encoded
/// as a 32-bit identifier, which is typically the row of the layer
/// value in an embedding matrix.
pub struct InputVector {
pub lookup_layers: EnumMap<Layer, Vec<i32>>,
pub embed_layer: Tensor<f32>,
}
#[derive(Clone, Copy, Debug, Enum, Eq, PartialEq)]
pub enum Layer {
Token,
Tag,
DepRel,
Feature,
}
impl fmt::Display for Layer {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> result::Result<(), fmt::Error> {
let s = match *self {
Layer::Token => "tokens",
Layer::Tag => "tags",
Layer::DepRel => "deprels",
Layer::Feature => "features",
};
f.write_str(s)
}
}
// I am not sure whether I like the use of Borrow here, but is there another
// convenient way to convert from both addr::Layer and &addr::Layer?
impl From<&addr::Layer> for Layer {
fn from(layer: &addr::Layer) -> Self {
match *layer {
addr::Layer::Token => Layer::Token,
addr::Layer::Tag => Layer::Tag,
addr::Layer::DepRel => Layer::DepRel,
addr::Layer::Feature(_) => Layer::Feature,
}
}
}
/// Lookups for layers.
///
/// This data structure bundles lookups for the different layers (tokens,
/// part-of-speech, etc).
#[derive(Default)]
pub struct LayerLookups(EnumMap<Layer, BoxedLookup>);
impl LayerLookups {
pub fn new() -> Self {
LayerLookups(EnumMap::new())
}
pub fn insert<L>(&mut self, layer: Layer, lookup: L)
where
L: Into<Box<dyn Lookup>>,
{
self.0[layer] = BoxedLookup::new(lookup)
}
/// Get the lookup for a layer.
pub fn layer_lookup(&self, layer: Layer) -> Option<&dyn Lookup> {
self.0[layer].as_ref()
}
}
/// Vectorizer for parser states.
///
/// An `InputVectorizer` vectorizes parser states.
pub struct InputVectorizer {
layer_lookups: LayerLookups,
input_layer_addrs: AddressedValues,
}
impl InputVectorizer {
/// Construct an input vectorizer.
///
/// The vectorizer is constructed from the layer lookups and the parser
/// state addresses from which the feature vector should be used. The layer
/// lookups are used to find the indices that represent the features.
pub fn new(layer_lookups: LayerLookups, input_layer_addrs: AddressedValues) -> Self {
InputVectorizer {
layer_lookups,
input_layer_addrs,
}
}
pub fn embedding_layer_size(&self) -> usize {
let mut size = 0;
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(dims) => size += dims,
LookupType::Index => (),
}
}
}
size
}
pub fn layer_addrs(&self) -> &AddressedValues {
&self.input_layer_addrs
}
/// Get the layer lookups.
pub fn layer_lookups(&self) -> &LayerLookups
|
pub fn lookup_layer_sizes(&self) -> EnumMap<Layer, usize> {
let mut sizes = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
if let Some(lookup) = self.layer_lookups.0[(&layer.layer).into()].as_ref() {
match lookup.lookup_type() {
LookupType::Embedding(_) => (),
LookupType::Index => sizes[(&layer.layer).into()] += 1,
}
}
}
sizes
}
/// Vectorize a parser state.
pub fn realize(&self, state: &ParserState<'_>) -> InputVector {
let mut embed_layer = Tensor::new(&[self.embedding_layer_size() as u64]);
let mut lookup_layers = EnumMap::new();
for (layer, &size) in &self.lookup_layer_sizes() {
lookup_layers[layer] = vec![0; size];
}
self.realize_into(state, &mut embed_layer, &mut lookup_layers);
InputVector {
embed_layer,
lookup_layers,
}
}
/// Vectorize a parser state into the given slices.
pub fn realize_into<S>(
&self,
state: &ParserState<'_>,
embed_layer: &mut [f32],
lookup_slices: &mut EnumMap<Layer, S>,
) where
S: AsMut<[i32]>,
{
let mut embed_offset = 0;
let mut layer_offsets: EnumMap<Layer, usize> = EnumMap::new();
for layer in &self.input_layer_addrs.0 {
let val = layer.get(state);
let offset = &mut layer_offsets[(&layer.layer).into()];
let layer = &layer.layer;
match lookup_value(
self.layer_lookups
.layer_lookup(layer.into())
.unwrap_or_else(|| panic!("Missing layer lookup for: {:?}", layer)),
val,
) {
LookupResult::Embedding(embed) => {
embed_layer[embed_offset..embed_offset + embed.len()]
.copy_from_slice(embed.as_slice().expect("Embedding is not contiguous"));
embed_offset += embed.len();
}
LookupResult::Index(idx) => {
lookup_slices[layer.into()].as_mut()[*offset] = idx as i32;
*offset += 1;
}
}
}
}
}
fn lookup_value<'a>(lookup: &'a dyn Lookup, feature: Option<Cow<'_, str>>) -> LookupResult<'a> {
match feature {
Some(f) => lookup
.lookup(f.as_ref())
.unwrap_or_else(|| lookup.unknown()),
None => lookup.null(),
}
}
|
{
&self.layer_lookups
}
|
identifier_body
|
e01-pass-struct.rs
|
/// Exercise 11.1: Modify the example code shown in Figure 11.4 to pass the structure between the
/// threads properly.
///
/// The cleanest solution is - I think - if the main thread is responsible
/// for both mallocing and freeing the memory, thus we don't need
/// the return value (or pthread_exit), but store the changes in to the arg
/// variable directly.
///
/// The solution in the book works as well of course, but leaks memory.
///
/// $ e01-pass-struct
/// Foo { a: 55, b: 66, c: 3, d: 4 }
extern crate libc;
extern crate apue;
use libc::c_void;
use libc::usleep;
use apue::my_libc::pthread_create;
use apue::LibcResult;
use std::ptr::null_mut;
#[derive(Debug)]
struct Foo {
a: i32,
b: i32,
c: i32,
d: i32,
}
unsafe extern "C" fn thr_fn1(foo_ptr: *mut c_void) -> *mut c_void {
let foo = foo_ptr as *mut Foo;
(*foo).a = 55;
(*foo).b = 66;
0 as _
}
fn
|
() {
unsafe {
let foo = Box::new(Foo {
a: 1,
b: 2,
c: 3,
d: 4,
});
let mut tid1 = std::mem::uninitialized();
let foo_ptr = Box::into_raw(foo);
pthread_create(&mut tid1, null_mut(), thr_fn1, foo_ptr as *mut c_void).check_zero().expect("can't create thread 1");
libc::pthread_join(tid1, null_mut()).check_zero().expect("join error");
let foo: Box<Foo> = Box::from_raw(foo_ptr);
usleep(100);
println!("{:?}", foo);
}
}
|
main
|
identifier_name
|
e01-pass-struct.rs
|
/// Exercise 11.1: Modify the example code shown in Figure 11.4 to pass the structure between the
|
///
/// The cleanest solution is - I think - if the main thread is responsible
/// for both mallocing and freeing the memory, thus we don't need
/// the return value (or pthread_exit), but store the changes in to the arg
/// variable directly.
///
/// The solution in the book works as well of course, but leaks memory.
///
/// $ e01-pass-struct
/// Foo { a: 55, b: 66, c: 3, d: 4 }
extern crate libc;
extern crate apue;
use libc::c_void;
use libc::usleep;
use apue::my_libc::pthread_create;
use apue::LibcResult;
use std::ptr::null_mut;
#[derive(Debug)]
struct Foo {
a: i32,
b: i32,
c: i32,
d: i32,
}
unsafe extern "C" fn thr_fn1(foo_ptr: *mut c_void) -> *mut c_void {
let foo = foo_ptr as *mut Foo;
(*foo).a = 55;
(*foo).b = 66;
0 as _
}
fn main() {
unsafe {
let foo = Box::new(Foo {
a: 1,
b: 2,
c: 3,
d: 4,
});
let mut tid1 = std::mem::uninitialized();
let foo_ptr = Box::into_raw(foo);
pthread_create(&mut tid1, null_mut(), thr_fn1, foo_ptr as *mut c_void).check_zero().expect("can't create thread 1");
libc::pthread_join(tid1, null_mut()).check_zero().expect("join error");
let foo: Box<Foo> = Box::from_raw(foo_ptr);
usleep(100);
println!("{:?}", foo);
}
}
|
/// threads properly.
|
random_line_split
|
main.rs
|
extern crate rand;
use std::io;
use std::cmp::Ordering;
use rand::Rng;
fn main()
|
println!("You guessed: {}", myguess);
let secret = rand::thread_rng().gen_range(1,101);
match myguess.cmp(&secret) {
Ordering::Less => println!("Too small!!"),
Ordering::Greater => println!("Too BIG!!"),
Ordering::Equal => {
println!("You Win!!!");
break;
}
}
println!("The secret number is: {}", secret);
}
}
|
{
loop {
println!("\n");
println!("Hurry ! guess a number between 1 and 100 quick !!");
println!("Now quickly enter what you have guessed !");
let mut myguess = String::new();
io::stdin()
.read_line(&mut myguess)
.ok()
.expect("can not read !! I am blind again !!");
let myguess: u32 = match myguess
.trim()
.parse(){
Ok(num) => num,
Err(_) => continue,
};
|
identifier_body
|
main.rs
|
extern crate rand;
use std::io;
use std::cmp::Ordering;
use rand::Rng;
fn main() {
loop {
println!("\n");
println!("Hurry! guess a number between 1 and 100 quick!!");
println!("Now quickly enter what you have guessed!");
let mut myguess = String::new();
io::stdin()
.read_line(&mut myguess)
.ok()
.expect("can not read!! I am blind again!!");
let myguess: u32 = match myguess
.trim()
.parse(){
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", myguess);
let secret = rand::thread_rng().gen_range(1,101);
match myguess.cmp(&secret) {
Ordering::Less => println!("Too small!!"),
Ordering::Greater => println!("Too BIG!!"),
Ordering::Equal =>
|
}
println!("The secret number is: {}", secret);
}
}
|
{
println!("You Win !!!");
break;
}
|
conditional_block
|
main.rs
|
extern crate rand;
use std::io;
use std::cmp::Ordering;
use rand::Rng;
fn main() {
loop {
println!("\n");
println!("Hurry! guess a number between 1 and 100 quick!!");
println!("Now quickly enter what you have guessed!");
let mut myguess = String::new();
io::stdin()
.read_line(&mut myguess)
.ok()
.expect("can not read!! I am blind again!!");
let myguess: u32 = match myguess
.trim()
.parse(){
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", myguess);
|
match myguess.cmp(&secret) {
Ordering::Less => println!("Too small!!"),
Ordering::Greater => println!("Too BIG!!"),
Ordering::Equal => {
println!("You Win!!!");
break;
}
}
println!("The secret number is: {}", secret);
}
}
|
let secret = rand::thread_rng().gen_range(1,101);
|
random_line_split
|
main.rs
|
extern crate rand;
use std::io;
use std::cmp::Ordering;
use rand::Rng;
fn
|
() {
loop {
println!("\n");
println!("Hurry! guess a number between 1 and 100 quick!!");
println!("Now quickly enter what you have guessed!");
let mut myguess = String::new();
io::stdin()
.read_line(&mut myguess)
.ok()
.expect("can not read!! I am blind again!!");
let myguess: u32 = match myguess
.trim()
.parse(){
Ok(num) => num,
Err(_) => continue,
};
println!("You guessed: {}", myguess);
let secret = rand::thread_rng().gen_range(1,101);
match myguess.cmp(&secret) {
Ordering::Less => println!("Too small!!"),
Ordering::Greater => println!("Too BIG!!"),
Ordering::Equal => {
println!("You Win!!!");
break;
}
}
println!("The secret number is: {}", secret);
}
}
|
main
|
identifier_name
|
arm_linux_androideabi.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn
|
() -> Target {
let mut base = super::linux_base::opts();
base.features = "+v7".to_string();
// Many of the symbols defined in compiler-rt are also defined in libgcc. Android
// linker doesn't like that by default.
base.pre_link_args.push("-Wl,--allow-multiple-definition".to_string());
// FIXME #17437 (and #17448): Android doesn't support position dependent executables anymore.
base.position_independent_executables = false;
Target {
data_layout: "e-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string(),
llvm_target: "arm-linux-androideabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
options: base,
}
}
|
target
|
identifier_name
|
arm_linux_androideabi.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target {
let mut base = super::linux_base::opts();
base.features = "+v7".to_string();
// Many of the symbols defined in compiler-rt are also defined in libgcc. Android
// linker doesn't like that by default.
|
Target {
data_layout: "e-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string(),
llvm_target: "arm-linux-androideabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
options: base,
}
}
|
base.pre_link_args.push("-Wl,--allow-multiple-definition".to_string());
// FIXME #17437 (and #17448): Android doesn't support position dependent executables anymore.
base.position_independent_executables = false;
|
random_line_split
|
arm_linux_androideabi.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use target::Target;
pub fn target() -> Target
|
options: base,
}
}
|
{
let mut base = super::linux_base::opts();
base.features = "+v7".to_string();
// Many of the symbols defined in compiler-rt are also defined in libgcc. Android
// linker doesn't like that by default.
base.pre_link_args.push("-Wl,--allow-multiple-definition".to_string());
// FIXME #17437 (and #17448): Android doesn't support position dependent executables anymore.
base.position_independent_executables = false;
Target {
data_layout: "e-p:32:32:32\
-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64\
-f32:32:32-f64:64:64\
-v64:64:64-v128:64:128\
-a:0:64-n32".to_string(),
llvm_target: "arm-linux-androideabi".to_string(),
target_endian: "little".to_string(),
target_pointer_width: "32".to_string(),
arch: "arm".to_string(),
target_os: "android".to_string(),
|
identifier_body
|
classes-simple.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct cat {
priv meows : uint,
how_hungry : int,
}
|
cat {
meows: in_x,
how_hungry: in_y
}
}
pub fn main() {
let mut nyan : cat = cat(52u, 99);
let mut kitty = cat(1000u, 2);
assert!((nyan.how_hungry == 99));
assert!((kitty.how_hungry == 2));
}
|
fn cat(in_x : uint, in_y : int) -> cat {
|
random_line_split
|
classes-simple.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct cat {
priv meows : uint,
how_hungry : int,
}
fn cat(in_x : uint, in_y : int) -> cat
|
pub fn main() {
let mut nyan : cat = cat(52u, 99);
let mut kitty = cat(1000u, 2);
assert!((nyan.how_hungry == 99));
assert!((kitty.how_hungry == 2));
}
|
{
cat {
meows: in_x,
how_hungry: in_y
}
}
|
identifier_body
|
classes-simple.rs
|
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
struct cat {
priv meows : uint,
how_hungry : int,
}
fn cat(in_x : uint, in_y : int) -> cat {
cat {
meows: in_x,
how_hungry: in_y
}
}
pub fn
|
() {
let mut nyan : cat = cat(52u, 99);
let mut kitty = cat(1000u, 2);
assert!((nyan.how_hungry == 99));
assert!((kitty.how_hungry == 2));
}
|
main
|
identifier_name
|
main.rs
|
use mylib_threadpool::ThreadPool;
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
fn main() {
let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
let pool = ThreadPool::new(4);
for stream in listener.incoming().take(2) {
let stream = stream.unwrap();
pool.execute(|| {
handle_connection(stream);
});
}
println!("Shutting down.");
}
fn
|
(mut stream: TcpStream) {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html")
};
let contents = fs::read_to_string(filename).unwrap();
let response = format!("{}{}", status_line, contents);
stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
|
handle_connection
|
identifier_name
|
main.rs
|
use mylib_threadpool::ThreadPool;
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
fn main()
|
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html")
};
let contents = fs::read_to_string(filename).unwrap();
let response = format!("{}{}", status_line, contents);
stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
|
{
let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
let pool = ThreadPool::new(4);
for stream in listener.incoming().take(2) {
let stream = stream.unwrap();
pool.execute(|| {
handle_connection(stream);
});
}
println!("Shutting down.");
}
|
identifier_body
|
main.rs
|
use mylib_threadpool::ThreadPool;
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
fn main() {
let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
let pool = ThreadPool::new(4);
for stream in listener.incoming().take(2) {
let stream = stream.unwrap();
pool.execute(|| {
handle_connection(stream);
});
}
println!("Shutting down.");
}
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
|
};
let contents = fs::read_to_string(filename).unwrap();
let response = format!("{}{}", status_line, contents);
stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
|
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else {
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html")
|
random_line_split
|
main.rs
|
use mylib_threadpool::ThreadPool;
use std::fs;
use std::io::prelude::*;
use std::net::TcpListener;
use std::net::TcpStream;
use std::thread;
use std::time::Duration;
fn main() {
let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
let pool = ThreadPool::new(4);
for stream in listener.incoming().take(2) {
let stream = stream.unwrap();
pool.execute(|| {
handle_connection(stream);
});
}
println!("Shutting down.");
}
fn handle_connection(mut stream: TcpStream) {
let mut buffer = [0; 1024];
stream.read(&mut buffer).unwrap();
let get = b"GET / HTTP/1.1\r\n";
let sleep = b"GET /sleep HTTP/1.1\r\n";
let (status_line, filename) = if buffer.starts_with(get) {
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else if buffer.starts_with(sleep) {
thread::sleep(Duration::from_secs(5));
("HTTP/1.1 200 OK\r\n\r\n", "hello.html")
} else
|
;
let contents = fs::read_to_string(filename).unwrap();
let response = format!("{}{}", status_line, contents);
stream.write(response.as_bytes()).unwrap();
stream.flush().unwrap();
}
|
{
("HTTP/1.1 404 NOT FOUND\r\n\r\n", "404.html")
}
|
conditional_block
|
source.rs
|
use std::fmt::{self, Debug, Formatter};
use url::Url;
use core::source::{Source, SourceId};
use core::GitReference;
use core::{Dependency, Package, PackageId, Summary};
use util::Config;
use util::errors::CargoResult;
use util::hex::short_hash;
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
ident: String,
config: &'cfg Config,
}
impl<'cfg> GitSource<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config) -> CargoResult<GitSource<'cfg>> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url())?;
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
};
let source = GitSource {
remote,
reference,
source_id: source_id.clone(),
path_source: None,
rev: None,
ident,
config,
};
Ok(source)
}
pub fn url(&self) -> &Url {
self.remote.url()
}
pub fn read_packages(&mut self) -> CargoResult<Vec<Package>> {
if self.path_source.is_none() {
self.update()?;
}
self.path_source.as_mut().unwrap().read_packages()
}
}
fn ident(url: &Url) -> CargoResult<String> {
let url = canonicalize_url(url)?;
let ident = url.path_segments()
.and_then(|mut s| s.next_back())
.unwrap_or("");
let ident = if ident == "" { "_empty" } else { ident };
Ok(format!("{}-{}", ident, short_hash(&url)))
}
// Some hacks and heuristics for making equivalent URLs hash the same
pub fn canonicalize_url(url: &Url) -> CargoResult<Url> {
let mut url = url.clone();
// cannot-be-a-base-urls are not supported
// eg. github.com:rust-lang-nursery/rustfmt.git
if url.cannot_be_a_base() {
bail!(
"invalid url `{}`: cannot-be-a-base-URLs are not supported",
url
)
}
// Strip a trailing slash
if url.path().ends_with('/') {
url.path_segments_mut().unwrap().pop_if_empty();
}
// HACKHACK: For GitHub URL's specifically just lowercase
// everything. GitHub treats both the same, but they hash
// differently, and we're gonna be hashing them. This wants a more
// general solution, and also we're almost certainly not using the
// same case conversion rules that GitHub does. (#84)
if url.host_str() == Some("github.com") {
url.set_scheme("https").unwrap();
let path = url.path().to_lowercase();
url.set_path(&path);
}
// Repos generally can be accessed with or w/o '.git'
let needs_chopping = url.path().ends_with(".git");
if needs_chopping {
let last = {
let last = url.path_segments().unwrap().next_back().unwrap();
last[..last.len() - 4].to_owned()
};
url.path_segments_mut().unwrap().pop().push(&last);
}
Ok(url)
}
impl<'cfg> Debug for GitSource<'cfg> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "git repo at {}", self.remote.url())?;
match self.reference.pretty_ref() {
Some(s) => write!(f, " ({})", s),
None => Ok(()),
}
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
let src = self.path_source
.as_mut()
.expect("BUG: update() must be called before query()");
src.query(dep, f)
}
fn supports_checksums(&self) -> bool {
false
}
fn requires_precise(&self) -> bool {
true
}
fn source_id(&self) -> &SourceId {
&self.source_id
}
fn update(&mut self) -> CargoResult<()> {
let lock =
self.config
.git_path()
.open_rw(".cargo-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
if self.config.cli_unstable().offline &&!db_path.exists() {
bail!(
"can't checkout from '{}': you are in the offline mode (-Z offline)",
self.remote.url()
);
}
// Resolve our reference to an actual revision, and check if the
// database already has that revision. If it does, we just load a
// database pinned at that revision, and if we don't we issue an update
// to try to find the revision.
let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() || self.source_id.precise().is_none();
let (db, actual_rev) = if should_update &&!self.config.cli_unstable().offline
|
else {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
// Don’t use the full hash,
// to contribute less to reaching the path length limit on Windows:
// https://github.com/servo/servo/pull/14397
let short_id = db.to_short_id(actual_rev.clone()).unwrap();
let checkout_path = lock.parent()
.join("checkouts")
.join(&self.ident)
.join(short_id.as_str());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
trace!(
"getting packages for package id `{}` from `{:?}`",
id,
self.remote
);
self.path_source
.as_mut()
.expect("BUG: update() must be called before get()")
.download(id)
}
fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}
}
#[cfg(test)]
mod test {
use url::Url;
use super::ident;
use util::ToUrl;
#[test]
pub fn test_url_to_path_ident_with_path() {
let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap();
assert!(ident.starts_with("cargo-"));
}
#[test]
pub fn test_url_to_path_ident_without_path() {
let ident = ident(&url("https://github.com")).unwrap();
assert!(ident.starts_with("_empty-"));
}
#[test]
fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_lowercasing_github_urls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_stripping_dot_git() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_different_protocols() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_cannot_be_a_base_urls() {
assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err());
assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err());
}
fn url(s: &str) -> Url {
s.to_url().unwrap()
}
}
|
{
self.config.shell().status(
"Updating",
format!("git repository `{}`", self.remote.url()),
)?;
trace!("updating git source `{:?}`", self.remote);
self.remote
.checkout(&db_path, &self.reference, self.config)?
}
|
conditional_block
|
source.rs
|
use std::fmt::{self, Debug, Formatter};
use url::Url;
use core::source::{Source, SourceId};
use core::GitReference;
use core::{Dependency, Package, PackageId, Summary};
use util::Config;
use util::errors::CargoResult;
use util::hex::short_hash;
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
ident: String,
config: &'cfg Config,
}
impl<'cfg> GitSource<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config) -> CargoResult<GitSource<'cfg>> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url())?;
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
};
let source = GitSource {
remote,
reference,
source_id: source_id.clone(),
path_source: None,
rev: None,
ident,
config,
};
Ok(source)
}
pub fn url(&self) -> &Url {
self.remote.url()
}
pub fn read_packages(&mut self) -> CargoResult<Vec<Package>> {
if self.path_source.is_none() {
self.update()?;
}
self.path_source.as_mut().unwrap().read_packages()
}
}
fn ident(url: &Url) -> CargoResult<String> {
let url = canonicalize_url(url)?;
let ident = url.path_segments()
.and_then(|mut s| s.next_back())
.unwrap_or("");
let ident = if ident == "" { "_empty" } else { ident };
Ok(format!("{}-{}", ident, short_hash(&url)))
}
// Some hacks and heuristics for making equivalent URLs hash the same
pub fn canonicalize_url(url: &Url) -> CargoResult<Url> {
let mut url = url.clone();
// cannot-be-a-base-urls are not supported
// eg. github.com:rust-lang-nursery/rustfmt.git
if url.cannot_be_a_base() {
bail!(
"invalid url `{}`: cannot-be-a-base-URLs are not supported",
url
)
}
// Strip a trailing slash
if url.path().ends_with('/') {
url.path_segments_mut().unwrap().pop_if_empty();
}
// HACKHACK: For GitHub URL's specifically just lowercase
// everything. GitHub treats both the same, but they hash
// differently, and we're gonna be hashing them. This wants a more
// general solution, and also we're almost certainly not using the
// same case conversion rules that GitHub does. (#84)
if url.host_str() == Some("github.com") {
|
url.set_path(&path);
}
// Repos generally can be accessed with or w/o '.git'
let needs_chopping = url.path().ends_with(".git");
if needs_chopping {
let last = {
let last = url.path_segments().unwrap().next_back().unwrap();
last[..last.len() - 4].to_owned()
};
url.path_segments_mut().unwrap().pop().push(&last);
}
Ok(url)
}
impl<'cfg> Debug for GitSource<'cfg> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "git repo at {}", self.remote.url())?;
match self.reference.pretty_ref() {
Some(s) => write!(f, " ({})", s),
None => Ok(()),
}
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
let src = self.path_source
.as_mut()
.expect("BUG: update() must be called before query()");
src.query(dep, f)
}
fn supports_checksums(&self) -> bool {
false
}
fn requires_precise(&self) -> bool {
true
}
fn source_id(&self) -> &SourceId {
&self.source_id
}
fn update(&mut self) -> CargoResult<()> {
let lock =
self.config
.git_path()
.open_rw(".cargo-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
if self.config.cli_unstable().offline &&!db_path.exists() {
bail!(
"can't checkout from '{}': you are in the offline mode (-Z offline)",
self.remote.url()
);
}
// Resolve our reference to an actual revision, and check if the
// database already has that revision. If it does, we just load a
// database pinned at that revision, and if we don't we issue an update
// to try to find the revision.
let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() || self.source_id.precise().is_none();
let (db, actual_rev) = if should_update &&!self.config.cli_unstable().offline {
self.config.shell().status(
"Updating",
format!("git repository `{}`", self.remote.url()),
)?;
trace!("updating git source `{:?}`", self.remote);
self.remote
.checkout(&db_path, &self.reference, self.config)?
} else {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
// Don’t use the full hash,
// to contribute less to reaching the path length limit on Windows:
// https://github.com/servo/servo/pull/14397
let short_id = db.to_short_id(actual_rev.clone()).unwrap();
let checkout_path = lock.parent()
.join("checkouts")
.join(&self.ident)
.join(short_id.as_str());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
trace!(
"getting packages for package id `{}` from `{:?}`",
id,
self.remote
);
self.path_source
.as_mut()
.expect("BUG: update() must be called before get()")
.download(id)
}
fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}
}
#[cfg(test)]
mod test {
use url::Url;
use super::ident;
use util::ToUrl;
#[test]
pub fn test_url_to_path_ident_with_path() {
let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap();
assert!(ident.starts_with("cargo-"));
}
#[test]
pub fn test_url_to_path_ident_without_path() {
let ident = ident(&url("https://github.com")).unwrap();
assert!(ident.starts_with("_empty-"));
}
#[test]
fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_lowercasing_github_urls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_stripping_dot_git() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_different_protocols() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_cannot_be_a_base_urls() {
assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err());
assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err());
}
fn url(s: &str) -> Url {
s.to_url().unwrap()
}
}
|
url.set_scheme("https").unwrap();
let path = url.path().to_lowercase();
|
random_line_split
|
source.rs
|
use std::fmt::{self, Debug, Formatter};
use url::Url;
use core::source::{Source, SourceId};
use core::GitReference;
use core::{Dependency, Package, PackageId, Summary};
use util::Config;
use util::errors::CargoResult;
use util::hex::short_hash;
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
ident: String,
config: &'cfg Config,
}
impl<'cfg> GitSource<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config) -> CargoResult<GitSource<'cfg>> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url())?;
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
};
let source = GitSource {
remote,
reference,
source_id: source_id.clone(),
path_source: None,
rev: None,
ident,
config,
};
Ok(source)
}
pub fn url(&self) -> &Url {
self.remote.url()
}
pub fn read_packages(&mut self) -> CargoResult<Vec<Package>> {
if self.path_source.is_none() {
self.update()?;
}
self.path_source.as_mut().unwrap().read_packages()
}
}
fn ident(url: &Url) -> CargoResult<String> {
let url = canonicalize_url(url)?;
let ident = url.path_segments()
.and_then(|mut s| s.next_back())
.unwrap_or("");
let ident = if ident == "" { "_empty" } else { ident };
Ok(format!("{}-{}", ident, short_hash(&url)))
}
// Some hacks and heuristics for making equivalent URLs hash the same
pub fn canonicalize_url(url: &Url) -> CargoResult<Url> {
let mut url = url.clone();
// cannot-be-a-base-urls are not supported
// eg. github.com:rust-lang-nursery/rustfmt.git
if url.cannot_be_a_base() {
bail!(
"invalid url `{}`: cannot-be-a-base-URLs are not supported",
url
)
}
// Strip a trailing slash
if url.path().ends_with('/') {
url.path_segments_mut().unwrap().pop_if_empty();
}
// HACKHACK: For GitHub URL's specifically just lowercase
// everything. GitHub treats both the same, but they hash
// differently, and we're gonna be hashing them. This wants a more
// general solution, and also we're almost certainly not using the
// same case conversion rules that GitHub does. (#84)
if url.host_str() == Some("github.com") {
url.set_scheme("https").unwrap();
let path = url.path().to_lowercase();
url.set_path(&path);
}
// Repos generally can be accessed with or w/o '.git'
let needs_chopping = url.path().ends_with(".git");
if needs_chopping {
let last = {
let last = url.path_segments().unwrap().next_back().unwrap();
last[..last.len() - 4].to_owned()
};
url.path_segments_mut().unwrap().pop().push(&last);
}
Ok(url)
}
impl<'cfg> Debug for GitSource<'cfg> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "git repo at {}", self.remote.url())?;
match self.reference.pretty_ref() {
Some(s) => write!(f, " ({})", s),
None => Ok(()),
}
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
let src = self.path_source
.as_mut()
.expect("BUG: update() must be called before query()");
src.query(dep, f)
}
fn supports_checksums(&self) -> bool {
false
}
fn requires_precise(&self) -> bool {
true
}
fn source_id(&self) -> &SourceId {
&self.source_id
}
fn update(&mut self) -> CargoResult<()> {
let lock =
self.config
.git_path()
.open_rw(".cargo-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
if self.config.cli_unstable().offline &&!db_path.exists() {
bail!(
"can't checkout from '{}': you are in the offline mode (-Z offline)",
self.remote.url()
);
}
// Resolve our reference to an actual revision, and check if the
// database already has that revision. If it does, we just load a
// database pinned at that revision, and if we don't we issue an update
// to try to find the revision.
let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() || self.source_id.precise().is_none();
let (db, actual_rev) = if should_update &&!self.config.cli_unstable().offline {
self.config.shell().status(
"Updating",
format!("git repository `{}`", self.remote.url()),
)?;
trace!("updating git source `{:?}`", self.remote);
self.remote
.checkout(&db_path, &self.reference, self.config)?
} else {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
// Don’t use the full hash,
// to contribute less to reaching the path length limit on Windows:
// https://github.com/servo/servo/pull/14397
let short_id = db.to_short_id(actual_rev.clone()).unwrap();
let checkout_path = lock.parent()
.join("checkouts")
.join(&self.ident)
.join(short_id.as_str());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
trace!(
"getting packages for package id `{}` from `{:?}`",
id,
self.remote
);
self.path_source
.as_mut()
.expect("BUG: update() must be called before get()")
.download(id)
}
fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}
}
#[cfg(test)]
mod test {
use url::Url;
use super::ident;
use util::ToUrl;
#[test]
pub fn test_url_to_path_ident_with_path() {
let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap();
assert!(ident.starts_with("cargo-"));
}
#[test]
pub fn test_url_to_path_ident_without_path() {
let ident = ident(&url("https://github.com")).unwrap();
assert!(ident.starts_with("_empty-"));
}
#[test]
fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_lowercasing_github_urls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_stripping_dot_git() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_different_protocols() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_cannot_be_a_base_urls() {
assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err());
assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err());
}
fn url(s: &str) -> Url {
|
s.to_url().unwrap()
}
}
|
identifier_body
|
|
source.rs
|
use std::fmt::{self, Debug, Formatter};
use url::Url;
use core::source::{Source, SourceId};
use core::GitReference;
use core::{Dependency, Package, PackageId, Summary};
use util::Config;
use util::errors::CargoResult;
use util::hex::short_hash;
use sources::PathSource;
use sources::git::utils::{GitRemote, GitRevision};
pub struct GitSource<'cfg> {
remote: GitRemote,
reference: GitReference,
source_id: SourceId,
path_source: Option<PathSource<'cfg>>,
rev: Option<GitRevision>,
ident: String,
config: &'cfg Config,
}
impl<'cfg> GitSource<'cfg> {
pub fn new(source_id: &SourceId, config: &'cfg Config) -> CargoResult<GitSource<'cfg>> {
assert!(source_id.is_git(), "id is not git, id={}", source_id);
let remote = GitRemote::new(source_id.url());
let ident = ident(source_id.url())?;
let reference = match source_id.precise() {
Some(s) => GitReference::Rev(s.to_string()),
None => source_id.git_reference().unwrap().clone(),
};
let source = GitSource {
remote,
reference,
source_id: source_id.clone(),
path_source: None,
rev: None,
ident,
config,
};
Ok(source)
}
pub fn url(&self) -> &Url {
self.remote.url()
}
pub fn read_packages(&mut self) -> CargoResult<Vec<Package>> {
if self.path_source.is_none() {
self.update()?;
}
self.path_source.as_mut().unwrap().read_packages()
}
}
fn ident(url: &Url) -> CargoResult<String> {
let url = canonicalize_url(url)?;
let ident = url.path_segments()
.and_then(|mut s| s.next_back())
.unwrap_or("");
let ident = if ident == "" { "_empty" } else { ident };
Ok(format!("{}-{}", ident, short_hash(&url)))
}
// Some hacks and heuristics for making equivalent URLs hash the same
pub fn canonicalize_url(url: &Url) -> CargoResult<Url> {
let mut url = url.clone();
// cannot-be-a-base-urls are not supported
// eg. github.com:rust-lang-nursery/rustfmt.git
if url.cannot_be_a_base() {
bail!(
"invalid url `{}`: cannot-be-a-base-URLs are not supported",
url
)
}
// Strip a trailing slash
if url.path().ends_with('/') {
url.path_segments_mut().unwrap().pop_if_empty();
}
// HACKHACK: For GitHub URL's specifically just lowercase
// everything. GitHub treats both the same, but they hash
// differently, and we're gonna be hashing them. This wants a more
// general solution, and also we're almost certainly not using the
// same case conversion rules that GitHub does. (#84)
if url.host_str() == Some("github.com") {
url.set_scheme("https").unwrap();
let path = url.path().to_lowercase();
url.set_path(&path);
}
// Repos generally can be accessed with or w/o '.git'
let needs_chopping = url.path().ends_with(".git");
if needs_chopping {
let last = {
let last = url.path_segments().unwrap().next_back().unwrap();
last[..last.len() - 4].to_owned()
};
url.path_segments_mut().unwrap().pop().push(&last);
}
Ok(url)
}
impl<'cfg> Debug for GitSource<'cfg> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
write!(f, "git repo at {}", self.remote.url())?;
match self.reference.pretty_ref() {
Some(s) => write!(f, " ({})", s),
None => Ok(()),
}
}
}
impl<'cfg> Source for GitSource<'cfg> {
fn query(&mut self, dep: &Dependency, f: &mut FnMut(Summary)) -> CargoResult<()> {
let src = self.path_source
.as_mut()
.expect("BUG: update() must be called before query()");
src.query(dep, f)
}
fn supports_checksums(&self) -> bool {
false
}
fn requires_precise(&self) -> bool {
true
}
fn
|
(&self) -> &SourceId {
&self.source_id
}
fn update(&mut self) -> CargoResult<()> {
let lock =
self.config
.git_path()
.open_rw(".cargo-lock-git", self.config, "the git checkouts")?;
let db_path = lock.parent().join("db").join(&self.ident);
if self.config.cli_unstable().offline &&!db_path.exists() {
bail!(
"can't checkout from '{}': you are in the offline mode (-Z offline)",
self.remote.url()
);
}
// Resolve our reference to an actual revision, and check if the
// database already has that revision. If it does, we just load a
// database pinned at that revision, and if we don't we issue an update
// to try to find the revision.
let actual_rev = self.remote.rev_for(&db_path, &self.reference);
let should_update = actual_rev.is_err() || self.source_id.precise().is_none();
let (db, actual_rev) = if should_update &&!self.config.cli_unstable().offline {
self.config.shell().status(
"Updating",
format!("git repository `{}`", self.remote.url()),
)?;
trace!("updating git source `{:?}`", self.remote);
self.remote
.checkout(&db_path, &self.reference, self.config)?
} else {
(self.remote.db_at(&db_path)?, actual_rev.unwrap())
};
// Don’t use the full hash,
// to contribute less to reaching the path length limit on Windows:
// https://github.com/servo/servo/pull/14397
let short_id = db.to_short_id(actual_rev.clone()).unwrap();
let checkout_path = lock.parent()
.join("checkouts")
.join(&self.ident)
.join(short_id.as_str());
// Copy the database to the checkout location. After this we could drop
// the lock on the database as we no longer needed it, but we leave it
// in scope so the destructors here won't tamper with too much.
// Checkout is immutable, so we don't need to protect it with a lock once
// it is created.
db.copy_to(actual_rev.clone(), &checkout_path, self.config)?;
let source_id = self.source_id.with_precise(Some(actual_rev.to_string()));
let path_source = PathSource::new_recursive(&checkout_path, &source_id, self.config);
self.path_source = Some(path_source);
self.rev = Some(actual_rev);
self.path_source.as_mut().unwrap().update()
}
fn download(&mut self, id: &PackageId) -> CargoResult<Package> {
trace!(
"getting packages for package id `{}` from `{:?}`",
id,
self.remote
);
self.path_source
.as_mut()
.expect("BUG: update() must be called before get()")
.download(id)
}
fn fingerprint(&self, _pkg: &Package) -> CargoResult<String> {
Ok(self.rev.as_ref().unwrap().to_string())
}
}
#[cfg(test)]
mod test {
use url::Url;
use super::ident;
use util::ToUrl;
#[test]
pub fn test_url_to_path_ident_with_path() {
let ident = ident(&url("https://github.com/carlhuda/cargo")).unwrap();
assert!(ident.starts_with("cargo-"));
}
#[test]
pub fn test_url_to_path_ident_without_path() {
let ident = ident(&url("https://github.com")).unwrap();
assert!(ident.starts_with("_empty-"));
}
#[test]
fn test_canonicalize_idents_by_stripping_trailing_url_slash() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston/")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_lowercasing_github_urls() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/pistondevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_by_stripping_dot_git() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("https://github.com/PistonDevelopers/piston.git")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_idents_different_protocols() {
let ident1 = ident(&url("https://github.com/PistonDevelopers/piston")).unwrap();
let ident2 = ident(&url("git://github.com/PistonDevelopers/piston")).unwrap();
assert_eq!(ident1, ident2);
}
#[test]
fn test_canonicalize_cannot_be_a_base_urls() {
assert!(ident(&url("github.com:PistonDevelopers/piston")).is_err());
assert!(ident(&url("google.com:PistonDevelopers/piston")).is_err());
}
fn url(s: &str) -> Url {
s.to_url().unwrap()
}
}
|
source_id
|
identifier_name
|
global.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Abstractions for global scopes.
//!
//! This module contains smart pointers to global scopes, to simplify writing
//! code that works in workers as well as window scopes.
use dom::bindings::conversions::native_from_reflector_jsmanaged;
use dom::bindings::js::{JS, JSRef, Rootable, Root, Unrooted};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::workerglobalscope::{WorkerGlobalScope, WorkerGlobalScopeHelpers};
use dom::window::{self, WindowHelpers};
use devtools_traits::DevtoolsControlChan;
use script_task::{ScriptChan, ScriptPort, ScriptMsg, ScriptTask};
use msg::constellation_msg::{PipelineId, WorkerId};
use net_traits::ResourceTask;
use js::{JSCLASS_IS_GLOBAL, JSCLASS_IS_DOMJSCLASS};
use js::glue::{GetGlobalForObjectCrossCompartment};
use js::jsapi::{JSContext, JSObject};
use js::jsapi::{JS_GetClass};
use url::Url;
/// A freely-copyable reference to a rooted global object.
#[derive(Copy, Clone)]
pub enum GlobalRef<'a> {
/// A reference to a `Window` object.
Window(JSRef<'a, window::Window>),
/// A reference to a `WorkerGlobalScope` object.
Worker(JSRef<'a, WorkerGlobalScope>),
}
/// A stack-based rooted reference to a global object.
#[no_move]
pub enum GlobalRoot {
/// A root for a `Window` object.
Window(Root<window::Window>),
/// A root for a `WorkerGlobalScope` object.
Worker(Root<WorkerGlobalScope>),
}
/// A traced reference to a global object, for use in fields of traced Rust
/// structures.
#[jstraceable]
#[must_root]
pub enum GlobalField {
/// A field for a `Window` object.
Window(JS<window::Window>),
/// A field for a `WorkerGlobalScope` object.
Worker(JS<WorkerGlobalScope>),
}
/// An unrooted reference to a global object.
#[must_root]
pub enum GlobalUnrooted {
/// An unrooted reference to a `Window` object.
Window(Unrooted<window::Window>),
/// An unrooted reference to a `WorkerGlobalScope` object.
Worker(Unrooted<WorkerGlobalScope>),
}
impl<'a> GlobalRef<'a> {
/// Get the `JSContext` for the `JSRuntime` associated with the thread
/// this global object is on.
pub fn get_cx(&self) -> *mut JSContext {
match *self {
GlobalRef::Window(ref window) => window.get_cx(),
GlobalRef::Worker(ref worker) => worker.get_cx(),
}
}
/// Extract a `Window`, causing task failure if the global object is not
/// a `Window`.
pub fn as_window<'b>(&'b self) -> JSRef<'b, window::Window> {
match *self {
GlobalRef::Window(window) => window,
GlobalRef::Worker(_) => panic!("expected a Window scope"),
}
}
/// Get the `PipelineId` for this global scope.
pub fn pipeline(&self) -> PipelineId {
match *self {
GlobalRef::Window(window) => window.pipeline(),
GlobalRef::Worker(worker) => worker.pipeline(),
}
}
/// Get `DevtoolsControlChan` to send messages to Devtools
/// task when available.
pub fn devtools_chan(&self) -> Option<DevtoolsControlChan> {
match *self {
GlobalRef::Window(window) => window.devtools_chan(),
GlobalRef::Worker(worker) => worker.devtools_chan(),
}
}
/// Get the `ResourceTask` for this global scope.
pub fn resource_task(&self) -> ResourceTask {
match *self {
GlobalRef::Window(ref window) => window.resource_task().clone(),
GlobalRef::Worker(ref worker) => worker.resource_task().clone(),
}
}
/// Get next worker id.
pub fn get_next_worker_id(&self) -> WorkerId {
match *self {
GlobalRef::Window(ref window) => window.get_next_worker_id(),
GlobalRef::Worker(ref worker) => worker.get_next_worker_id()
}
}
/// Get the URL for this global scope.
pub fn get_url(&self) -> Url {
match *self {
GlobalRef::Window(ref window) => window.get_url(),
GlobalRef::Worker(ref worker) => worker.get_url().clone(),
}
}
/// `ScriptChan` used to send messages to the event loop of this global's
/// thread.
pub fn script_chan(&self) -> Box<ScriptChan+Send> {
match *self {
GlobalRef::Window(ref window) => window.script_chan(),
GlobalRef::Worker(ref worker) => worker.script_chan(),
}
}
/// Create a new sender/receiver pair that can be used to implement an on-demand
/// event loop. Used for implementing web APIs that require blocking semantics
/// without resorting to nested event loops.
pub fn new_script_pair(&self) -> (Box<ScriptChan+Send>, Box<ScriptPort+Send>) {
match *self {
GlobalRef::Window(ref window) => window.new_script_pair(),
GlobalRef::Worker(ref worker) => worker.new_script_pair(),
}
}
/// Process a single event as if it were the next event in the task queue for
/// this global.
pub fn process_event(&self, msg: ScriptMsg) {
match *self {
GlobalRef::Window(_) => ScriptTask::process_event(msg),
GlobalRef::Worker(ref worker) => worker.process_event(msg),
}
}
}
impl<'a> Reflectable for GlobalRef<'a> {
fn reflector<'b>(&'b self) -> &'b Reflector {
match *self {
GlobalRef::Window(ref window) => window.reflector(),
GlobalRef::Worker(ref worker) => worker.reflector(),
}
}
}
impl GlobalRoot {
/// Obtain a safe reference to the global object that cannot outlive the
/// lifetime of this root.
pub fn r<'c>(&'c self) -> GlobalRef<'c> {
match *self {
GlobalRoot::Window(ref window) => GlobalRef::Window(window.r()),
GlobalRoot::Worker(ref worker) => GlobalRef::Worker(worker.r()),
}
}
}
impl GlobalField {
/// Create a new `GlobalField` from a rooted reference.
pub fn from_rooted(global: &GlobalRef) -> GlobalField {
match *global {
GlobalRef::Window(window) => GlobalField::Window(JS::from_rooted(window)),
GlobalRef::Worker(worker) => GlobalField::Worker(JS::from_rooted(worker)),
}
}
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalField::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalField::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
impl GlobalUnrooted {
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalUnrooted::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalUnrooted::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
/// Returns the global object of the realm that the given JS object was created in.
#[allow(unrooted_must_root)]
pub fn global_object_for_js_object(obj: *mut JSObject) -> GlobalUnrooted {
|
let global = GetGlobalForObjectCrossCompartment(obj);
let clasp = JS_GetClass(global);
assert!(((*clasp).flags & (JSCLASS_IS_DOMJSCLASS | JSCLASS_IS_GLOBAL))!= 0);
match native_from_reflector_jsmanaged(global) {
Ok(window) => return GlobalUnrooted::Window(window),
Err(_) => (),
}
match native_from_reflector_jsmanaged(global) {
Ok(worker) => return GlobalUnrooted::Worker(worker),
Err(_) => (),
}
panic!("found DOM global that doesn't unwrap to Window or WorkerGlobalScope")
}
}
|
unsafe {
|
random_line_split
|
global.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Abstractions for global scopes.
//!
//! This module contains smart pointers to global scopes, to simplify writing
//! code that works in workers as well as window scopes.
use dom::bindings::conversions::native_from_reflector_jsmanaged;
use dom::bindings::js::{JS, JSRef, Rootable, Root, Unrooted};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::workerglobalscope::{WorkerGlobalScope, WorkerGlobalScopeHelpers};
use dom::window::{self, WindowHelpers};
use devtools_traits::DevtoolsControlChan;
use script_task::{ScriptChan, ScriptPort, ScriptMsg, ScriptTask};
use msg::constellation_msg::{PipelineId, WorkerId};
use net_traits::ResourceTask;
use js::{JSCLASS_IS_GLOBAL, JSCLASS_IS_DOMJSCLASS};
use js::glue::{GetGlobalForObjectCrossCompartment};
use js::jsapi::{JSContext, JSObject};
use js::jsapi::{JS_GetClass};
use url::Url;
/// A freely-copyable reference to a rooted global object.
#[derive(Copy, Clone)]
pub enum GlobalRef<'a> {
/// A reference to a `Window` object.
Window(JSRef<'a, window::Window>),
/// A reference to a `WorkerGlobalScope` object.
Worker(JSRef<'a, WorkerGlobalScope>),
}
/// A stack-based rooted reference to a global object.
#[no_move]
pub enum GlobalRoot {
/// A root for a `Window` object.
Window(Root<window::Window>),
/// A root for a `WorkerGlobalScope` object.
Worker(Root<WorkerGlobalScope>),
}
/// A traced reference to a global object, for use in fields of traced Rust
/// structures.
#[jstraceable]
#[must_root]
pub enum GlobalField {
/// A field for a `Window` object.
Window(JS<window::Window>),
/// A field for a `WorkerGlobalScope` object.
Worker(JS<WorkerGlobalScope>),
}
/// An unrooted reference to a global object.
#[must_root]
pub enum GlobalUnrooted {
/// An unrooted reference to a `Window` object.
Window(Unrooted<window::Window>),
/// An unrooted reference to a `WorkerGlobalScope` object.
Worker(Unrooted<WorkerGlobalScope>),
}
impl<'a> GlobalRef<'a> {
/// Get the `JSContext` for the `JSRuntime` associated with the thread
/// this global object is on.
pub fn get_cx(&self) -> *mut JSContext {
match *self {
GlobalRef::Window(ref window) => window.get_cx(),
GlobalRef::Worker(ref worker) => worker.get_cx(),
}
}
/// Extract a `Window`, causing task failure if the global object is not
/// a `Window`.
pub fn as_window<'b>(&'b self) -> JSRef<'b, window::Window> {
match *self {
GlobalRef::Window(window) => window,
GlobalRef::Worker(_) => panic!("expected a Window scope"),
}
}
/// Get the `PipelineId` for this global scope.
pub fn pipeline(&self) -> PipelineId {
match *self {
GlobalRef::Window(window) => window.pipeline(),
GlobalRef::Worker(worker) => worker.pipeline(),
}
}
/// Get `DevtoolsControlChan` to send messages to Devtools
/// task when available.
pub fn devtools_chan(&self) -> Option<DevtoolsControlChan> {
match *self {
GlobalRef::Window(window) => window.devtools_chan(),
GlobalRef::Worker(worker) => worker.devtools_chan(),
}
}
/// Get the `ResourceTask` for this global scope.
pub fn resource_task(&self) -> ResourceTask
|
/// Get next worker id.
pub fn get_next_worker_id(&self) -> WorkerId {
match *self {
GlobalRef::Window(ref window) => window.get_next_worker_id(),
GlobalRef::Worker(ref worker) => worker.get_next_worker_id()
}
}
/// Get the URL for this global scope.
pub fn get_url(&self) -> Url {
match *self {
GlobalRef::Window(ref window) => window.get_url(),
GlobalRef::Worker(ref worker) => worker.get_url().clone(),
}
}
/// `ScriptChan` used to send messages to the event loop of this global's
/// thread.
pub fn script_chan(&self) -> Box<ScriptChan+Send> {
match *self {
GlobalRef::Window(ref window) => window.script_chan(),
GlobalRef::Worker(ref worker) => worker.script_chan(),
}
}
/// Create a new sender/receiver pair that can be used to implement an on-demand
/// event loop. Used for implementing web APIs that require blocking semantics
/// without resorting to nested event loops.
pub fn new_script_pair(&self) -> (Box<ScriptChan+Send>, Box<ScriptPort+Send>) {
match *self {
GlobalRef::Window(ref window) => window.new_script_pair(),
GlobalRef::Worker(ref worker) => worker.new_script_pair(),
}
}
/// Process a single event as if it were the next event in the task queue for
/// this global.
pub fn process_event(&self, msg: ScriptMsg) {
match *self {
GlobalRef::Window(_) => ScriptTask::process_event(msg),
GlobalRef::Worker(ref worker) => worker.process_event(msg),
}
}
}
impl<'a> Reflectable for GlobalRef<'a> {
fn reflector<'b>(&'b self) -> &'b Reflector {
match *self {
GlobalRef::Window(ref window) => window.reflector(),
GlobalRef::Worker(ref worker) => worker.reflector(),
}
}
}
impl GlobalRoot {
/// Obtain a safe reference to the global object that cannot outlive the
/// lifetime of this root.
pub fn r<'c>(&'c self) -> GlobalRef<'c> {
match *self {
GlobalRoot::Window(ref window) => GlobalRef::Window(window.r()),
GlobalRoot::Worker(ref worker) => GlobalRef::Worker(worker.r()),
}
}
}
impl GlobalField {
/// Create a new `GlobalField` from a rooted reference.
pub fn from_rooted(global: &GlobalRef) -> GlobalField {
match *global {
GlobalRef::Window(window) => GlobalField::Window(JS::from_rooted(window)),
GlobalRef::Worker(worker) => GlobalField::Worker(JS::from_rooted(worker)),
}
}
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalField::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalField::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
impl GlobalUnrooted {
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalUnrooted::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalUnrooted::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
/// Returns the global object of the realm that the given JS object was created in.
#[allow(unrooted_must_root)]
pub fn global_object_for_js_object(obj: *mut JSObject) -> GlobalUnrooted {
unsafe {
let global = GetGlobalForObjectCrossCompartment(obj);
let clasp = JS_GetClass(global);
assert!(((*clasp).flags & (JSCLASS_IS_DOMJSCLASS | JSCLASS_IS_GLOBAL))!= 0);
match native_from_reflector_jsmanaged(global) {
Ok(window) => return GlobalUnrooted::Window(window),
Err(_) => (),
}
match native_from_reflector_jsmanaged(global) {
Ok(worker) => return GlobalUnrooted::Worker(worker),
Err(_) => (),
}
panic!("found DOM global that doesn't unwrap to Window or WorkerGlobalScope")
}
}
|
{
match *self {
GlobalRef::Window(ref window) => window.resource_task().clone(),
GlobalRef::Worker(ref worker) => worker.resource_task().clone(),
}
}
|
identifier_body
|
global.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Abstractions for global scopes.
//!
//! This module contains smart pointers to global scopes, to simplify writing
//! code that works in workers as well as window scopes.
use dom::bindings::conversions::native_from_reflector_jsmanaged;
use dom::bindings::js::{JS, JSRef, Rootable, Root, Unrooted};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::workerglobalscope::{WorkerGlobalScope, WorkerGlobalScopeHelpers};
use dom::window::{self, WindowHelpers};
use devtools_traits::DevtoolsControlChan;
use script_task::{ScriptChan, ScriptPort, ScriptMsg, ScriptTask};
use msg::constellation_msg::{PipelineId, WorkerId};
use net_traits::ResourceTask;
use js::{JSCLASS_IS_GLOBAL, JSCLASS_IS_DOMJSCLASS};
use js::glue::{GetGlobalForObjectCrossCompartment};
use js::jsapi::{JSContext, JSObject};
use js::jsapi::{JS_GetClass};
use url::Url;
/// A freely-copyable reference to a rooted global object.
#[derive(Copy, Clone)]
pub enum GlobalRef<'a> {
/// A reference to a `Window` object.
Window(JSRef<'a, window::Window>),
/// A reference to a `WorkerGlobalScope` object.
Worker(JSRef<'a, WorkerGlobalScope>),
}
/// A stack-based rooted reference to a global object.
#[no_move]
pub enum GlobalRoot {
/// A root for a `Window` object.
Window(Root<window::Window>),
/// A root for a `WorkerGlobalScope` object.
Worker(Root<WorkerGlobalScope>),
}
/// A traced reference to a global object, for use in fields of traced Rust
/// structures.
#[jstraceable]
#[must_root]
pub enum GlobalField {
/// A field for a `Window` object.
Window(JS<window::Window>),
/// A field for a `WorkerGlobalScope` object.
Worker(JS<WorkerGlobalScope>),
}
/// An unrooted reference to a global object.
#[must_root]
pub enum GlobalUnrooted {
/// An unrooted reference to a `Window` object.
Window(Unrooted<window::Window>),
/// An unrooted reference to a `WorkerGlobalScope` object.
Worker(Unrooted<WorkerGlobalScope>),
}
impl<'a> GlobalRef<'a> {
/// Get the `JSContext` for the `JSRuntime` associated with the thread
/// this global object is on.
pub fn get_cx(&self) -> *mut JSContext {
match *self {
GlobalRef::Window(ref window) => window.get_cx(),
GlobalRef::Worker(ref worker) => worker.get_cx(),
}
}
/// Extract a `Window`, causing task failure if the global object is not
/// a `Window`.
pub fn as_window<'b>(&'b self) -> JSRef<'b, window::Window> {
match *self {
GlobalRef::Window(window) => window,
GlobalRef::Worker(_) => panic!("expected a Window scope"),
}
}
/// Get the `PipelineId` for this global scope.
pub fn pipeline(&self) -> PipelineId {
match *self {
GlobalRef::Window(window) => window.pipeline(),
GlobalRef::Worker(worker) => worker.pipeline(),
}
}
/// Get `DevtoolsControlChan` to send messages to Devtools
/// task when available.
pub fn devtools_chan(&self) -> Option<DevtoolsControlChan> {
match *self {
GlobalRef::Window(window) => window.devtools_chan(),
GlobalRef::Worker(worker) => worker.devtools_chan(),
}
}
/// Get the `ResourceTask` for this global scope.
pub fn resource_task(&self) -> ResourceTask {
match *self {
GlobalRef::Window(ref window) => window.resource_task().clone(),
GlobalRef::Worker(ref worker) => worker.resource_task().clone(),
}
}
/// Get next worker id.
pub fn get_next_worker_id(&self) -> WorkerId {
match *self {
GlobalRef::Window(ref window) => window.get_next_worker_id(),
GlobalRef::Worker(ref worker) => worker.get_next_worker_id()
}
}
/// Get the URL for this global scope.
pub fn get_url(&self) -> Url {
match *self {
GlobalRef::Window(ref window) => window.get_url(),
GlobalRef::Worker(ref worker) => worker.get_url().clone(),
}
}
/// `ScriptChan` used to send messages to the event loop of this global's
/// thread.
pub fn
|
(&self) -> Box<ScriptChan+Send> {
match *self {
GlobalRef::Window(ref window) => window.script_chan(),
GlobalRef::Worker(ref worker) => worker.script_chan(),
}
}
/// Create a new sender/receiver pair that can be used to implement an on-demand
/// event loop. Used for implementing web APIs that require blocking semantics
/// without resorting to nested event loops.
pub fn new_script_pair(&self) -> (Box<ScriptChan+Send>, Box<ScriptPort+Send>) {
match *self {
GlobalRef::Window(ref window) => window.new_script_pair(),
GlobalRef::Worker(ref worker) => worker.new_script_pair(),
}
}
/// Process a single event as if it were the next event in the task queue for
/// this global.
pub fn process_event(&self, msg: ScriptMsg) {
match *self {
GlobalRef::Window(_) => ScriptTask::process_event(msg),
GlobalRef::Worker(ref worker) => worker.process_event(msg),
}
}
}
impl<'a> Reflectable for GlobalRef<'a> {
fn reflector<'b>(&'b self) -> &'b Reflector {
match *self {
GlobalRef::Window(ref window) => window.reflector(),
GlobalRef::Worker(ref worker) => worker.reflector(),
}
}
}
impl GlobalRoot {
/// Obtain a safe reference to the global object that cannot outlive the
/// lifetime of this root.
pub fn r<'c>(&'c self) -> GlobalRef<'c> {
match *self {
GlobalRoot::Window(ref window) => GlobalRef::Window(window.r()),
GlobalRoot::Worker(ref worker) => GlobalRef::Worker(worker.r()),
}
}
}
impl GlobalField {
/// Create a new `GlobalField` from a rooted reference.
pub fn from_rooted(global: &GlobalRef) -> GlobalField {
match *global {
GlobalRef::Window(window) => GlobalField::Window(JS::from_rooted(window)),
GlobalRef::Worker(worker) => GlobalField::Worker(JS::from_rooted(worker)),
}
}
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalField::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalField::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
impl GlobalUnrooted {
/// Create a stack-bounded root for this reference.
pub fn root(&self) -> GlobalRoot {
match *self {
GlobalUnrooted::Window(ref window) => GlobalRoot::Window(window.root()),
GlobalUnrooted::Worker(ref worker) => GlobalRoot::Worker(worker.root()),
}
}
}
/// Returns the global object of the realm that the given JS object was created in.
#[allow(unrooted_must_root)]
pub fn global_object_for_js_object(obj: *mut JSObject) -> GlobalUnrooted {
unsafe {
let global = GetGlobalForObjectCrossCompartment(obj);
let clasp = JS_GetClass(global);
assert!(((*clasp).flags & (JSCLASS_IS_DOMJSCLASS | JSCLASS_IS_GLOBAL))!= 0);
match native_from_reflector_jsmanaged(global) {
Ok(window) => return GlobalUnrooted::Window(window),
Err(_) => (),
}
match native_from_reflector_jsmanaged(global) {
Ok(worker) => return GlobalUnrooted::Worker(worker),
Err(_) => (),
}
panic!("found DOM global that doesn't unwrap to Window or WorkerGlobalScope")
}
}
|
script_chan
|
identifier_name
|
generic-impl-more-params-with-defaults.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(default_type_params)]
struct
|
;
struct Vec<T, A = Heap>;
impl<T, A = Heap> Vec<T, A> {
fn new() -> Vec<T, A> {Vec}
}
fn main() {
Vec::<int, Heap, bool>::new();
//~^ ERROR the impl referenced by this path needs at most 2 type parameters,
// but 3 were supplied
//~^^^ ERROR too many type parameters provided: expected at most 2, found 3
}
|
Heap
|
identifier_name
|
generic-impl-more-params-with-defaults.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(default_type_params)]
struct Heap;
struct Vec<T, A = Heap>;
impl<T, A = Heap> Vec<T, A> {
fn new() -> Vec<T, A> {Vec}
}
fn main() {
Vec::<int, Heap, bool>::new();
|
// but 3 were supplied
//~^^^ ERROR too many type parameters provided: expected at most 2, found 3
}
|
//~^ ERROR the impl referenced by this path needs at most 2 type parameters,
|
random_line_split
|
lib.rs
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)]
#![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(feature="dev", plugin(clippy))]
// Clippy settings
// Most of the time much more readable
#![cfg_attr(feature="dev", allow(needless_range_loop))]
// Shorter than if-else
#![cfg_attr(feature="dev", allow(match_bool))]
// We use that to be more explicit about handled cases
#![cfg_attr(feature="dev", allow(match_same_arms))]
// Keeps consistency (all lines with `.clone()`).
#![cfg_attr(feature="dev", allow(clone_on_copy))]
// Some false positives when doing pattern matching.
#![cfg_attr(feature="dev", allow(needless_borrow))]
// TODO [todr] a lot of warnings to be fixed
#![cfg_attr(feature="dev", allow(assign_op_pattern))]
//! Ethcore-util library
//!
//! ### Rust version:
//! - nightly
//!
//! ### Supported platforms:
//! - OSX
//! - Linux
//!
//! ### Building:
//!
//! - Ubuntu 14.04 and later:
//!
//! ```bash
//! # install rocksdb
//! add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main"
//! apt-get update
//! apt-get install -y --force-yes librocksdb
//!
//! # install multirust
//! curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
//!
//! # install nightly and make it default
//! multirust update nightly && multirust default nightly
//!
//! # export rust LIBRARY_PATH
//! export LIBRARY_PATH=/usr/local/lib
//!
//! # download and build parity
//! git clone https://github.com/ethcore/parity
//! cd parity
//! cargo build --release
//! ```
//!
//! - OSX:
//!
//! ```bash
//! # install rocksdb && multirust
//! brew update
//! brew install rocksdb
//! brew install multirust
//!
//! # install nightly and make it default
//! multirust update nightly && multirust default nightly
//!
//! # export rust LIBRARY_PATH
//! export LIBRARY_PATH=/usr/local/lib
//!
//! # download and build parity
//! git clone https://github.com/ethcore/parity
//! cd parity
//! cargo build --release
//! ```
extern crate rustc_serialize;
extern crate rand;
extern crate rocksdb;
extern crate env_logger;
extern crate crypto as rcrypto;
extern crate secp256k1;
extern crate arrayvec;
extern crate elastic_array;
extern crate time;
extern crate ethcore_devtools as devtools;
extern crate libc;
|
extern crate tiny_keccak;
extern crate rlp;
extern crate regex;
#[macro_use]
extern crate heapsize;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate itertools;
#[macro_use]
extern crate log as rlog;
pub extern crate using_queue;
pub extern crate table;
pub mod bloom;
pub mod standard;
#[macro_use]
pub mod from_json;
#[macro_use]
pub mod common;
pub mod error;
pub mod bytes;
pub mod misc;
pub mod vector;
pub mod sha3;
pub mod hashdb;
pub mod memorydb;
pub mod migration;
pub mod overlaydb;
pub mod journaldb;
pub mod kvdb;
pub mod triehash;
pub mod trie;
pub mod nibbleslice;
pub mod nibblevec;
pub mod semantic_version;
pub mod log;
pub mod path;
pub mod snappy;
mod timer;
pub use common::*;
pub use misc::*;
pub use hashdb::*;
pub use memorydb::*;
pub use overlaydb::*;
pub use journaldb::JournalDB;
pub use triehash::*;
pub use trie::{Trie, TrieMut, TrieDB, TrieDBMut, TrieFactory, TrieError, SecTrieDB, SecTrieDBMut};
pub use nibbleslice::*;
pub use semantic_version::*;
pub use log::*;
pub use kvdb::*;
pub use timer::*;
/// 160-bit integer representing account address
pub type Address = H160;
/// Secret
pub type Secret = H256;
|
extern crate target_info;
extern crate ethcore_bigint as bigint;
extern crate parking_lot;
extern crate ansi_term;
|
random_line_split
|
pool.rs
|
use indy::IndyError;
use indy::pool;
use indy::future::Future;
use serde_json::to_string;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::fs;
#[derive(Serialize, Deserialize)]
pub struct PoolConfig {
pub genesis_txn: String
}
const PROTOCOL_VERSION: usize = 2;
pub fn create_and_open_pool_ledger(pool_name: &str) -> Result<i32, IndyError> {
set_protocol_version(PROTOCOL_VERSION).unwrap();
let txn_file_path = _create_genesis_txn_file_for_test_pool(pool_name, None, None);
let pool_config = _pool_config_json(txn_file_path.as_path());
_create_pool_ledger_config(pool_name, Some(pool_config.as_str()))?;
_open_pool_ledger(pool_name, None)
}
pub fn close(pool_handle: i32) -> Result<(), IndyError> {
pool::close_pool_ledger(pool_handle).wait()
}
fn _pool_config_json(txn_file_path: &Path) -> String {
to_string(&PoolConfig {
genesis_txn: txn_file_path.to_string_lossy().to_string()
}).unwrap()
}
fn _create_pool_ledger_config(pool_name: &str, pool_config: Option<&str>) -> Result<(), IndyError> {
pool::create_pool_ledger_config(pool_name, pool_config).wait()
}
fn _create_genesis_txn_file_for_test_pool(pool_name: &str,
nodes_count: Option<u8>,
txn_file_path: Option<&Path>) -> PathBuf {
let nodes_count = nodes_count.unwrap_or(4);
let test_pool_ip = super::environment::test_pool_ip();
let node_txns = vec![
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node1","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","blskey_pop":"RahHYiCvoNCtPTrVtP7nMC5eTYrsUA8WjXbdhNc8debh1agE9bGiJxWBXYNFbnJXoXhWFMvyqhqhRoq737YQemH5ik9oL7R4NTTCz2LEZhkgLJzB3QRQqJyBNyv7acbdHrAT8nQ9UkLbaVL9NBpnWXBTw4LEMePaSHEw66RzPNdAX1","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv"}},"metadata":{{"from":"Th7MpTaRZVRYnPiabds81Y"}},"type":"0"}},"txnMetadata":{{"seqNo":1,"txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node2","blskey":"37rAPpXVoxzKhz7d9gkUe52XuXryuLXoM6P6LbWDB7LSbG62Lsb33sfG7zqS8TK1MXwuCHj1FKNzVpsnafmqLG1vXN88rt38mNFs9TENzm4QHdBzsvCuoBnPH7rpYYDo9DZNJePaDvRvqJKByCabubJz3XXKbEeshzpz4Ma5QYpJqjk","blskey_pop":"Qr658mWZ2YC8JXGXwMDQTzuZCWF7NK9EwxphGmcBvCh6ybUuLxbG65nsX4JvD4SPNtkJ2w9ug1yLTj6fgmuDg41TgECXjLCij3RMsV8CwewBVgVN67wsA45DFWvqvLtu4rjNnE9JbdFTc1Z4WCPA3Xan44K1HoHAq9EVeaRYs8zoF5","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb"}},"metadata":{{"from":"EbP4aYNeTHL6q385GuVpRV"}},"type":"0"}},"txnMetadata":{{"seqNo":2,"txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node3","blskey":"3WFpdbg7C5cnLYZwFZevJqhubkFALBfCBBok15GdrKMUhUjGsk3jV6QKj6MZgEubF7oqCafxNdkm7eswgA4sdKTRc82tLGzZBd6vNqU8dupzup6uYUf32KTHTPQbuUM8Yk4QFXjEf2Usu2TJcNkdgpyeUSX42u5LqdDDpNSWUK5deC5","blskey_pop":"QwDeb2CkNSx6r8QC8vGQK3GRv7Yndn84TGNijX8YXHPiagXajyfTjoR87rXUu4G4QLk2cF8NNyqWiYMus1623dELWwx57rLCFqGh7N4ZRbGDRP4fnVcaKg1BcUxQ866Ven4gw8y4N56S5HzxXNBZtLYmhGHvDtk6PFkFwCvxYrNYjh","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya"}},"metadata":{{"from":"4cU41vWW82ArfxJxHkzXPG"}},"type":"0"}},"txnMetadata":{{"seqNo":3,"txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node4","blskey":"2zN3bHM1m4rLz54MJHYSwvqzPchYp8jkHswveCLAEJVcX6Mm1wHQD1SkPYMzUDTZvWvhuE6VNAkK3KxVeEmsanSmvjVkReDeBEMxeDaayjcZjFGPydyey1qxBHmTvAnBKoPydvuTAqx5f7YNNRAdeLmUi99gERUU7TD8KfAa6MpQ9bw","blskey_pop":"RPLagxaR5xdimFzwmzYnz4ZhWtYQEj8iR5ZU53T2gitPCyCHQneUn2Huc4oeLd2B2HzkGnjAff4hWTJT6C7qHYB1Mv2wU5iHHGFWkhnTX9WsEAbunJCV2qcaXScKj4tTfvdDKfLiVuU2av6hbsMztirRze7LvYBkRHV3tGwyCptsrP","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA"}},"metadata":{{"from":"TWwCRQRZ2ZHMJFn9TzLp7W"}},"type":"0"}},"txnMetadata":{{"seqNo":4,"txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip)];
let txn_file_data = node_txns[0..(nodes_count as usize)].join("\n");
_create_genesis_txn_file(pool_name, txn_file_data.as_str(), txn_file_path)
}
fn _create_genesis_txn_file(pool_name: &str,
txn_file_data: &str,
txn_file_path: Option<&Path>) -> PathBuf {
let txn_file_path = txn_file_path.map_or(
super::environment::tmp_file_path(format!("{}.txn", pool_name).as_str()),
|path| path.to_path_buf());
if!txn_file_path.parent().unwrap().exists()
|
let mut f = fs::File::create(txn_file_path.as_path()).unwrap();
f.write_all(txn_file_data.as_bytes()).unwrap();
f.flush().unwrap();
f.sync_all().unwrap();
txn_file_path
}
fn _open_pool_ledger(pool_name: &str, config: Option<&str>) -> Result<i32, IndyError> {
pool::open_pool_ledger(pool_name, config).wait()
}
pub fn set_protocol_version(protocol_version: usize) -> Result<(), IndyError> {
pool::set_protocol_version(protocol_version).wait()
}
|
{
fs::DirBuilder::new()
.recursive(true)
.create(txn_file_path.parent().unwrap()).unwrap();
}
|
conditional_block
|
pool.rs
|
use indy::IndyError;
use indy::pool;
use indy::future::Future;
use serde_json::to_string;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::fs;
#[derive(Serialize, Deserialize)]
pub struct PoolConfig {
pub genesis_txn: String
}
const PROTOCOL_VERSION: usize = 2;
pub fn create_and_open_pool_ledger(pool_name: &str) -> Result<i32, IndyError> {
set_protocol_version(PROTOCOL_VERSION).unwrap();
let txn_file_path = _create_genesis_txn_file_for_test_pool(pool_name, None, None);
let pool_config = _pool_config_json(txn_file_path.as_path());
_create_pool_ledger_config(pool_name, Some(pool_config.as_str()))?;
_open_pool_ledger(pool_name, None)
}
pub fn close(pool_handle: i32) -> Result<(), IndyError> {
pool::close_pool_ledger(pool_handle).wait()
}
fn _pool_config_json(txn_file_path: &Path) -> String {
to_string(&PoolConfig {
genesis_txn: txn_file_path.to_string_lossy().to_string()
}).unwrap()
}
fn _create_pool_ledger_config(pool_name: &str, pool_config: Option<&str>) -> Result<(), IndyError> {
pool::create_pool_ledger_config(pool_name, pool_config).wait()
}
fn _create_genesis_txn_file_for_test_pool(pool_name: &str,
nodes_count: Option<u8>,
txn_file_path: Option<&Path>) -> PathBuf {
let nodes_count = nodes_count.unwrap_or(4);
let test_pool_ip = super::environment::test_pool_ip();
let node_txns = vec![
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node1","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","blskey_pop":"RahHYiCvoNCtPTrVtP7nMC5eTYrsUA8WjXbdhNc8debh1agE9bGiJxWBXYNFbnJXoXhWFMvyqhqhRoq737YQemH5ik9oL7R4NTTCz2LEZhkgLJzB3QRQqJyBNyv7acbdHrAT8nQ9UkLbaVL9NBpnWXBTw4LEMePaSHEw66RzPNdAX1","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv"}},"metadata":{{"from":"Th7MpTaRZVRYnPiabds81Y"}},"type":"0"}},"txnMetadata":{{"seqNo":1,"txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node2","blskey":"37rAPpXVoxzKhz7d9gkUe52XuXryuLXoM6P6LbWDB7LSbG62Lsb33sfG7zqS8TK1MXwuCHj1FKNzVpsnafmqLG1vXN88rt38mNFs9TENzm4QHdBzsvCuoBnPH7rpYYDo9DZNJePaDvRvqJKByCabubJz3XXKbEeshzpz4Ma5QYpJqjk","blskey_pop":"Qr658mWZ2YC8JXGXwMDQTzuZCWF7NK9EwxphGmcBvCh6ybUuLxbG65nsX4JvD4SPNtkJ2w9ug1yLTj6fgmuDg41TgECXjLCij3RMsV8CwewBVgVN67wsA45DFWvqvLtu4rjNnE9JbdFTc1Z4WCPA3Xan44K1HoHAq9EVeaRYs8zoF5","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb"}},"metadata":{{"from":"EbP4aYNeTHL6q385GuVpRV"}},"type":"0"}},"txnMetadata":{{"seqNo":2,"txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node3","blskey":"3WFpdbg7C5cnLYZwFZevJqhubkFALBfCBBok15GdrKMUhUjGsk3jV6QKj6MZgEubF7oqCafxNdkm7eswgA4sdKTRc82tLGzZBd6vNqU8dupzup6uYUf32KTHTPQbuUM8Yk4QFXjEf2Usu2TJcNkdgpyeUSX42u5LqdDDpNSWUK5deC5","blskey_pop":"QwDeb2CkNSx6r8QC8vGQK3GRv7Yndn84TGNijX8YXHPiagXajyfTjoR87rXUu4G4QLk2cF8NNyqWiYMus1623dELWwx57rLCFqGh7N4ZRbGDRP4fnVcaKg1BcUxQ866Ven4gw8y4N56S5HzxXNBZtLYmhGHvDtk6PFkFwCvxYrNYjh","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya"}},"metadata":{{"from":"4cU41vWW82ArfxJxHkzXPG"}},"type":"0"}},"txnMetadata":{{"seqNo":3,"txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node4","blskey":"2zN3bHM1m4rLz54MJHYSwvqzPchYp8jkHswveCLAEJVcX6Mm1wHQD1SkPYMzUDTZvWvhuE6VNAkK3KxVeEmsanSmvjVkReDeBEMxeDaayjcZjFGPydyey1qxBHmTvAnBKoPydvuTAqx5f7YNNRAdeLmUi99gERUU7TD8KfAa6MpQ9bw","blskey_pop":"RPLagxaR5xdimFzwmzYnz4ZhWtYQEj8iR5ZU53T2gitPCyCHQneUn2Huc4oeLd2B2HzkGnjAff4hWTJT6C7qHYB1Mv2wU5iHHGFWkhnTX9WsEAbunJCV2qcaXScKj4tTfvdDKfLiVuU2av6hbsMztirRze7LvYBkRHV3tGwyCptsrP","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA"}},"metadata":{{"from":"TWwCRQRZ2ZHMJFn9TzLp7W"}},"type":"0"}},"txnMetadata":{{"seqNo":4,"txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip)];
let txn_file_data = node_txns[0..(nodes_count as usize)].join("\n");
_create_genesis_txn_file(pool_name, txn_file_data.as_str(), txn_file_path)
}
fn _create_genesis_txn_file(pool_name: &str,
txn_file_data: &str,
txn_file_path: Option<&Path>) -> PathBuf {
let txn_file_path = txn_file_path.map_or(
super::environment::tmp_file_path(format!("{}.txn", pool_name).as_str()),
|path| path.to_path_buf());
if!txn_file_path.parent().unwrap().exists() {
fs::DirBuilder::new()
.recursive(true)
.create(txn_file_path.parent().unwrap()).unwrap();
}
let mut f = fs::File::create(txn_file_path.as_path()).unwrap();
f.write_all(txn_file_data.as_bytes()).unwrap();
f.flush().unwrap();
f.sync_all().unwrap();
txn_file_path
}
fn _open_pool_ledger(pool_name: &str, config: Option<&str>) -> Result<i32, IndyError> {
pool::open_pool_ledger(pool_name, config).wait()
}
|
pub fn set_protocol_version(protocol_version: usize) -> Result<(), IndyError> {
pool::set_protocol_version(protocol_version).wait()
}
|
random_line_split
|
|
pool.rs
|
use indy::IndyError;
use indy::pool;
use indy::future::Future;
use serde_json::to_string;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::fs;
#[derive(Serialize, Deserialize)]
pub struct PoolConfig {
pub genesis_txn: String
}
const PROTOCOL_VERSION: usize = 2;
pub fn create_and_open_pool_ledger(pool_name: &str) -> Result<i32, IndyError>
|
pub fn close(pool_handle: i32) -> Result<(), IndyError> {
pool::close_pool_ledger(pool_handle).wait()
}
fn _pool_config_json(txn_file_path: &Path) -> String {
to_string(&PoolConfig {
genesis_txn: txn_file_path.to_string_lossy().to_string()
}).unwrap()
}
fn _create_pool_ledger_config(pool_name: &str, pool_config: Option<&str>) -> Result<(), IndyError> {
pool::create_pool_ledger_config(pool_name, pool_config).wait()
}
fn _create_genesis_txn_file_for_test_pool(pool_name: &str,
nodes_count: Option<u8>,
txn_file_path: Option<&Path>) -> PathBuf {
let nodes_count = nodes_count.unwrap_or(4);
let test_pool_ip = super::environment::test_pool_ip();
let node_txns = vec![
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node1","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","blskey_pop":"RahHYiCvoNCtPTrVtP7nMC5eTYrsUA8WjXbdhNc8debh1agE9bGiJxWBXYNFbnJXoXhWFMvyqhqhRoq737YQemH5ik9oL7R4NTTCz2LEZhkgLJzB3QRQqJyBNyv7acbdHrAT8nQ9UkLbaVL9NBpnWXBTw4LEMePaSHEw66RzPNdAX1","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv"}},"metadata":{{"from":"Th7MpTaRZVRYnPiabds81Y"}},"type":"0"}},"txnMetadata":{{"seqNo":1,"txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node2","blskey":"37rAPpXVoxzKhz7d9gkUe52XuXryuLXoM6P6LbWDB7LSbG62Lsb33sfG7zqS8TK1MXwuCHj1FKNzVpsnafmqLG1vXN88rt38mNFs9TENzm4QHdBzsvCuoBnPH7rpYYDo9DZNJePaDvRvqJKByCabubJz3XXKbEeshzpz4Ma5QYpJqjk","blskey_pop":"Qr658mWZ2YC8JXGXwMDQTzuZCWF7NK9EwxphGmcBvCh6ybUuLxbG65nsX4JvD4SPNtkJ2w9ug1yLTj6fgmuDg41TgECXjLCij3RMsV8CwewBVgVN67wsA45DFWvqvLtu4rjNnE9JbdFTc1Z4WCPA3Xan44K1HoHAq9EVeaRYs8zoF5","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb"}},"metadata":{{"from":"EbP4aYNeTHL6q385GuVpRV"}},"type":"0"}},"txnMetadata":{{"seqNo":2,"txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node3","blskey":"3WFpdbg7C5cnLYZwFZevJqhubkFALBfCBBok15GdrKMUhUjGsk3jV6QKj6MZgEubF7oqCafxNdkm7eswgA4sdKTRc82tLGzZBd6vNqU8dupzup6uYUf32KTHTPQbuUM8Yk4QFXjEf2Usu2TJcNkdgpyeUSX42u5LqdDDpNSWUK5deC5","blskey_pop":"QwDeb2CkNSx6r8QC8vGQK3GRv7Yndn84TGNijX8YXHPiagXajyfTjoR87rXUu4G4QLk2cF8NNyqWiYMus1623dELWwx57rLCFqGh7N4ZRbGDRP4fnVcaKg1BcUxQ866Ven4gw8y4N56S5HzxXNBZtLYmhGHvDtk6PFkFwCvxYrNYjh","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya"}},"metadata":{{"from":"4cU41vWW82ArfxJxHkzXPG"}},"type":"0"}},"txnMetadata":{{"seqNo":3,"txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node4","blskey":"2zN3bHM1m4rLz54MJHYSwvqzPchYp8jkHswveCLAEJVcX6Mm1wHQD1SkPYMzUDTZvWvhuE6VNAkK3KxVeEmsanSmvjVkReDeBEMxeDaayjcZjFGPydyey1qxBHmTvAnBKoPydvuTAqx5f7YNNRAdeLmUi99gERUU7TD8KfAa6MpQ9bw","blskey_pop":"RPLagxaR5xdimFzwmzYnz4ZhWtYQEj8iR5ZU53T2gitPCyCHQneUn2Huc4oeLd2B2HzkGnjAff4hWTJT6C7qHYB1Mv2wU5iHHGFWkhnTX9WsEAbunJCV2qcaXScKj4tTfvdDKfLiVuU2av6hbsMztirRze7LvYBkRHV3tGwyCptsrP","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA"}},"metadata":{{"from":"TWwCRQRZ2ZHMJFn9TzLp7W"}},"type":"0"}},"txnMetadata":{{"seqNo":4,"txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip)];
let txn_file_data = node_txns[0..(nodes_count as usize)].join("\n");
_create_genesis_txn_file(pool_name, txn_file_data.as_str(), txn_file_path)
}
fn _create_genesis_txn_file(pool_name: &str,
txn_file_data: &str,
txn_file_path: Option<&Path>) -> PathBuf {
let txn_file_path = txn_file_path.map_or(
super::environment::tmp_file_path(format!("{}.txn", pool_name).as_str()),
|path| path.to_path_buf());
if!txn_file_path.parent().unwrap().exists() {
fs::DirBuilder::new()
.recursive(true)
.create(txn_file_path.parent().unwrap()).unwrap();
}
let mut f = fs::File::create(txn_file_path.as_path()).unwrap();
f.write_all(txn_file_data.as_bytes()).unwrap();
f.flush().unwrap();
f.sync_all().unwrap();
txn_file_path
}
fn _open_pool_ledger(pool_name: &str, config: Option<&str>) -> Result<i32, IndyError> {
pool::open_pool_ledger(pool_name, config).wait()
}
pub fn set_protocol_version(protocol_version: usize) -> Result<(), IndyError> {
pool::set_protocol_version(protocol_version).wait()
}
|
{
set_protocol_version(PROTOCOL_VERSION).unwrap();
let txn_file_path = _create_genesis_txn_file_for_test_pool(pool_name, None, None);
let pool_config = _pool_config_json(txn_file_path.as_path());
_create_pool_ledger_config(pool_name, Some(pool_config.as_str()))?;
_open_pool_ledger(pool_name, None)
}
|
identifier_body
|
pool.rs
|
use indy::IndyError;
use indy::pool;
use indy::future::Future;
use serde_json::to_string;
use std::io::Write;
use std::path::Path;
use std::path::PathBuf;
use std::fs;
#[derive(Serialize, Deserialize)]
pub struct PoolConfig {
pub genesis_txn: String
}
const PROTOCOL_VERSION: usize = 2;
pub fn create_and_open_pool_ledger(pool_name: &str) -> Result<i32, IndyError> {
set_protocol_version(PROTOCOL_VERSION).unwrap();
let txn_file_path = _create_genesis_txn_file_for_test_pool(pool_name, None, None);
let pool_config = _pool_config_json(txn_file_path.as_path());
_create_pool_ledger_config(pool_name, Some(pool_config.as_str()))?;
_open_pool_ledger(pool_name, None)
}
pub fn close(pool_handle: i32) -> Result<(), IndyError> {
pool::close_pool_ledger(pool_handle).wait()
}
fn _pool_config_json(txn_file_path: &Path) -> String {
to_string(&PoolConfig {
genesis_txn: txn_file_path.to_string_lossy().to_string()
}).unwrap()
}
fn _create_pool_ledger_config(pool_name: &str, pool_config: Option<&str>) -> Result<(), IndyError> {
pool::create_pool_ledger_config(pool_name, pool_config).wait()
}
fn _create_genesis_txn_file_for_test_pool(pool_name: &str,
nodes_count: Option<u8>,
txn_file_path: Option<&Path>) -> PathBuf {
let nodes_count = nodes_count.unwrap_or(4);
let test_pool_ip = super::environment::test_pool_ip();
let node_txns = vec![
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node1","blskey":"4N8aUNHSgjQVgkpm8nhNEfDf6txHznoYREg9kirmJrkivgL4oSEimFF6nsQ6M41QvhM2Z33nves5vfSn9n1UwNFJBYtWVnHYMATn76vLuL3zU88KyeAYcHfsih3He6UHcXDxcaecHVz6jhCYz1P2UZn2bDVruL5wXpehgBfBaLKm3Ba","blskey_pop":"RahHYiCvoNCtPTrVtP7nMC5eTYrsUA8WjXbdhNc8debh1agE9bGiJxWBXYNFbnJXoXhWFMvyqhqhRoq737YQemH5ik9oL7R4NTTCz2LEZhkgLJzB3QRQqJyBNyv7acbdHrAT8nQ9UkLbaVL9NBpnWXBTw4LEMePaSHEw66RzPNdAX1","client_ip":"{}","client_port":9702,"node_ip":"{}","node_port":9701,"services":["VALIDATOR"]}},"dest":"Gw6pDLhcBcoQesN72qfotTgFa7cbuqZpkX3Xo6pLhPhv"}},"metadata":{{"from":"Th7MpTaRZVRYnPiabds81Y"}},"type":"0"}},"txnMetadata":{{"seqNo":1,"txnId":"fea82e10e894419fe2bea7d96296a6d46f50f93f9eeda954ec461b2ed2950b62"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node2","blskey":"37rAPpXVoxzKhz7d9gkUe52XuXryuLXoM6P6LbWDB7LSbG62Lsb33sfG7zqS8TK1MXwuCHj1FKNzVpsnafmqLG1vXN88rt38mNFs9TENzm4QHdBzsvCuoBnPH7rpYYDo9DZNJePaDvRvqJKByCabubJz3XXKbEeshzpz4Ma5QYpJqjk","blskey_pop":"Qr658mWZ2YC8JXGXwMDQTzuZCWF7NK9EwxphGmcBvCh6ybUuLxbG65nsX4JvD4SPNtkJ2w9ug1yLTj6fgmuDg41TgECXjLCij3RMsV8CwewBVgVN67wsA45DFWvqvLtu4rjNnE9JbdFTc1Z4WCPA3Xan44K1HoHAq9EVeaRYs8zoF5","client_ip":"{}","client_port":9704,"node_ip":"{}","node_port":9703,"services":["VALIDATOR"]}},"dest":"8ECVSk179mjsjKRLWiQtssMLgp6EPhWXtaYyStWPSGAb"}},"metadata":{{"from":"EbP4aYNeTHL6q385GuVpRV"}},"type":"0"}},"txnMetadata":{{"seqNo":2,"txnId":"1ac8aece2a18ced660fef8694b61aac3af08ba875ce3026a160acbc3a3af35fc"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node3","blskey":"3WFpdbg7C5cnLYZwFZevJqhubkFALBfCBBok15GdrKMUhUjGsk3jV6QKj6MZgEubF7oqCafxNdkm7eswgA4sdKTRc82tLGzZBd6vNqU8dupzup6uYUf32KTHTPQbuUM8Yk4QFXjEf2Usu2TJcNkdgpyeUSX42u5LqdDDpNSWUK5deC5","blskey_pop":"QwDeb2CkNSx6r8QC8vGQK3GRv7Yndn84TGNijX8YXHPiagXajyfTjoR87rXUu4G4QLk2cF8NNyqWiYMus1623dELWwx57rLCFqGh7N4ZRbGDRP4fnVcaKg1BcUxQ866Ven4gw8y4N56S5HzxXNBZtLYmhGHvDtk6PFkFwCvxYrNYjh","client_ip":"{}","client_port":9706,"node_ip":"{}","node_port":9705,"services":["VALIDATOR"]}},"dest":"DKVxG2fXXTU8yT5N7hGEbXB3dfdAnYv1JczDUHpmDxya"}},"metadata":{{"from":"4cU41vWW82ArfxJxHkzXPG"}},"type":"0"}},"txnMetadata":{{"seqNo":3,"txnId":"7e9f355dffa78ed24668f0e0e369fd8c224076571c51e2ea8be5f26479edebe4"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip),
format!(r#"{{"reqSignature":{{}},"txn":{{"data":{{"data":{{"alias":"Node4","blskey":"2zN3bHM1m4rLz54MJHYSwvqzPchYp8jkHswveCLAEJVcX6Mm1wHQD1SkPYMzUDTZvWvhuE6VNAkK3KxVeEmsanSmvjVkReDeBEMxeDaayjcZjFGPydyey1qxBHmTvAnBKoPydvuTAqx5f7YNNRAdeLmUi99gERUU7TD8KfAa6MpQ9bw","blskey_pop":"RPLagxaR5xdimFzwmzYnz4ZhWtYQEj8iR5ZU53T2gitPCyCHQneUn2Huc4oeLd2B2HzkGnjAff4hWTJT6C7qHYB1Mv2wU5iHHGFWkhnTX9WsEAbunJCV2qcaXScKj4tTfvdDKfLiVuU2av6hbsMztirRze7LvYBkRHV3tGwyCptsrP","client_ip":"{}","client_port":9708,"node_ip":"{}","node_port":9707,"services":["VALIDATOR"]}},"dest":"4PS3EDQ3dW1tci1Bp6543CfuuebjFrg36kLAUcskGfaA"}},"metadata":{{"from":"TWwCRQRZ2ZHMJFn9TzLp7W"}},"type":"0"}},"txnMetadata":{{"seqNo":4,"txnId":"aa5e817d7cc626170eca175822029339a444eb0ee8f0bd20d3b0b76e566fb008"}},"ver":"1"}}"#, test_pool_ip, test_pool_ip)];
let txn_file_data = node_txns[0..(nodes_count as usize)].join("\n");
_create_genesis_txn_file(pool_name, txn_file_data.as_str(), txn_file_path)
}
fn _create_genesis_txn_file(pool_name: &str,
txn_file_data: &str,
txn_file_path: Option<&Path>) -> PathBuf {
let txn_file_path = txn_file_path.map_or(
super::environment::tmp_file_path(format!("{}.txn", pool_name).as_str()),
|path| path.to_path_buf());
if!txn_file_path.parent().unwrap().exists() {
fs::DirBuilder::new()
.recursive(true)
.create(txn_file_path.parent().unwrap()).unwrap();
}
let mut f = fs::File::create(txn_file_path.as_path()).unwrap();
f.write_all(txn_file_data.as_bytes()).unwrap();
f.flush().unwrap();
f.sync_all().unwrap();
txn_file_path
}
fn
|
(pool_name: &str, config: Option<&str>) -> Result<i32, IndyError> {
pool::open_pool_ledger(pool_name, config).wait()
}
pub fn set_protocol_version(protocol_version: usize) -> Result<(), IndyError> {
pool::set_protocol_version(protocol_version).wait()
}
|
_open_pool_ledger
|
identifier_name
|
re.rs
|
String>>,
#[doc(hidden)]
pub prog: Program
}
#[doc(hidden)]
pub struct ExNative {
#[doc(hidden)]
pub original: &'static str,
#[doc(hidden)]
pub names: &'static [Option<&'static str>],
#[doc(hidden)]
pub prog: fn(MatchKind, &str, uint, uint) -> Vec<Option<uint>>
}
impl Clone for ExNative {
fn clone(&self) -> ExNative { *self }
}
impl fmt::Show for Regex {
/// Shows the original regular expression.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl Regex {
/// Compiles a dynamic regular expression. Once compiled, it can be
/// used repeatedly to search, split or replace text in a string.
///
/// When possible, you should prefer the `regex!` macro since it is
/// safer and always faster.
///
/// If an invalid expression is given, then an error is returned.
pub fn new(re: &str) -> Result<Regex, parse::Error> {
let ast = try!(parse::parse(re));
let (prog, names) = Program::new(ast);
Ok(Dynamic(ExDynamic {
original: re.to_string(),
names: names,
prog: prog,
}))
}
/// Returns true if and only if the regex matches the string given.
///
/// # Example
///
/// Test if some text contains at least one word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let matched = regex!(r"\b\w{13}\b").is_match(text);
/// assert!(matched);
/// # }
/// ```
pub fn is_match(&self, text: &str) -> bool {
has_match(&exec(self, Exists, text))
}
/// Returns the start and end byte range of the leftmost-first match in
/// `text`. If no match exists, then `None` is returned.
///
/// Note that this should only be used if you want to discover the position
/// of the match. Testing the existence of a match is faster if you use
/// `is_match`.
///
/// # Example
///
/// Find the start and end location of the first word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let pos = regex!(r"\b\w{13}\b").find(text);
/// assert_eq!(pos, Some((2, 15)));
/// # }
/// ```
pub fn find(&self, text: &str) -> Option<(uint, uint)> {
let caps = exec(self, Location, text);
if has_match(&caps) {
Some((caps[0].unwrap(), caps[1].unwrap()))
} else {
None
}
}
/// Returns an iterator for each successive non-overlapping match in
/// `text`, returning the start and end byte indices with respect to
/// `text`.
///
/// # Example
///
/// Find the start and end location of every word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "Retroactively relinquishing remunerations is reprehensible.";
/// for pos in regex!(r"\b\w{13}\b").find_iter(text) {
/// println!("{}", pos);
/// }
/// // Output:
/// // (0, 13)
/// // (14, 27)
/// // (28, 41)
/// // (45, 58)
/// # }
/// ```
pub fn find_iter<'r, 't>(&'r self, text: &'t str) -> FindMatches<'r, 't> {
FindMatches {
re: self,
search: text,
last_end: 0,
last_match: None,
}
}
/// Returns the capture groups corresponding to the leftmost-first
/// match in `text`. Capture group `0` always corresponds to the entire
/// match. If no match is found, then `None` is returned.
///
/// You should only use `captures` if you need access to submatches.
/// Otherwise, `find` is faster for discovering the location of the overall
/// match.
///
/// # Examples
///
/// Say you have some text with movie names and their release years,
/// like "'Citizen Kane' (1941)". It'd be nice if we could search for text
/// looking like that, while also extracting the movie name and its release
/// year separately.
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'([^']+)'\s+\((\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
/// assert_eq!(caps.at(1), "Citizen Kane");
/// assert_eq!(caps.at(2), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Note that the full match is at capture group `0`. Each subsequent
/// capture group is indexed by the order of its opening `(`.
///
/// We can make this example a bit clearer by using *named* capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
/// assert_eq!(caps.name("title"), "Citizen Kane");
/// assert_eq!(caps.name("year"), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Here we name the capture groups, which we can access with the `name`
/// method. Note that the named capture groups are still accessible with
/// `at`.
///
/// The `0`th capture group is always unnamed, so it must always be
/// accessed with `at(0)`.
pub fn captures<'t>(&self, text: &'t str) -> Option<Captures<'t>> {
let caps = exec(self, Submatches, text);
Captures::new(self, text, caps)
}
/// Returns an iterator over all the non-overlapping capture groups matched
/// in `text`. This is operationally the same as `find_iter` (except it
/// yields information about submatches).
///
/// # Example
///
/// We can use this to find all movie titles and their release years in
/// some text, where the movie is formatted like "'Title' (xxxx)":
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
/// for caps in re.captures_iter(text) {
/// println!("Movie: {}, Released: {}", caps.name("title"), caps.name("year"));
/// }
/// // Output:
/// // Movie: Citizen Kane, Released: 1941
/// // Movie: The Wizard of Oz, Released: 1939
/// // Movie: M, Released: 1931
/// # }
/// ```
pub fn captures_iter<'r, 't>(&'r self, text: &'t str)
-> FindCaptures<'r, 't> {
FindCaptures {
re: self,
search: text,
last_match: None,
last_end: 0,
}
}
/// Returns an iterator of substrings of `text` delimited by a match
/// of the regular expression.
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// To split a string delimited by arbitrary amounts of spaces or tabs:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"[ \t]+");
/// let fields: Vec<&str> = re.split("a b \t c\td e").collect();
/// assert_eq!(fields, vec!("a", "b", "c", "d", "e"));
/// # }
/// ```
pub fn split<'r, 't>(&'r self, text: &'t str) -> RegexSplits<'r, 't> {
RegexSplits {
finder: self.find_iter(text),
last: 0,
}
}
/// Returns an iterator of at most `limit` substrings of `text` delimited
/// by a match of the regular expression. (A `limit` of `0` will return no
/// substrings.)
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
/// The remainder of the string that is not split will be the last element
/// in the iterator.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// Get the first two words in some text:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"\W+");
/// let fields: Vec<&str> = re.splitn("Hey! How are you?", 3).collect();
/// assert_eq!(fields, vec!("Hey", "How", "are you?"));
/// # }
/// ```
pub fn splitn<'r, 't>(&'r self, text: &'t str, limit: uint)
-> RegexSplitsN<'r, 't> {
RegexSplitsN {
splits: self.split(text),
cur: 0,
limit: limit,
}
}
/// Replaces the leftmost-first match with the replacement provided.
/// The replacement can be a regular string (where `$N` and `$name` are
/// expanded to match capture groups) or a function that takes the matches'
/// `Captures` and returns the replaced string.
///
/// If no match is found, then a copy of the string is returned unchanged.
///
/// # Examples
///
/// Note that this function is polymorphic with respect to the replacement.
/// In typical usage, this can just be a normal string:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!("[^01]+");
/// assert_eq!(re.replace("1078910", "").as_slice(), "1010");
/// # }
/// ```
///
/// But anything satisfying the `Replacer` trait will work. For example,
/// a closure of type `|&Captures| -> String` provides direct access to the
/// captures corresponding to a match. This allows one to access
/// submatches easily:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # use regex::Captures; fn main() {
/// let re = regex!(r"([^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", |caps: &Captures| {
/// format!("{} {}", caps.at(2), caps.at(1))
/// });
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// But this is a bit cumbersome to use all the time. Instead, a simple
/// syntax is supported that expands `$name` into the corresponding capture
/// group. Here's the last example, but using this expansion technique
/// with named capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(?P<first>\S+)");
/// let result = re.replace("Springsteen, Bruce", "$first $last");
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// Note that using `$2` instead of `$first` or `$1` instead of `$last`
/// would produce the same result. To write a literal `$` use `$$`.
///
/// Finally, sometimes you just want to replace a literal string with no
/// submatch expansion. This can be done by wrapping a string with
/// `NoExpand`:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// use regex::NoExpand;
///
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last"));
/// assert_eq!(result.as_slice(), "$2 $last");
/// # }
/// ```
pub fn replace<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 1, rep)
}
/// Replaces all non-overlapping matches in `text` with the
/// replacement provided. This is the same as calling `replacen` with
/// `limit` set to `0`.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replace_all<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 0, rep)
}
/// Replaces at most `limit` non-overlapping matches in `text` with the
/// replacement provided. If `limit` is 0, then all non-overlapping matches
/// are replaced.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replacen<R: Replacer>
(&self, text: &str, limit: uint, mut rep: R) -> String {
let mut new = String::with_capacity(text.len());
let mut last_match = 0u;
for (i, cap) in self.captures_iter(text).enumerate() {
// It'd be nicer to use the 'take' iterator instead, but it seemed
// awkward given that '0' => no limit.
if limit > 0 && i >= limit {
break
}
let (s, e) = cap.pos(0).unwrap(); // captures only reports matches
new.push_str(text.slice(last_match, s));
new.push_str(rep.reg_replace(&cap).as_slice());
last_match = e;
}
new.push_str(text.slice(last_match, text.len()));
return new;
}
/// Returns the original string of this regex.
pub fn as_str<'a>(&'a self) -> &'a str {
match *self {
Dynamic(ExDynamic { ref original,.. }) => original.as_slice(),
Native(ExNative { ref original,.. }) => original.as_slice(),
}
}
#[doc(hidden)]
#[experimental]
pub fn names_iter<'a>(&'a self) -> NamesIter<'a> {
match *self {
Native(ref n) => NamesIterNative(n.names.iter()),
Dynamic(ref d) => NamesIterDynamic(d.names.iter())
}
}
fn names_len(&self) -> uint {
match *self {
Native(ref n) => n.names.len(),
Dynamic(ref d) => d.names.len()
}
}
}
pub enum NamesIter<'a> {
NamesIterNative(::std::slice::Items<'a, Option<&'static str>>),
NamesIterDynamic(::std::slice::Items<'a, Option<String>>)
}
impl<'a> Iterator<Option<String>> for NamesIter<'a> {
fn next(&mut self) -> Option<Option<String>> {
match *self {
NamesIterNative(ref mut i) => i.next().map(|x| x.map(|s| s.to_string())),
NamesIterDynamic(ref mut i) => i.next().map(|x| x.as_ref().map(|s| s.to_string())),
}
}
}
/// NoExpand indicates literal string replacement.
///
/// It can be used with `replace` and `replace_all` to do a literal
/// string replacement without expanding `$name` to their corresponding
/// capture groups.
///
/// `'r` is the lifetime of the literal text.
pub struct NoExpand<'t>(pub &'t str);
/// Replacer describes types that can be used to replace matches in a string.
pub trait Replacer {
/// Returns a possibly owned string that is used to replace the match
/// corresponding the the `caps` capture group.
///
/// The `'a` lifetime refers to the lifetime of a borrowed string when
/// a new owned string isn't needed (e.g., for `NoExpand`).
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a>;
}
impl<'t> Replacer for NoExpand<'t> {
fn reg_replace<'a>(&'a mut self, _: &Captures) -> MaybeOwned<'a> {
let NoExpand(s) = *self;
Slice(s)
}
}
impl<'t> Replacer for &'t str {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a>
|
}
impl<'t> Replacer for |&Captures|: 't -> String {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a> {
Owned((*self)(caps))
}
}
/// Yields all substrings delimited by a regular expression match.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplits<'r, 't> {
finder: FindMatches<'r, 't>,
last: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplits<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.finder.search;
match self.finder.next() {
None => {
if self.last >= text.len() {
None
} else {
let s = text.slice(self.last, text.len());
self.last = text.len();
Some(s)
}
}
Some((s, e)) => {
let matched = text.slice(self.last, s);
self.last = e;
Some(matched)
}
}
}
}
/// Yields at most `N` substrings delimited by a regular expression match.
///
/// The last substring will be whatever remains after splitting.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplitsN<'r, 't> {
splits: RegexSplits<'r, 't>,
cur: uint,
limit: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplitsN<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.splits.finder.search;
if self.cur >= self.limit {
None
} else {
self.cur += 1;
if self.cur >= self.limit {
Some(text.slice(self.splits.last, text.len()))
} else {
self.splits.next()
}
}
}
}
/// Captures represents a group of captured strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex.
/// If a capture group is named, then the matched string is *also* available
/// via the `name` method. (Note that the 0th capture is always unnamed and so
/// must be accessed with the `at` method.)
///
/// Positions returned from a capture group are always byte indices.
///
/// `'t` is the lifetime of the matched text.
pub struct Captures<'t> {
text: &'t str,
locs: CaptureLocs,
named: Option<HashMap<String, uint>>,
}
impl<'t> Captures<'t> {
#[allow(experimental)]
fn new(re: &Regex, search: &'t str, locs: CaptureLocs)
-> Option<Captures<'t>> {
if!has_match(&locs) {
return None
}
let named =
if re.names_len() == 0 {
None
} else {
let mut named = HashMap::new();
for (i, name) in re.names_iter().enumerate() {
match name {
None => {},
Some(name) => {
named.insert(name, i);
}
}
}
Some(named)
};
Some(Captures {
text: search,
locs: locs,
named: named,
})
}
/// Returns the start and end positions of the Nth capture group.
/// Returns `None` if `i` is not a valid capture group or if the capture
/// group did not match anything.
/// The positions returned are *always* byte indices with respect to the
/// original string matched.
pub fn pos(&self, i: uint) -> Option<(uint, uint)> {
let (s, e) = (i * 2, i * 2 + 1);
if e >= self.locs.len() || self.locs[s].is_none() {
// VM guarantees that each pair of locations are both Some or None.
return None
}
Some((self.locs[s].unwrap(), self.locs[e].unwrap()))
}
/// Returns the matched string for the capture group `i`.
/// If `i` isn't a valid capture group or didn't match anything, then the
/// empty string is returned.
pub fn at(&self, i: uint) -> &'t str {
match self.pos(i) {
None => "",
Some((s, e)) => {
self.text.slice(s, e)
}
}
}
/// Returns the matched string for the capture group named `name`.
/// If `name` isn't a valid capture group or didn't match anything, then
|
{
Owned(caps.expand(*self))
}
|
identifier_body
|
re.rs
|
let ast = try!(parse::parse(re));
let (prog, names) = Program::new(ast);
Ok(Dynamic(ExDynamic {
original: re.to_string(),
names: names,
prog: prog,
}))
}
/// Returns true if and only if the regex matches the string given.
///
/// # Example
///
/// Test if some text contains at least one word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let matched = regex!(r"\b\w{13}\b").is_match(text);
/// assert!(matched);
/// # }
/// ```
pub fn is_match(&self, text: &str) -> bool {
has_match(&exec(self, Exists, text))
}
/// Returns the start and end byte range of the leftmost-first match in
/// `text`. If no match exists, then `None` is returned.
///
/// Note that this should only be used if you want to discover the position
/// of the match. Testing the existence of a match is faster if you use
/// `is_match`.
///
/// # Example
///
/// Find the start and end location of the first word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let pos = regex!(r"\b\w{13}\b").find(text);
/// assert_eq!(pos, Some((2, 15)));
/// # }
/// ```
pub fn find(&self, text: &str) -> Option<(uint, uint)> {
let caps = exec(self, Location, text);
if has_match(&caps) {
Some((caps[0].unwrap(), caps[1].unwrap()))
} else {
None
}
}
/// Returns an iterator for each successive non-overlapping match in
/// `text`, returning the start and end byte indices with respect to
/// `text`.
///
/// # Example
///
/// Find the start and end location of every word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "Retroactively relinquishing remunerations is reprehensible.";
/// for pos in regex!(r"\b\w{13}\b").find_iter(text) {
/// println!("{}", pos);
/// }
/// // Output:
/// // (0, 13)
/// // (14, 27)
/// // (28, 41)
/// // (45, 58)
/// # }
/// ```
pub fn find_iter<'r, 't>(&'r self, text: &'t str) -> FindMatches<'r, 't> {
FindMatches {
re: self,
search: text,
last_end: 0,
last_match: None,
}
}
/// Returns the capture groups corresponding to the leftmost-first
/// match in `text`. Capture group `0` always corresponds to the entire
/// match. If no match is found, then `None` is returned.
///
/// You should only use `captures` if you need access to submatches.
/// Otherwise, `find` is faster for discovering the location of the overall
/// match.
///
/// # Examples
///
/// Say you have some text with movie names and their release years,
/// like "'Citizen Kane' (1941)". It'd be nice if we could search for text
/// looking like that, while also extracting the movie name and its release
/// year separately.
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'([^']+)'\s+\((\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
/// assert_eq!(caps.at(1), "Citizen Kane");
/// assert_eq!(caps.at(2), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Note that the full match is at capture group `0`. Each subsequent
/// capture group is indexed by the order of its opening `(`.
///
/// We can make this example a bit clearer by using *named* capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
/// assert_eq!(caps.name("title"), "Citizen Kane");
/// assert_eq!(caps.name("year"), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Here we name the capture groups, which we can access with the `name`
/// method. Note that the named capture groups are still accessible with
/// `at`.
///
/// The `0`th capture group is always unnamed, so it must always be
/// accessed with `at(0)`.
pub fn captures<'t>(&self, text: &'t str) -> Option<Captures<'t>> {
let caps = exec(self, Submatches, text);
Captures::new(self, text, caps)
}
/// Returns an iterator over all the non-overlapping capture groups matched
/// in `text`. This is operationally the same as `find_iter` (except it
/// yields information about submatches).
///
/// # Example
///
/// We can use this to find all movie titles and their release years in
/// some text, where the movie is formatted like "'Title' (xxxx)":
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
/// for caps in re.captures_iter(text) {
/// println!("Movie: {}, Released: {}", caps.name("title"), caps.name("year"));
/// }
/// // Output:
/// // Movie: Citizen Kane, Released: 1941
/// // Movie: The Wizard of Oz, Released: 1939
/// // Movie: M, Released: 1931
/// # }
/// ```
pub fn captures_iter<'r, 't>(&'r self, text: &'t str)
-> FindCaptures<'r, 't> {
FindCaptures {
re: self,
search: text,
last_match: None,
last_end: 0,
}
}
/// Returns an iterator of substrings of `text` delimited by a match
/// of the regular expression.
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// To split a string delimited by arbitrary amounts of spaces or tabs:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"[ \t]+");
/// let fields: Vec<&str> = re.split("a b \t c\td e").collect();
/// assert_eq!(fields, vec!("a", "b", "c", "d", "e"));
/// # }
/// ```
pub fn split<'r, 't>(&'r self, text: &'t str) -> RegexSplits<'r, 't> {
RegexSplits {
finder: self.find_iter(text),
last: 0,
}
}
/// Returns an iterator of at most `limit` substrings of `text` delimited
/// by a match of the regular expression. (A `limit` of `0` will return no
/// substrings.)
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
/// The remainder of the string that is not split will be the last element
/// in the iterator.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// Get the first two words in some text:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"\W+");
/// let fields: Vec<&str> = re.splitn("Hey! How are you?", 3).collect();
/// assert_eq!(fields, vec!("Hey", "How", "are you?"));
/// # }
/// ```
pub fn splitn<'r, 't>(&'r self, text: &'t str, limit: uint)
-> RegexSplitsN<'r, 't> {
RegexSplitsN {
splits: self.split(text),
cur: 0,
limit: limit,
}
}
/// Replaces the leftmost-first match with the replacement provided.
/// The replacement can be a regular string (where `$N` and `$name` are
/// expanded to match capture groups) or a function that takes the matches'
/// `Captures` and returns the replaced string.
///
/// If no match is found, then a copy of the string is returned unchanged.
///
/// # Examples
///
/// Note that this function is polymorphic with respect to the replacement.
/// In typical usage, this can just be a normal string:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!("[^01]+");
/// assert_eq!(re.replace("1078910", "").as_slice(), "1010");
/// # }
/// ```
///
/// But anything satisfying the `Replacer` trait will work. For example,
/// a closure of type `|&Captures| -> String` provides direct access to the
/// captures corresponding to a match. This allows one to access
/// submatches easily:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # use regex::Captures; fn main() {
/// let re = regex!(r"([^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", |caps: &Captures| {
/// format!("{} {}", caps.at(2), caps.at(1))
/// });
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// But this is a bit cumbersome to use all the time. Instead, a simple
/// syntax is supported that expands `$name` into the corresponding capture
/// group. Here's the last example, but using this expansion technique
/// with named capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(?P<first>\S+)");
/// let result = re.replace("Springsteen, Bruce", "$first $last");
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// Note that using `$2` instead of `$first` or `$1` instead of `$last`
/// would produce the same result. To write a literal `$` use `$$`.
///
/// Finally, sometimes you just want to replace a literal string with no
/// submatch expansion. This can be done by wrapping a string with
/// `NoExpand`:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// use regex::NoExpand;
///
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last"));
/// assert_eq!(result.as_slice(), "$2 $last");
/// # }
/// ```
pub fn replace<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 1, rep)
}
/// Replaces all non-overlapping matches in `text` with the
/// replacement provided. This is the same as calling `replacen` with
/// `limit` set to `0`.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replace_all<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 0, rep)
}
/// Replaces at most `limit` non-overlapping matches in `text` with the
/// replacement provided. If `limit` is 0, then all non-overlapping matches
/// are replaced.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replacen<R: Replacer>
(&self, text: &str, limit: uint, mut rep: R) -> String {
let mut new = String::with_capacity(text.len());
let mut last_match = 0u;
for (i, cap) in self.captures_iter(text).enumerate() {
// It'd be nicer to use the 'take' iterator instead, but it seemed
// awkward given that '0' => no limit.
if limit > 0 && i >= limit {
break
}
let (s, e) = cap.pos(0).unwrap(); // captures only reports matches
new.push_str(text.slice(last_match, s));
new.push_str(rep.reg_replace(&cap).as_slice());
last_match = e;
}
new.push_str(text.slice(last_match, text.len()));
return new;
}
/// Returns the original string of this regex.
pub fn as_str<'a>(&'a self) -> &'a str {
match *self {
Dynamic(ExDynamic { ref original,.. }) => original.as_slice(),
Native(ExNative { ref original,.. }) => original.as_slice(),
}
}
#[doc(hidden)]
#[experimental]
pub fn names_iter<'a>(&'a self) -> NamesIter<'a> {
match *self {
Native(ref n) => NamesIterNative(n.names.iter()),
Dynamic(ref d) => NamesIterDynamic(d.names.iter())
}
}
fn names_len(&self) -> uint {
match *self {
Native(ref n) => n.names.len(),
Dynamic(ref d) => d.names.len()
}
}
}
pub enum NamesIter<'a> {
NamesIterNative(::std::slice::Items<'a, Option<&'static str>>),
NamesIterDynamic(::std::slice::Items<'a, Option<String>>)
}
impl<'a> Iterator<Option<String>> for NamesIter<'a> {
fn next(&mut self) -> Option<Option<String>> {
match *self {
NamesIterNative(ref mut i) => i.next().map(|x| x.map(|s| s.to_string())),
NamesIterDynamic(ref mut i) => i.next().map(|x| x.as_ref().map(|s| s.to_string())),
}
}
}
/// NoExpand indicates literal string replacement.
///
/// It can be used with `replace` and `replace_all` to do a literal
/// string replacement without expanding `$name` to their corresponding
/// capture groups.
///
/// `'r` is the lifetime of the literal text.
pub struct NoExpand<'t>(pub &'t str);
/// Replacer describes types that can be used to replace matches in a string.
pub trait Replacer {
/// Returns a possibly owned string that is used to replace the match
/// corresponding the the `caps` capture group.
///
/// The `'a` lifetime refers to the lifetime of a borrowed string when
/// a new owned string isn't needed (e.g., for `NoExpand`).
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a>;
}
impl<'t> Replacer for NoExpand<'t> {
fn reg_replace<'a>(&'a mut self, _: &Captures) -> MaybeOwned<'a> {
let NoExpand(s) = *self;
Slice(s)
}
}
impl<'t> Replacer for &'t str {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a> {
Owned(caps.expand(*self))
}
}
impl<'t> Replacer for |&Captures|: 't -> String {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a> {
Owned((*self)(caps))
}
}
/// Yields all substrings delimited by a regular expression match.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplits<'r, 't> {
finder: FindMatches<'r, 't>,
last: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplits<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.finder.search;
match self.finder.next() {
None => {
if self.last >= text.len() {
None
} else {
let s = text.slice(self.last, text.len());
self.last = text.len();
Some(s)
}
}
Some((s, e)) => {
let matched = text.slice(self.last, s);
self.last = e;
Some(matched)
}
}
}
}
/// Yields at most `N` substrings delimited by a regular expression match.
///
/// The last substring will be whatever remains after splitting.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplitsN<'r, 't> {
splits: RegexSplits<'r, 't>,
cur: uint,
limit: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplitsN<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.splits.finder.search;
if self.cur >= self.limit {
None
} else {
self.cur += 1;
if self.cur >= self.limit {
Some(text.slice(self.splits.last, text.len()))
} else {
self.splits.next()
}
}
}
}
/// Captures represents a group of captured strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex.
/// If a capture group is named, then the matched string is *also* available
/// via the `name` method. (Note that the 0th capture is always unnamed and so
/// must be accessed with the `at` method.)
///
/// Positions returned from a capture group are always byte indices.
///
/// `'t` is the lifetime of the matched text.
pub struct Captures<'t> {
text: &'t str,
locs: CaptureLocs,
named: Option<HashMap<String, uint>>,
}
impl<'t> Captures<'t> {
#[allow(experimental)]
fn new(re: &Regex, search: &'t str, locs: CaptureLocs)
-> Option<Captures<'t>> {
if!has_match(&locs) {
return None
}
let named =
if re.names_len() == 0 {
None
} else {
let mut named = HashMap::new();
for (i, name) in re.names_iter().enumerate() {
match name {
None => {},
Some(name) => {
named.insert(name, i);
}
}
}
Some(named)
};
Some(Captures {
text: search,
locs: locs,
named: named,
})
}
/// Returns the start and end positions of the Nth capture group.
/// Returns `None` if `i` is not a valid capture group or if the capture
/// group did not match anything.
/// The positions returned are *always* byte indices with respect to the
/// original string matched.
pub fn pos(&self, i: uint) -> Option<(uint, uint)> {
let (s, e) = (i * 2, i * 2 + 1);
if e >= self.locs.len() || self.locs[s].is_none() {
// VM guarantees that each pair of locations are both Some or None.
return None
}
Some((self.locs[s].unwrap(), self.locs[e].unwrap()))
}
/// Returns the matched string for the capture group `i`.
/// If `i` isn't a valid capture group or didn't match anything, then the
/// empty string is returned.
pub fn at(&self, i: uint) -> &'t str {
match self.pos(i) {
None => "",
Some((s, e)) => {
self.text.slice(s, e)
}
}
}
/// Returns the matched string for the capture group named `name`.
/// If `name` isn't a valid capture group or didn't match anything, then
/// the empty string is returned.
pub fn name(&self, name: &str) -> &'t str {
match self.named {
None => "",
Some(ref h) => {
match h.find_equiv(&name) {
None => "",
Some(i) => self.at(*i),
}
}
}
}
/// Creates an iterator of all the capture groups in order of appearance
/// in the regular expression.
pub fn iter(&'t self) -> SubCaptures<'t> {
SubCaptures { idx: 0, caps: self, }
}
/// Creates an iterator of all the capture group positions in order of
/// appearance in the regular expression. Positions are byte indices
/// in terms of the original string matched.
pub fn
|
iter_pos
|
identifier_name
|
|
re.rs
|
<String>>,
#[doc(hidden)]
pub prog: Program
}
#[doc(hidden)]
pub struct ExNative {
#[doc(hidden)]
pub original: &'static str,
#[doc(hidden)]
pub names: &'static [Option<&'static str>],
#[doc(hidden)]
pub prog: fn(MatchKind, &str, uint, uint) -> Vec<Option<uint>>
}
impl Clone for ExNative {
fn clone(&self) -> ExNative { *self }
}
impl fmt::Show for Regex {
/// Shows the original regular expression.
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl Regex {
/// Compiles a dynamic regular expression. Once compiled, it can be
/// used repeatedly to search, split or replace text in a string.
///
/// When possible, you should prefer the `regex!` macro since it is
/// safer and always faster.
///
/// If an invalid expression is given, then an error is returned.
pub fn new(re: &str) -> Result<Regex, parse::Error> {
let ast = try!(parse::parse(re));
let (prog, names) = Program::new(ast);
Ok(Dynamic(ExDynamic {
original: re.to_string(),
names: names,
prog: prog,
}))
}
/// Returns true if and only if the regex matches the string given.
///
/// # Example
///
/// Test if some text contains at least one word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let matched = regex!(r"\b\w{13}\b").is_match(text);
/// assert!(matched);
/// # }
/// ```
pub fn is_match(&self, text: &str) -> bool {
has_match(&exec(self, Exists, text))
}
/// Returns the start and end byte range of the leftmost-first match in
/// `text`. If no match exists, then `None` is returned.
///
/// Note that this should only be used if you want to discover the position
/// of the match. Testing the existence of a match is faster if you use
/// `is_match`.
///
/// # Example
///
/// Find the start and end location of the first word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "I categorically deny having triskaidekaphobia.";
/// let pos = regex!(r"\b\w{13}\b").find(text);
/// assert_eq!(pos, Some((2, 15)));
/// # }
/// ```
pub fn find(&self, text: &str) -> Option<(uint, uint)> {
let caps = exec(self, Location, text);
if has_match(&caps) {
Some((caps[0].unwrap(), caps[1].unwrap()))
} else {
None
}
}
/// Returns an iterator for each successive non-overlapping match in
/// `text`, returning the start and end byte indices with respect to
/// `text`.
///
/// # Example
///
/// Find the start and end location of every word with exactly 13
/// characters:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let text = "Retroactively relinquishing remunerations is reprehensible.";
/// for pos in regex!(r"\b\w{13}\b").find_iter(text) {
/// println!("{}", pos);
/// }
/// // Output:
/// // (0, 13)
/// // (14, 27)
/// // (28, 41)
/// // (45, 58)
/// # }
/// ```
pub fn find_iter<'r, 't>(&'r self, text: &'t str) -> FindMatches<'r, 't> {
FindMatches {
re: self,
search: text,
last_end: 0,
last_match: None,
}
}
/// Returns the capture groups corresponding to the leftmost-first
/// match in `text`. Capture group `0` always corresponds to the entire
/// match. If no match is found, then `None` is returned.
///
/// You should only use `captures` if you need access to submatches.
/// Otherwise, `find` is faster for discovering the location of the overall
/// match.
///
/// # Examples
///
/// Say you have some text with movie names and their release years,
/// like "'Citizen Kane' (1941)". It'd be nice if we could search for text
/// looking like that, while also extracting the movie name and its release
/// year separately.
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'([^']+)'\s+\((\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
|
/// assert_eq!(caps.at(2), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Note that the full match is at capture group `0`. Each subsequent
/// capture group is indexed by the order of its opening `(`.
///
/// We can make this example a bit clearer by using *named* capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "Not my favorite movie: 'Citizen Kane' (1941).";
/// let caps = re.captures(text).unwrap();
/// assert_eq!(caps.name("title"), "Citizen Kane");
/// assert_eq!(caps.name("year"), "1941");
/// assert_eq!(caps.at(0), "'Citizen Kane' (1941)");
/// # }
/// ```
///
/// Here we name the capture groups, which we can access with the `name`
/// method. Note that the named capture groups are still accessible with
/// `at`.
///
/// The `0`th capture group is always unnamed, so it must always be
/// accessed with `at(0)`.
pub fn captures<'t>(&self, text: &'t str) -> Option<Captures<'t>> {
let caps = exec(self, Submatches, text);
Captures::new(self, text, caps)
}
/// Returns an iterator over all the non-overlapping capture groups matched
/// in `text`. This is operationally the same as `find_iter` (except it
/// yields information about submatches).
///
/// # Example
///
/// We can use this to find all movie titles and their release years in
/// some text, where the movie is formatted like "'Title' (xxxx)":
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"'(?P<title>[^']+)'\s+\((?P<year>\d{4})\)");
/// let text = "'Citizen Kane' (1941), 'The Wizard of Oz' (1939), 'M' (1931).";
/// for caps in re.captures_iter(text) {
/// println!("Movie: {}, Released: {}", caps.name("title"), caps.name("year"));
/// }
/// // Output:
/// // Movie: Citizen Kane, Released: 1941
/// // Movie: The Wizard of Oz, Released: 1939
/// // Movie: M, Released: 1931
/// # }
/// ```
pub fn captures_iter<'r, 't>(&'r self, text: &'t str)
-> FindCaptures<'r, 't> {
FindCaptures {
re: self,
search: text,
last_match: None,
last_end: 0,
}
}
/// Returns an iterator of substrings of `text` delimited by a match
/// of the regular expression.
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// To split a string delimited by arbitrary amounts of spaces or tabs:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"[ \t]+");
/// let fields: Vec<&str> = re.split("a b \t c\td e").collect();
/// assert_eq!(fields, vec!("a", "b", "c", "d", "e"));
/// # }
/// ```
pub fn split<'r, 't>(&'r self, text: &'t str) -> RegexSplits<'r, 't> {
RegexSplits {
finder: self.find_iter(text),
last: 0,
}
}
/// Returns an iterator of at most `limit` substrings of `text` delimited
/// by a match of the regular expression. (A `limit` of `0` will return no
/// substrings.)
/// Namely, each element of the iterator corresponds to text that *isn't*
/// matched by the regular expression.
/// The remainder of the string that is not split will be the last element
/// in the iterator.
///
/// This method will *not* copy the text given.
///
/// # Example
///
/// Get the first two words in some text:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"\W+");
/// let fields: Vec<&str> = re.splitn("Hey! How are you?", 3).collect();
/// assert_eq!(fields, vec!("Hey", "How", "are you?"));
/// # }
/// ```
pub fn splitn<'r, 't>(&'r self, text: &'t str, limit: uint)
-> RegexSplitsN<'r, 't> {
RegexSplitsN {
splits: self.split(text),
cur: 0,
limit: limit,
}
}
/// Replaces the leftmost-first match with the replacement provided.
/// The replacement can be a regular string (where `$N` and `$name` are
/// expanded to match capture groups) or a function that takes the matches'
/// `Captures` and returns the replaced string.
///
/// If no match is found, then a copy of the string is returned unchanged.
///
/// # Examples
///
/// Note that this function is polymorphic with respect to the replacement.
/// In typical usage, this can just be a normal string:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!("[^01]+");
/// assert_eq!(re.replace("1078910", "").as_slice(), "1010");
/// # }
/// ```
///
/// But anything satisfying the `Replacer` trait will work. For example,
/// a closure of type `|&Captures| -> String` provides direct access to the
/// captures corresponding to a match. This allows one to access
/// submatches easily:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # use regex::Captures; fn main() {
/// let re = regex!(r"([^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", |caps: &Captures| {
/// format!("{} {}", caps.at(2), caps.at(1))
/// });
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// But this is a bit cumbersome to use all the time. Instead, a simple
/// syntax is supported that expands `$name` into the corresponding capture
/// group. Here's the last example, but using this expansion technique
/// with named capture groups:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(?P<first>\S+)");
/// let result = re.replace("Springsteen, Bruce", "$first $last");
/// assert_eq!(result.as_slice(), "Bruce Springsteen");
/// # }
/// ```
///
/// Note that using `$2` instead of `$first` or `$1` instead of `$last`
/// would produce the same result. To write a literal `$` use `$$`.
///
/// Finally, sometimes you just want to replace a literal string with no
/// submatch expansion. This can be done by wrapping a string with
/// `NoExpand`:
///
/// ```rust
/// # #![feature(phase)]
/// # extern crate regex; #[phase(plugin)] extern crate regex_macros;
/// # fn main() {
/// use regex::NoExpand;
///
/// let re = regex!(r"(?P<last>[^,\s]+),\s+(\S+)");
/// let result = re.replace("Springsteen, Bruce", NoExpand("$2 $last"));
/// assert_eq!(result.as_slice(), "$2 $last");
/// # }
/// ```
pub fn replace<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 1, rep)
}
/// Replaces all non-overlapping matches in `text` with the
/// replacement provided. This is the same as calling `replacen` with
/// `limit` set to `0`.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replace_all<R: Replacer>(&self, text: &str, rep: R) -> String {
self.replacen(text, 0, rep)
}
/// Replaces at most `limit` non-overlapping matches in `text` with the
/// replacement provided. If `limit` is 0, then all non-overlapping matches
/// are replaced.
///
/// See the documentation for `replace` for details on how to access
/// submatches in the replacement string.
pub fn replacen<R: Replacer>
(&self, text: &str, limit: uint, mut rep: R) -> String {
let mut new = String::with_capacity(text.len());
let mut last_match = 0u;
for (i, cap) in self.captures_iter(text).enumerate() {
// It'd be nicer to use the 'take' iterator instead, but it seemed
// awkward given that '0' => no limit.
if limit > 0 && i >= limit {
break
}
let (s, e) = cap.pos(0).unwrap(); // captures only reports matches
new.push_str(text.slice(last_match, s));
new.push_str(rep.reg_replace(&cap).as_slice());
last_match = e;
}
new.push_str(text.slice(last_match, text.len()));
return new;
}
/// Returns the original string of this regex.
pub fn as_str<'a>(&'a self) -> &'a str {
match *self {
Dynamic(ExDynamic { ref original,.. }) => original.as_slice(),
Native(ExNative { ref original,.. }) => original.as_slice(),
}
}
#[doc(hidden)]
#[experimental]
pub fn names_iter<'a>(&'a self) -> NamesIter<'a> {
match *self {
Native(ref n) => NamesIterNative(n.names.iter()),
Dynamic(ref d) => NamesIterDynamic(d.names.iter())
}
}
fn names_len(&self) -> uint {
match *self {
Native(ref n) => n.names.len(),
Dynamic(ref d) => d.names.len()
}
}
}
pub enum NamesIter<'a> {
NamesIterNative(::std::slice::Items<'a, Option<&'static str>>),
NamesIterDynamic(::std::slice::Items<'a, Option<String>>)
}
impl<'a> Iterator<Option<String>> for NamesIter<'a> {
fn next(&mut self) -> Option<Option<String>> {
match *self {
NamesIterNative(ref mut i) => i.next().map(|x| x.map(|s| s.to_string())),
NamesIterDynamic(ref mut i) => i.next().map(|x| x.as_ref().map(|s| s.to_string())),
}
}
}
/// NoExpand indicates literal string replacement.
///
/// It can be used with `replace` and `replace_all` to do a literal
/// string replacement without expanding `$name` to their corresponding
/// capture groups.
///
/// `'r` is the lifetime of the literal text.
pub struct NoExpand<'t>(pub &'t str);
/// Replacer describes types that can be used to replace matches in a string.
pub trait Replacer {
/// Returns a possibly owned string that is used to replace the match
/// corresponding the the `caps` capture group.
///
/// The `'a` lifetime refers to the lifetime of a borrowed string when
/// a new owned string isn't needed (e.g., for `NoExpand`).
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a>;
}
impl<'t> Replacer for NoExpand<'t> {
fn reg_replace<'a>(&'a mut self, _: &Captures) -> MaybeOwned<'a> {
let NoExpand(s) = *self;
Slice(s)
}
}
impl<'t> Replacer for &'t str {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a> {
Owned(caps.expand(*self))
}
}
impl<'t> Replacer for |&Captures|: 't -> String {
fn reg_replace<'a>(&'a mut self, caps: &Captures) -> MaybeOwned<'a> {
Owned((*self)(caps))
}
}
/// Yields all substrings delimited by a regular expression match.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplits<'r, 't> {
finder: FindMatches<'r, 't>,
last: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplits<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.finder.search;
match self.finder.next() {
None => {
if self.last >= text.len() {
None
} else {
let s = text.slice(self.last, text.len());
self.last = text.len();
Some(s)
}
}
Some((s, e)) => {
let matched = text.slice(self.last, s);
self.last = e;
Some(matched)
}
}
}
}
/// Yields at most `N` substrings delimited by a regular expression match.
///
/// The last substring will be whatever remains after splitting.
///
/// `'r` is the lifetime of the compiled expression and `'t` is the lifetime
/// of the string being split.
pub struct RegexSplitsN<'r, 't> {
splits: RegexSplits<'r, 't>,
cur: uint,
limit: uint,
}
impl<'r, 't> Iterator<&'t str> for RegexSplitsN<'r, 't> {
fn next(&mut self) -> Option<&'t str> {
let text = self.splits.finder.search;
if self.cur >= self.limit {
None
} else {
self.cur += 1;
if self.cur >= self.limit {
Some(text.slice(self.splits.last, text.len()))
} else {
self.splits.next()
}
}
}
}
/// Captures represents a group of captured strings for a single match.
///
/// The 0th capture always corresponds to the entire match. Each subsequent
/// index corresponds to the next capture group in the regex.
/// If a capture group is named, then the matched string is *also* available
/// via the `name` method. (Note that the 0th capture is always unnamed and so
/// must be accessed with the `at` method.)
///
/// Positions returned from a capture group are always byte indices.
///
/// `'t` is the lifetime of the matched text.
pub struct Captures<'t> {
text: &'t str,
locs: CaptureLocs,
named: Option<HashMap<String, uint>>,
}
impl<'t> Captures<'t> {
#[allow(experimental)]
fn new(re: &Regex, search: &'t str, locs: CaptureLocs)
-> Option<Captures<'t>> {
if!has_match(&locs) {
return None
}
let named =
if re.names_len() == 0 {
None
} else {
let mut named = HashMap::new();
for (i, name) in re.names_iter().enumerate() {
match name {
None => {},
Some(name) => {
named.insert(name, i);
}
}
}
Some(named)
};
Some(Captures {
text: search,
locs: locs,
named: named,
})
}
/// Returns the start and end positions of the Nth capture group.
/// Returns `None` if `i` is not a valid capture group or if the capture
/// group did not match anything.
/// The positions returned are *always* byte indices with respect to the
/// original string matched.
pub fn pos(&self, i: uint) -> Option<(uint, uint)> {
let (s, e) = (i * 2, i * 2 + 1);
if e >= self.locs.len() || self.locs[s].is_none() {
// VM guarantees that each pair of locations are both Some or None.
return None
}
Some((self.locs[s].unwrap(), self.locs[e].unwrap()))
}
/// Returns the matched string for the capture group `i`.
/// If `i` isn't a valid capture group or didn't match anything, then the
/// empty string is returned.
pub fn at(&self, i: uint) -> &'t str {
match self.pos(i) {
None => "",
Some((s, e)) => {
self.text.slice(s, e)
}
}
}
/// Returns the matched string for the capture group named `name`.
/// If `name` isn't a valid capture group or didn't match anything, then
|
/// assert_eq!(caps.at(1), "Citizen Kane");
|
random_line_split
|
mod.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tools for messages authenticated with the Ed25519 public-key crypto system.
//! These messages are used by the P2P networking and for transaction authentication by external
//! clients.
//!
//! Every message passes through three phases:
//!
//! 1. `Vec<u8>`: raw bytes as received from the network
//! 2. `SignedMessage`: integrity and the signature of the message has been verified
//! 3. `impl IntoMessage`: the message has been completely parsed
//!
//! Graphical representation of the message processing flow:
//!
//! ```text
//! +---------+ +---------------+ +------------------+
//! | Vec<u8> |--(verify)-->| SignedMessage |--(deserialize)-->| impl IntoMessage |-->(handle)
//! +---------+ | +---------------+ | +------------------+
//! | |
//! V V
//! (drop) (drop)
//!
//! ```
//!
//! # Examples
//!
//! A new signed message can be created as follows.
//!
//! ```
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{Precommit, Verified},
//! # };
//! # fn send<T>(_: T) {}
//! let keypair = KeyPair::random();
//! // For example, let's create a `Precommit` message.
//! let payload = Precommit::new(
//! ValidatorId(0),
//! Height(15),
//! Round::first(),
//! hash(b"propose_hash"),
//! hash(b"block_hash"),
//! Utc::now(),
//! );
//! // Sign the message with some keypair to get a trusted `Precommit` message.
//! let signed_payload = Verified::from_value(payload, keypair.public_key(), keypair.secret_key());
//! // Further, convert the trusted message into a raw signed message and send
//! // it through the network.
//! let raw_signed_message = signed_payload.into_raw();
//! send(raw_signed_message);
//! ```
//!
//! A signed message can be verified as follows:
//!
//! ```
//! # use assert_matches::assert_matches;
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{CoreMessage, Precommit, Verified, SignedMessage},
//! # };
//! # fn get_signed_message() -> SignedMessage {
//! # let keypair = KeyPair::random();
//! # let payload = Precommit::new(
//! # ValidatorId(0),
//! # Height(15),
//! # Round::first(),
//! # hash(b"propose_hash"),
//! # hash(b"block_hash"),
//! # Utc::now(),
//! # );
//! # Verified::from_value(payload, keypair.public_key(), keypair.secret_key()).into_raw()
//! # }
//! // Assume you have some signed message.
//! let raw: SignedMessage = get_signed_message();
//! // You know that this is a type of `CoreMessage`, so you can
//! // verify its signature and convert it into `CoreMessage`.
//! let verified = raw.into_verified::<CoreMessage>().expect("verification failed");
//! // Further, check whether it is a `Precommit` message.
//! assert_matches!(
//! verified.payload(),
//! CoreMessage::Precommit(ref precommit) if precommit.epoch == Height(15)
//! );
//! ```
pub use self::{
signed::{IntoMessage, Verified},
types::*,
};
use crate::crypto::{PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH};
mod signed;
mod types;
/// Lower bound on the size of the correct `SignedMessage`.
/// This is the size of message fields + Protobuf overhead.
#[doc(hidden)]
pub const SIGNED_MESSAGE_MIN_SIZE: usize = PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH + 8;
#[cfg(test)]
mod tests {
use chrono::Utc;
use exonum_crypto::{self as crypto, KeyPair};
use exonum_merkledb::BinaryValue;
use exonum_proto::ProtobufConvert;
use protobuf::Message;
use super::{Precommit, SignedMessage, Verified, SIGNED_MESSAGE_MIN_SIZE};
use crate::{
helpers::{Height, Round, ValidatorId},
proto::schema::messages as proto,
};
#[test]
fn test_signed_message_min_size()
|
#[test]
fn test_message_roundtrip() {
let keys = KeyPair::random();
let ts = Utc::now();
let msg = Verified::from_value(
Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
ts,
),
keys.public_key(),
keys.secret_key(),
);
let bytes = msg.to_bytes();
let message =
SignedMessage::from_bytes(bytes.into()).expect("Cannot deserialize signed message");
let msg_roundtrip = message
.into_verified::<Precommit>()
.expect("Failed to check precommit");
assert_eq!(msg, msg_roundtrip);
}
#[test]
fn test_signed_message_unusual_protobuf() {
let keys = KeyPair::random();
let mut ex_msg = proto::CoreMessage::new();
let precommit_msg = Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
Utc::now(),
);
ex_msg.set_precommit(precommit_msg.to_pb());
let mut payload = ex_msg.write_to_bytes().unwrap();
// Duplicate pb serialization to create unusual but correct protobuf message.
payload.append(&mut payload.clone());
let signed = SignedMessage::new(payload, keys.public_key(), keys.secret_key());
let bytes = signed.into_bytes();
let message =
SignedMessage::from_bytes(bytes.into()).expect("Cannot deserialize signed message");
let deserialized_precommit = message
.into_verified::<Precommit>()
.expect("Failed to check precommit");
assert_eq!(precommit_msg, *deserialized_precommit.payload());
}
#[test]
fn test_precommit_serde_correct() {
let keys = KeyPair::random();
let ts = Utc::now();
let precommit = Verified::from_value(
Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
ts,
),
keys.public_key(),
keys.secret_key(),
);
let precommit_json = serde_json::to_string(&precommit).unwrap();
let precommit2: Verified<Precommit> = serde_json::from_str(&precommit_json).unwrap();
assert_eq!(precommit2, precommit);
}
}
|
{
let keys = KeyPair::random();
let msg = SignedMessage::new(vec![], keys.public_key(), keys.secret_key());
assert_eq!(SIGNED_MESSAGE_MIN_SIZE, msg.into_bytes().len());
}
|
identifier_body
|
mod.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Tools for messages authenticated with the Ed25519 public-key crypto system.
//! These messages are used by the P2P networking and for transaction authentication by external
//! clients.
//!
//! Every message passes through three phases:
//!
//! 1. `Vec<u8>`: raw bytes as received from the network
//! 2. `SignedMessage`: integrity and the signature of the message has been verified
//! 3. `impl IntoMessage`: the message has been completely parsed
//!
//! Graphical representation of the message processing flow:
//!
//! ```text
//! +---------+ +---------------+ +------------------+
//! | Vec<u8> |--(verify)-->| SignedMessage |--(deserialize)-->| impl IntoMessage |-->(handle)
//! +---------+ | +---------------+ | +------------------+
//! | |
//! V V
//! (drop) (drop)
//!
//! ```
//!
//! # Examples
//!
//! A new signed message can be created as follows.
//!
//! ```
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{Precommit, Verified},
//! # };
//! # fn send<T>(_: T) {}
//! let keypair = KeyPair::random();
//! // For example, let's create a `Precommit` message.
//! let payload = Precommit::new(
//! ValidatorId(0),
//! Height(15),
//! Round::first(),
//! hash(b"propose_hash"),
//! hash(b"block_hash"),
//! Utc::now(),
//! );
//! // Sign the message with some keypair to get a trusted `Precommit` message.
//! let signed_payload = Verified::from_value(payload, keypair.public_key(), keypair.secret_key());
//! // Further, convert the trusted message into a raw signed message and send
//! // it through the network.
//! let raw_signed_message = signed_payload.into_raw();
//! send(raw_signed_message);
//! ```
//!
//! A signed message can be verified as follows:
//!
//! ```
//! # use assert_matches::assert_matches;
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{CoreMessage, Precommit, Verified, SignedMessage},
//! # };
//! # fn get_signed_message() -> SignedMessage {
//! # let keypair = KeyPair::random();
//! # let payload = Precommit::new(
//! # ValidatorId(0),
//! # Height(15),
//! # Round::first(),
//! # hash(b"propose_hash"),
//! # hash(b"block_hash"),
//! # Utc::now(),
//! # );
//! # Verified::from_value(payload, keypair.public_key(), keypair.secret_key()).into_raw()
//! # }
//! // Assume you have some signed message.
//! let raw: SignedMessage = get_signed_message();
//! // You know that this is a type of `CoreMessage`, so you can
//! // verify its signature and convert it into `CoreMessage`.
//! let verified = raw.into_verified::<CoreMessage>().expect("verification failed");
//! // Further, check whether it is a `Precommit` message.
//! assert_matches!(
//! verified.payload(),
//! CoreMessage::Precommit(ref precommit) if precommit.epoch == Height(15)
//! );
//! ```
pub use self::{
signed::{IntoMessage, Verified},
types::*,
};
use crate::crypto::{PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH};
mod signed;
mod types;
/// Lower bound on the size of the correct `SignedMessage`.
/// This is the size of message fields + Protobuf overhead.
#[doc(hidden)]
pub const SIGNED_MESSAGE_MIN_SIZE: usize = PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH + 8;
#[cfg(test)]
mod tests {
use chrono::Utc;
use exonum_crypto::{self as crypto, KeyPair};
use exonum_merkledb::BinaryValue;
use exonum_proto::ProtobufConvert;
use protobuf::Message;
use super::{Precommit, SignedMessage, Verified, SIGNED_MESSAGE_MIN_SIZE};
use crate::{
helpers::{Height, Round, ValidatorId},
proto::schema::messages as proto,
};
#[test]
fn test_signed_message_min_size() {
let keys = KeyPair::random();
let msg = SignedMessage::new(vec![], keys.public_key(), keys.secret_key());
assert_eq!(SIGNED_MESSAGE_MIN_SIZE, msg.into_bytes().len());
}
#[test]
fn
|
() {
let keys = KeyPair::random();
let ts = Utc::now();
let msg = Verified::from_value(
Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
ts,
),
keys.public_key(),
keys.secret_key(),
);
let bytes = msg.to_bytes();
let message =
SignedMessage::from_bytes(bytes.into()).expect("Cannot deserialize signed message");
let msg_roundtrip = message
.into_verified::<Precommit>()
.expect("Failed to check precommit");
assert_eq!(msg, msg_roundtrip);
}
#[test]
fn test_signed_message_unusual_protobuf() {
let keys = KeyPair::random();
let mut ex_msg = proto::CoreMessage::new();
let precommit_msg = Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
Utc::now(),
);
ex_msg.set_precommit(precommit_msg.to_pb());
let mut payload = ex_msg.write_to_bytes().unwrap();
// Duplicate pb serialization to create unusual but correct protobuf message.
payload.append(&mut payload.clone());
let signed = SignedMessage::new(payload, keys.public_key(), keys.secret_key());
let bytes = signed.into_bytes();
let message =
SignedMessage::from_bytes(bytes.into()).expect("Cannot deserialize signed message");
let deserialized_precommit = message
.into_verified::<Precommit>()
.expect("Failed to check precommit");
assert_eq!(precommit_msg, *deserialized_precommit.payload());
}
#[test]
fn test_precommit_serde_correct() {
let keys = KeyPair::random();
let ts = Utc::now();
let precommit = Verified::from_value(
Precommit::new(
ValidatorId(123),
Height(15),
Round(25),
crypto::hash(&[1, 2, 3]),
crypto::hash(&[3, 2, 1]),
ts,
),
keys.public_key(),
keys.secret_key(),
);
let precommit_json = serde_json::to_string(&precommit).unwrap();
let precommit2: Verified<Precommit> = serde_json::from_str(&precommit_json).unwrap();
assert_eq!(precommit2, precommit);
}
}
|
test_message_roundtrip
|
identifier_name
|
mod.rs
|
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
|
//!
//! 1. `Vec<u8>`: raw bytes as received from the network
//! 2. `SignedMessage`: integrity and the signature of the message has been verified
//! 3. `impl IntoMessage`: the message has been completely parsed
//!
//! Graphical representation of the message processing flow:
//!
//! ```text
//! +---------+ +---------------+ +------------------+
//! | Vec<u8> |--(verify)-->| SignedMessage |--(deserialize)-->| impl IntoMessage |-->(handle)
//! +---------+ | +---------------+ | +------------------+
//! | |
//! V V
//! (drop) (drop)
//!
//! ```
//!
//! # Examples
//!
//! A new signed message can be created as follows.
//!
//! ```
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{Precommit, Verified},
//! # };
//! # fn send<T>(_: T) {}
//! let keypair = KeyPair::random();
//! // For example, let's create a `Precommit` message.
//! let payload = Precommit::new(
//! ValidatorId(0),
//! Height(15),
//! Round::first(),
//! hash(b"propose_hash"),
//! hash(b"block_hash"),
//! Utc::now(),
//! );
//! // Sign the message with some keypair to get a trusted `Precommit` message.
//! let signed_payload = Verified::from_value(payload, keypair.public_key(), keypair.secret_key());
//! // Further, convert the trusted message into a raw signed message and send
//! // it through the network.
//! let raw_signed_message = signed_payload.into_raw();
//! send(raw_signed_message);
//! ```
//!
//! A signed message can be verified as follows:
//!
//! ```
//! # use assert_matches::assert_matches;
//! # use chrono::Utc;
//! # use exonum::{
//! # crypto::{hash, Hash, KeyPair},
//! # helpers::{Height, Round, ValidatorId},
//! # messages::{CoreMessage, Precommit, Verified, SignedMessage},
//! # };
//! # fn get_signed_message() -> SignedMessage {
//! # let keypair = KeyPair::random();
//! # let payload = Precommit::new(
//! # ValidatorId(0),
//! # Height(15),
//! # Round::first(),
//! # hash(b"propose_hash"),
//! # hash(b"block_hash"),
//! # Utc::now(),
//! # );
//! # Verified::from_value(payload, keypair.public_key(), keypair.secret_key()).into_raw()
//! # }
//! // Assume you have some signed message.
//! let raw: SignedMessage = get_signed_message();
//! // You know that this is a type of `CoreMessage`, so you can
//! // verify its signature and convert it into `CoreMessage`.
//! let verified = raw.into_verified::<CoreMessage>().expect("verification failed");
//! // Further, check whether it is a `Precommit` message.
//! assert_matches!(
//! verified.payload(),
//! CoreMessage::Precommit(ref precommit) if precommit.epoch == Height(15)
//! );
//! ```
pub use self::{
signed::{IntoMessage, Verified},
types::*,
};
use crate::crypto::{PUBLIC_KEY_LENGTH, SIGNATURE_LENGTH};
mod signed;
mod types;
/// Lower bound on the size of the correct `SignedMessage`.
/// This is the size of message fields + Protobuf overhead.
#[doc(hidden)]
pub const SIGNED_MESSAGE_MIN_SIZE: usize = PUBLIC_KEY_LENGTH + SIGNATURE_LENGTH + 8;
#[cfg(test)]
mod tests {
    use chrono::Utc;
    use exonum_crypto::{self as crypto, KeyPair};
    use exonum_merkledb::BinaryValue;
    use exonum_proto::ProtobufConvert;
    use protobuf::Message;
    use super::{Precommit, SignedMessage, Verified, SIGNED_MESSAGE_MIN_SIZE};
    use crate::{
        helpers::{Height, Round, ValidatorId},
        proto::schema::messages as proto,
    };

    /// Sample `Precommit` payload shared by the tests below.
    fn sample_precommit(ts: chrono::DateTime<Utc>) -> Precommit {
        Precommit::new(
            ValidatorId(123),
            Height(15),
            Round(25),
            crypto::hash(&[1, 2, 3]),
            crypto::hash(&[3, 2, 1]),
            ts,
        )
    }

    /// A message with an empty payload must serialize to exactly the
    /// declared minimum size.
    #[test]
    fn test_signed_message_min_size() {
        let keypair = KeyPair::random();
        let signed = SignedMessage::new(vec![], keypair.public_key(), keypair.secret_key());
        assert_eq!(SIGNED_MESSAGE_MIN_SIZE, signed.into_bytes().len());
    }

    /// A verified message survives a serialize / deserialize / verify
    /// round trip unchanged.
    #[test]
    fn test_message_roundtrip() {
        let keypair = KeyPair::random();
        let original = Verified::from_value(
            sample_precommit(Utc::now()),
            keypair.public_key(),
            keypair.secret_key(),
        );
        let restored = SignedMessage::from_bytes(original.to_bytes().into())
            .expect("Cannot deserialize signed message")
            .into_verified::<Precommit>()
            .expect("Failed to check precommit");
        assert_eq!(original, restored);
    }

    /// A non-canonical (but still valid) Protobuf encoding of the payload
    /// must verify and deserialize correctly.
    #[test]
    fn test_signed_message_unusual_protobuf() {
        let keypair = KeyPair::random();
        let precommit = sample_precommit(Utc::now());
        let mut core_msg = proto::CoreMessage::new();
        core_msg.set_precommit(precommit.to_pb());
        let mut payload = core_msg.write_to_bytes().unwrap();
        // Duplicate pb serialization to create unusual but correct protobuf message.
        payload.append(&mut payload.clone());
        let signed = SignedMessage::new(payload, keypair.public_key(), keypair.secret_key());
        let verified = SignedMessage::from_bytes(signed.into_bytes().into())
            .expect("Cannot deserialize signed message")
            .into_verified::<Precommit>()
            .expect("Failed to check precommit");
        assert_eq!(precommit, *verified.payload());
    }

    /// A verified `Precommit` survives a JSON round trip via serde.
    #[test]
    fn test_precommit_serde_correct() {
        let keypair = KeyPair::random();
        let precommit = Verified::from_value(
            sample_precommit(Utc::now()),
            keypair.public_key(),
            keypair.secret_key(),
        );
        let json = serde_json::to_string(&precommit).unwrap();
        let parsed: Verified<Precommit> = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, precommit);
    }
}
|
//! Tools for messages authenticated with the Ed25519 public-key crypto system.
//! These messages are used by the P2P networking and for transaction authentication by external
//! clients.
//!
//! Every message passes through three phases:
|
random_line_split
|
blockdev.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Code to handle a single block device.
use std::fs::OpenOptions;
use std::path::PathBuf;
use chrono::{DateTime, TimeZone, Utc};
use devicemapper::{Device, Sectors};
use stratis::StratisResult;
use super::super::super::engine::BlockDev;
use super::super::super::event::{get_engine_listener_list, EngineEvent};
use super::super::super::types::{BlockDevState, DevUuid, MaybeDbusPath, PoolUuid};
use super::super::serde_structs::{BaseBlockDevSave, Recordable};
use super::metadata::BDA;
use super::range_alloc::RangeAllocator;
/// A single Stratis block device.
#[derive(Debug)]
pub struct StratBlockDev {
    // The device, identified by number (see `StratBlockDev::new`).
    dev: Device,
    // Path to the device node.
    pub(super) devnode: PathBuf,
    // The device's BDA, which holds the Stratis on-disk metadata.
    bda: BDA,
    // Allocator tracking which sector ranges on the device are in use.
    used: RangeAllocator,
    // User-settable identifying information, if any.
    user_info: Option<String>,
    // Identifying information from the hardware, if any.
    hardware_info: Option<String>,
    // D-Bus path, if one has been set via `set_dbus_path`.
    dbus_path: MaybeDbusPath,
}
impl StratBlockDev {
/// Make a new BlockDev from the parameters.
/// Allocate space for the Stratis metadata on the device.
/// - dev: the device, identified by number
/// - devnode: the device node
/// - bda: the device's BDA
/// - other_segments: segments claimed for non-Stratis metadata use
/// - user_info: user settable identifying information
/// - hardware_info: identifying information in the hardware
/// Returns an error if it is impossible to allocate all segments on the
/// device.
/// NOTE: It is possible that the actual device size is greater than
/// the recorded device size. In that case, the additional space available
/// on the device is simply invisible to the blockdev. Consequently, it
/// is invisible to the engine, and is not part of the total size value
/// reported on the D-Bus.
pub fn new(
dev: Device,
devnode: PathBuf,
bda: BDA,
upper_segments: &[(Sectors, Sectors)],
user_info: Option<String>,
hardware_info: Option<String>,
) -> StratisResult<StratBlockDev> {
let mut segments = vec![(Sectors(0), bda.size())];
segments.extend(upper_segments);
let allocator = RangeAllocator::new(bda.dev_size(), &segments)?;
Ok(StratBlockDev {
dev,
devnode,
bda,
used: allocator,
user_info,
hardware_info,
dbus_path: MaybeDbusPath(None),
})
}
/// Returns the blockdev's Device
pub fn device(&self) -> &Device {
&self.dev
}
pub fn wipe_metadata(&self) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
BDA::wipe(&mut f)
}
pub fn save_state(&mut self, time: &DateTime<Utc>, metadata: &[u8]) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
self.bda.save_state(time, metadata, &mut f)
}
/// The device's UUID.
pub fn uuid(&self) -> DevUuid {
self.bda.dev_uuid()
}
/// The device's pool's UUID.
#[allow(dead_code)]
pub fn pool_uuid(&self) -> PoolUuid {
self.bda.pool_uuid()
}
/// Last time metadata was written to this device.
#[allow(dead_code)]
pub fn last_update_time(&self) -> Option<&DateTime<Utc>> {
self.bda.last_update_time()
}
/// Find some sector ranges that could be allocated. If more
/// sectors are needed than are available, return partial results.
/// If all sectors are desired, use available() method to get all.
pub fn request_space(&mut self, size: Sectors) -> (Sectors, Vec<(Sectors, Sectors)>) {
let prev_state = self.state();
let result = self.used.request(size);
if result.0 > Sectors(0) && prev_state!= BlockDevState::InUse {
get_engine_listener_list().notify(&EngineEvent::BlockdevStateChanged {
dbus_path: self.get_dbus_path(),
state: BlockDevState::InUse,
});
}
result
}
// ALL SIZE METHODS (except size(), which is in BlockDev impl.)
/// The number of Sectors on this device used by Stratis for metadata
pub fn metadata_size(&self) -> Sectors {
self.bda.size()
}
/// The number of Sectors on this device not allocated for any purpose.
/// self.size() - self.metadata_size() >= self.available()
pub fn available(&self) -> Sectors {
self.used.available()
}
/// The maximum size of variable length metadata that can be accommodated.
/// self.max_metadata_size() < self.metadata_size()
pub fn max_metadata_size(&self) -> Sectors {
self.bda.max_data_size()
}
/// Set the user info on this blockdev.
/// The user_info may be None, which unsets user info.
/// Returns true if the user info was changed, otherwise false.
pub fn set_user_info(&mut self, user_info: Option<&str>) -> bool {
set_blockdev_user_info!(self; user_info)
}
}
impl BlockDev for StratBlockDev {
fn devnode(&self) -> PathBuf {
self.devnode.clone()
}
fn user_info(&self) -> Option<&str> {
self.user_info.as_ref().map(|x| &**x)
}
fn hardware_info(&self) -> Option<&str> {
self.hardware_info.as_ref().map(|x| &**x)
}
fn initialization_time(&self) -> DateTime<Utc> {
// This cast will result in an incorrect, negative value starting in
// the year 292,277,026,596. :-)
Utc.timestamp(self.bda.initialization_time() as i64, 0)
}
fn
|
(&self) -> Sectors {
let size = self.used.size();
assert_eq!(self.bda.dev_size(), size);
size
}
fn state(&self) -> BlockDevState {
// TODO: Implement support for other BlockDevStates
if self.used.used() > self.bda.size() {
BlockDevState::InUse
} else {
BlockDevState::NotInUse
}
}
fn set_dbus_path(&mut self, path: MaybeDbusPath) {
self.dbus_path = path
}
fn get_dbus_path(&self) -> &MaybeDbusPath {
&self.dbus_path
}
}
impl Recordable<BaseBlockDevSave> for StratBlockDev {
    /// Snapshot the fields that are persisted in pool-level metadata.
    fn record(&self) -> BaseBlockDevSave {
        let uuid = self.uuid();
        let user_info = self.user_info.clone();
        let hardware_info = self.hardware_info.clone();
        BaseBlockDevSave {
            uuid,
            user_info,
            hardware_info,
        }
    }
}
|
size
|
identifier_name
|
blockdev.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Code to handle a single block device.
use std::fs::OpenOptions;
use std::path::PathBuf;
use chrono::{DateTime, TimeZone, Utc};
use devicemapper::{Device, Sectors};
use stratis::StratisResult;
use super::super::super::engine::BlockDev;
use super::super::super::event::{get_engine_listener_list, EngineEvent};
use super::super::super::types::{BlockDevState, DevUuid, MaybeDbusPath, PoolUuid};
use super::super::serde_structs::{BaseBlockDevSave, Recordable};
use super::metadata::BDA;
use super::range_alloc::RangeAllocator;
/// A single Stratis block device.
#[derive(Debug)]
pub struct StratBlockDev {
    // The device, identified by number (see `StratBlockDev::new`).
    dev: Device,
    // Path to the device node.
    pub(super) devnode: PathBuf,
    // The device's BDA, which holds the Stratis on-disk metadata.
    bda: BDA,
    // Allocator tracking which sector ranges on the device are in use.
    used: RangeAllocator,
    // User-settable identifying information, if any.
    user_info: Option<String>,
    // Identifying information from the hardware, if any.
    hardware_info: Option<String>,
    // D-Bus path, if one has been set via `set_dbus_path`.
    dbus_path: MaybeDbusPath,
}
impl StratBlockDev {
/// Make a new BlockDev from the parameters.
/// Allocate space for the Stratis metadata on the device.
/// - dev: the device, identified by number
/// - devnode: the device node
/// - bda: the device's BDA
/// - other_segments: segments claimed for non-Stratis metadata use
/// - user_info: user settable identifying information
/// - hardware_info: identifying information in the hardware
/// Returns an error if it is impossible to allocate all segments on the
/// device.
/// NOTE: It is possible that the actual device size is greater than
/// the recorded device size. In that case, the additional space available
/// on the device is simply invisible to the blockdev. Consequently, it
/// is invisible to the engine, and is not part of the total size value
/// reported on the D-Bus.
pub fn new(
dev: Device,
devnode: PathBuf,
bda: BDA,
upper_segments: &[(Sectors, Sectors)],
user_info: Option<String>,
hardware_info: Option<String>,
) -> StratisResult<StratBlockDev> {
let mut segments = vec![(Sectors(0), bda.size())];
segments.extend(upper_segments);
let allocator = RangeAllocator::new(bda.dev_size(), &segments)?;
Ok(StratBlockDev {
dev,
devnode,
bda,
used: allocator,
user_info,
hardware_info,
dbus_path: MaybeDbusPath(None),
})
}
/// Returns the blockdev's Device
pub fn device(&self) -> &Device {
&self.dev
}
pub fn wipe_metadata(&self) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
BDA::wipe(&mut f)
}
pub fn save_state(&mut self, time: &DateTime<Utc>, metadata: &[u8]) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
self.bda.save_state(time, metadata, &mut f)
}
/// The device's UUID.
pub fn uuid(&self) -> DevUuid {
self.bda.dev_uuid()
}
/// The device's pool's UUID.
#[allow(dead_code)]
pub fn pool_uuid(&self) -> PoolUuid {
self.bda.pool_uuid()
}
/// Last time metadata was written to this device.
#[allow(dead_code)]
pub fn last_update_time(&self) -> Option<&DateTime<Utc>> {
self.bda.last_update_time()
}
/// Find some sector ranges that could be allocated. If more
/// sectors are needed than are available, return partial results.
/// If all sectors are desired, use available() method to get all.
pub fn request_space(&mut self, size: Sectors) -> (Sectors, Vec<(Sectors, Sectors)>) {
let prev_state = self.state();
let result = self.used.request(size);
if result.0 > Sectors(0) && prev_state!= BlockDevState::InUse {
get_engine_listener_list().notify(&EngineEvent::BlockdevStateChanged {
dbus_path: self.get_dbus_path(),
state: BlockDevState::InUse,
});
}
result
}
// ALL SIZE METHODS (except size(), which is in BlockDev impl.)
/// The number of Sectors on this device used by Stratis for metadata
pub fn metadata_size(&self) -> Sectors {
self.bda.size()
}
/// The number of Sectors on this device not allocated for any purpose.
/// self.size() - self.metadata_size() >= self.available()
pub fn available(&self) -> Sectors {
self.used.available()
}
/// The maximum size of variable length metadata that can be accommodated.
/// self.max_metadata_size() < self.metadata_size()
pub fn max_metadata_size(&self) -> Sectors
|
/// Set the user info on this blockdev.
/// The user_info may be None, which unsets user info.
/// Returns true if the user info was changed, otherwise false.
pub fn set_user_info(&mut self, user_info: Option<&str>) -> bool {
set_blockdev_user_info!(self; user_info)
}
}
impl BlockDev for StratBlockDev {
    fn devnode(&self) -> PathBuf {
        self.devnode.clone()
    }

    fn user_info(&self) -> Option<&str> {
        self.user_info.as_deref()
    }

    fn hardware_info(&self) -> Option<&str> {
        self.hardware_info.as_deref()
    }

    fn initialization_time(&self) -> DateTime<Utc> {
        // This cast will result in an incorrect, negative value starting in
        // the year 292,277,026,596. :-)
        Utc.timestamp(self.bda.initialization_time() as i64, 0)
    }

    fn size(&self) -> Sectors {
        let total = self.used.size();
        // The allocator and the BDA must agree on the device size.
        assert_eq!(self.bda.dev_size(), total);
        total
    }

    fn state(&self) -> BlockDevState {
        // TODO: Implement support for other BlockDevStates
        if self.used.used() > self.bda.size() {
            BlockDevState::InUse
        } else {
            BlockDevState::NotInUse
        }
    }

    fn set_dbus_path(&mut self, path: MaybeDbusPath) {
        self.dbus_path = path
    }

    fn get_dbus_path(&self) -> &MaybeDbusPath {
        &self.dbus_path
    }
}
impl Recordable<BaseBlockDevSave> for StratBlockDev {
    /// Snapshot the fields that are persisted in pool-level metadata.
    fn record(&self) -> BaseBlockDevSave {
        let uuid = self.uuid();
        let user_info = self.user_info.clone();
        let hardware_info = self.hardware_info.clone();
        BaseBlockDevSave {
            uuid,
            user_info,
            hardware_info,
        }
    }
}
|
{
self.bda.max_data_size()
}
|
identifier_body
|
blockdev.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Code to handle a single block device.
use std::fs::OpenOptions;
use std::path::PathBuf;
use chrono::{DateTime, TimeZone, Utc};
use devicemapper::{Device, Sectors};
use stratis::StratisResult;
use super::super::super::engine::BlockDev;
use super::super::super::event::{get_engine_listener_list, EngineEvent};
use super::super::super::types::{BlockDevState, DevUuid, MaybeDbusPath, PoolUuid};
use super::super::serde_structs::{BaseBlockDevSave, Recordable};
use super::metadata::BDA;
use super::range_alloc::RangeAllocator;
/// A single Stratis block device.
#[derive(Debug)]
pub struct StratBlockDev {
    // The device, identified by number (see `StratBlockDev::new`).
    dev: Device,
    // Path to the device node.
    pub(super) devnode: PathBuf,
    // The device's BDA, which holds the Stratis on-disk metadata.
    bda: BDA,
    // Allocator tracking which sector ranges on the device are in use.
    used: RangeAllocator,
    // User-settable identifying information, if any.
    user_info: Option<String>,
    // Identifying information from the hardware, if any.
    hardware_info: Option<String>,
    // D-Bus path, if one has been set via `set_dbus_path`.
    dbus_path: MaybeDbusPath,
}
impl StratBlockDev {
/// Make a new BlockDev from the parameters.
/// Allocate space for the Stratis metadata on the device.
/// - dev: the device, identified by number
/// - devnode: the device node
/// - bda: the device's BDA
/// - other_segments: segments claimed for non-Stratis metadata use
/// - user_info: user settable identifying information
/// - hardware_info: identifying information in the hardware
/// Returns an error if it is impossible to allocate all segments on the
/// device.
/// NOTE: It is possible that the actual device size is greater than
/// the recorded device size. In that case, the additional space available
/// on the device is simply invisible to the blockdev. Consequently, it
/// is invisible to the engine, and is not part of the total size value
/// reported on the D-Bus.
pub fn new(
dev: Device,
devnode: PathBuf,
bda: BDA,
upper_segments: &[(Sectors, Sectors)],
user_info: Option<String>,
hardware_info: Option<String>,
) -> StratisResult<StratBlockDev> {
let mut segments = vec![(Sectors(0), bda.size())];
segments.extend(upper_segments);
let allocator = RangeAllocator::new(bda.dev_size(), &segments)?;
Ok(StratBlockDev {
dev,
devnode,
bda,
used: allocator,
user_info,
hardware_info,
dbus_path: MaybeDbusPath(None),
})
}
/// Returns the blockdev's Device
pub fn device(&self) -> &Device {
&self.dev
}
pub fn wipe_metadata(&self) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
BDA::wipe(&mut f)
}
pub fn save_state(&mut self, time: &DateTime<Utc>, metadata: &[u8]) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
self.bda.save_state(time, metadata, &mut f)
}
/// The device's UUID.
pub fn uuid(&self) -> DevUuid {
self.bda.dev_uuid()
}
/// The device's pool's UUID.
#[allow(dead_code)]
pub fn pool_uuid(&self) -> PoolUuid {
self.bda.pool_uuid()
}
/// Last time metadata was written to this device.
#[allow(dead_code)]
pub fn last_update_time(&self) -> Option<&DateTime<Utc>> {
self.bda.last_update_time()
}
/// Find some sector ranges that could be allocated. If more
/// sectors are needed than are available, return partial results.
/// If all sectors are desired, use available() method to get all.
pub fn request_space(&mut self, size: Sectors) -> (Sectors, Vec<(Sectors, Sectors)>) {
let prev_state = self.state();
let result = self.used.request(size);
if result.0 > Sectors(0) && prev_state!= BlockDevState::InUse {
get_engine_listener_list().notify(&EngineEvent::BlockdevStateChanged {
dbus_path: self.get_dbus_path(),
state: BlockDevState::InUse,
});
}
result
}
// ALL SIZE METHODS (except size(), which is in BlockDev impl.)
/// The number of Sectors on this device used by Stratis for metadata
pub fn metadata_size(&self) -> Sectors {
self.bda.size()
}
/// The number of Sectors on this device not allocated for any purpose.
/// self.size() - self.metadata_size() >= self.available()
pub fn available(&self) -> Sectors {
self.used.available()
}
/// The maximum size of variable length metadata that can be accommodated.
/// self.max_metadata_size() < self.metadata_size()
pub fn max_metadata_size(&self) -> Sectors {
self.bda.max_data_size()
}
/// Set the user info on this blockdev.
/// The user_info may be None, which unsets user info.
/// Returns true if the user info was changed, otherwise false.
pub fn set_user_info(&mut self, user_info: Option<&str>) -> bool {
set_blockdev_user_info!(self; user_info)
}
}
impl BlockDev for StratBlockDev {
    fn devnode(&self) -> PathBuf {
        self.devnode.clone()
    }

    fn user_info(&self) -> Option<&str> {
        self.user_info.as_deref()
    }

    fn hardware_info(&self) -> Option<&str> {
        self.hardware_info.as_deref()
    }

    fn initialization_time(&self) -> DateTime<Utc> {
        // This cast will result in an incorrect, negative value starting in
        // the year 292,277,026,596. :-)
        Utc.timestamp(self.bda.initialization_time() as i64, 0)
    }

    fn size(&self) -> Sectors {
        let total = self.used.size();
        // The allocator and the BDA must agree on the device size.
        assert_eq!(self.bda.dev_size(), total);
        total
    }

    fn state(&self) -> BlockDevState {
        // TODO: Implement support for other BlockDevStates
        if self.used.used() > self.bda.size() {
            BlockDevState::InUse
        } else {
            BlockDevState::NotInUse
        }
    }

    fn set_dbus_path(&mut self, path: MaybeDbusPath) {
        self.dbus_path = path
    }

    fn get_dbus_path(&self) -> &MaybeDbusPath {
        &self.dbus_path
    }
}
impl Recordable<BaseBlockDevSave> for StratBlockDev {
fn record(&self) -> BaseBlockDevSave {
BaseBlockDevSave {
uuid: self.uuid(),
user_info: self.user_info.clone(),
hardware_info: self.hardware_info.clone(),
}
|
}
}
|
random_line_split
|
|
blockdev.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
// Code to handle a single block device.
use std::fs::OpenOptions;
use std::path::PathBuf;
use chrono::{DateTime, TimeZone, Utc};
use devicemapper::{Device, Sectors};
use stratis::StratisResult;
use super::super::super::engine::BlockDev;
use super::super::super::event::{get_engine_listener_list, EngineEvent};
use super::super::super::types::{BlockDevState, DevUuid, MaybeDbusPath, PoolUuid};
use super::super::serde_structs::{BaseBlockDevSave, Recordable};
use super::metadata::BDA;
use super::range_alloc::RangeAllocator;
/// A single Stratis block device.
#[derive(Debug)]
pub struct StratBlockDev {
    // The device, identified by number (see `StratBlockDev::new`).
    dev: Device,
    // Path to the device node.
    pub(super) devnode: PathBuf,
    // The device's BDA, which holds the Stratis on-disk metadata.
    bda: BDA,
    // Allocator tracking which sector ranges on the device are in use.
    used: RangeAllocator,
    // User-settable identifying information, if any.
    user_info: Option<String>,
    // Identifying information from the hardware, if any.
    hardware_info: Option<String>,
    // D-Bus path, if one has been set via `set_dbus_path`.
    dbus_path: MaybeDbusPath,
}
impl StratBlockDev {
/// Make a new BlockDev from the parameters.
/// Allocate space for the Stratis metadata on the device.
/// - dev: the device, identified by number
/// - devnode: the device node
/// - bda: the device's BDA
/// - other_segments: segments claimed for non-Stratis metadata use
/// - user_info: user settable identifying information
/// - hardware_info: identifying information in the hardware
/// Returns an error if it is impossible to allocate all segments on the
/// device.
/// NOTE: It is possible that the actual device size is greater than
/// the recorded device size. In that case, the additional space available
/// on the device is simply invisible to the blockdev. Consequently, it
/// is invisible to the engine, and is not part of the total size value
/// reported on the D-Bus.
pub fn new(
dev: Device,
devnode: PathBuf,
bda: BDA,
upper_segments: &[(Sectors, Sectors)],
user_info: Option<String>,
hardware_info: Option<String>,
) -> StratisResult<StratBlockDev> {
let mut segments = vec![(Sectors(0), bda.size())];
segments.extend(upper_segments);
let allocator = RangeAllocator::new(bda.dev_size(), &segments)?;
Ok(StratBlockDev {
dev,
devnode,
bda,
used: allocator,
user_info,
hardware_info,
dbus_path: MaybeDbusPath(None),
})
}
/// Returns the blockdev's Device
pub fn device(&self) -> &Device {
&self.dev
}
pub fn wipe_metadata(&self) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
BDA::wipe(&mut f)
}
pub fn save_state(&mut self, time: &DateTime<Utc>, metadata: &[u8]) -> StratisResult<()> {
let mut f = OpenOptions::new().write(true).open(&self.devnode)?;
self.bda.save_state(time, metadata, &mut f)
}
/// The device's UUID.
pub fn uuid(&self) -> DevUuid {
self.bda.dev_uuid()
}
/// The device's pool's UUID.
#[allow(dead_code)]
pub fn pool_uuid(&self) -> PoolUuid {
self.bda.pool_uuid()
}
/// Last time metadata was written to this device.
#[allow(dead_code)]
pub fn last_update_time(&self) -> Option<&DateTime<Utc>> {
self.bda.last_update_time()
}
/// Find some sector ranges that could be allocated. If more
/// sectors are needed than are available, return partial results.
/// If all sectors are desired, use available() method to get all.
pub fn request_space(&mut self, size: Sectors) -> (Sectors, Vec<(Sectors, Sectors)>) {
let prev_state = self.state();
let result = self.used.request(size);
if result.0 > Sectors(0) && prev_state!= BlockDevState::InUse {
get_engine_listener_list().notify(&EngineEvent::BlockdevStateChanged {
dbus_path: self.get_dbus_path(),
state: BlockDevState::InUse,
});
}
result
}
// ALL SIZE METHODS (except size(), which is in BlockDev impl.)
/// The number of Sectors on this device used by Stratis for metadata
pub fn metadata_size(&self) -> Sectors {
self.bda.size()
}
/// The number of Sectors on this device not allocated for any purpose.
/// self.size() - self.metadata_size() >= self.available()
pub fn available(&self) -> Sectors {
self.used.available()
}
/// The maximum size of variable length metadata that can be accommodated.
/// self.max_metadata_size() < self.metadata_size()
pub fn max_metadata_size(&self) -> Sectors {
self.bda.max_data_size()
}
/// Set the user info on this blockdev.
/// The user_info may be None, which unsets user info.
/// Returns true if the user info was changed, otherwise false.
pub fn set_user_info(&mut self, user_info: Option<&str>) -> bool {
set_blockdev_user_info!(self; user_info)
}
}
impl BlockDev for StratBlockDev {
fn devnode(&self) -> PathBuf {
self.devnode.clone()
}
fn user_info(&self) -> Option<&str> {
self.user_info.as_ref().map(|x| &**x)
}
fn hardware_info(&self) -> Option<&str> {
self.hardware_info.as_ref().map(|x| &**x)
}
fn initialization_time(&self) -> DateTime<Utc> {
// This cast will result in an incorrect, negative value starting in
// the year 292,277,026,596. :-)
Utc.timestamp(self.bda.initialization_time() as i64, 0)
}
fn size(&self) -> Sectors {
let size = self.used.size();
assert_eq!(self.bda.dev_size(), size);
size
}
fn state(&self) -> BlockDevState {
// TODO: Implement support for other BlockDevStates
if self.used.used() > self.bda.size() {
BlockDevState::InUse
} else
|
}
fn set_dbus_path(&mut self, path: MaybeDbusPath) {
self.dbus_path = path
}
fn get_dbus_path(&self) -> &MaybeDbusPath {
&self.dbus_path
}
}
impl Recordable<BaseBlockDevSave> for StratBlockDev {
    /// Snapshot the fields that are persisted in pool-level metadata.
    fn record(&self) -> BaseBlockDevSave {
        let uuid = self.uuid();
        let user_info = self.user_info.clone();
        let hardware_info = self.hardware_info.clone();
        BaseBlockDevSave {
            uuid,
            user_info,
            hardware_info,
        }
    }
}
|
{
BlockDevState::NotInUse
}
|
conditional_block
|
storage_thread.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc::IpcSender;
use servo_url::ServoUrl;
/// Distinguishes the two kinds of DOM storage a request targets.
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum StorageType {
    // Storage scoped to the browsing session (`sessionStorage`).
    Session,
    // Storage that outlives the session (`localStorage`).
    Local
}
/// Request operations on the storage data associated with a particular url
#[derive(Deserialize, Serialize)]
pub enum StorageThreadMsg {
/// gets the number of key/value pairs present in the associated storage data
Length(IpcSender<usize>, ServoUrl, StorageType),
/// gets the name of the key at the specified index in the associated storage data
Key(IpcSender<Option<String>>, ServoUrl, StorageType, u32),
/// Gets the available keys in the associated storage data
Keys(IpcSender<Vec<String>>, ServoUrl, StorageType),
/// gets the value associated with the given key in the associated storage data
GetItem(IpcSender<Option<String>>, ServoUrl, StorageType, String),
/// sets the value of the given key in the associated storage data
SetItem(IpcSender<Result<(bool, Option<String>), ()>>, ServoUrl, StorageType, String, String),
/// removes the key/value pair for the given key in the associated storage data
RemoveItem(IpcSender<Option<String>>, ServoUrl, StorageType, String),
/// clears the associated storage data by removing all the key/value pairs
Clear(IpcSender<bool>, ServoUrl, StorageType),
/// send a reply when done cleaning up thread resources and then shut it down
|
Exit(IpcSender<()>)
}
|
random_line_split
|
|
storage_thread.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc::IpcSender;
use servo_url::ServoUrl;
/// Distinguishes the two kinds of DOM storage a request targets.
#[derive(Copy, Clone, Deserialize, Serialize, HeapSizeOf)]
pub enum StorageType {
    // Storage scoped to the browsing session (`sessionStorage`).
    Session,
    // Storage that outlives the session (`localStorage`).
    Local
}
/// Request operations on the storage data associated with a particular url
#[derive(Deserialize, Serialize)]
pub enum
|
{
/// gets the number of key/value pairs present in the associated storage data
Length(IpcSender<usize>, ServoUrl, StorageType),
/// gets the name of the key at the specified index in the associated storage data
Key(IpcSender<Option<String>>, ServoUrl, StorageType, u32),
/// Gets the available keys in the associated storage data
Keys(IpcSender<Vec<String>>, ServoUrl, StorageType),
/// gets the value associated with the given key in the associated storage data
GetItem(IpcSender<Option<String>>, ServoUrl, StorageType, String),
/// sets the value of the given key in the associated storage data
SetItem(IpcSender<Result<(bool, Option<String>), ()>>, ServoUrl, StorageType, String, String),
/// removes the key/value pair for the given key in the associated storage data
RemoveItem(IpcSender<Option<String>>, ServoUrl, StorageType, String),
/// clears the associated storage data by removing all the key/value pairs
Clear(IpcSender<bool>, ServoUrl, StorageType),
/// send a reply when done cleaning up thread resources and then shut it down
Exit(IpcSender<()>)
}
|
StorageThreadMsg
|
identifier_name
|
evec-internal.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test
// Doesn't work; needs a design decision.
pub fn
|
() {
let x : [int; 5] = [1,2,3,4,5];
let _y : [int; 5] = [1,2,3,4,5];
let mut z = [1,2,3,4,5];
z = x;
assert_eq!(z[0], 1);
assert_eq!(z[4], 5);
let a : [int; 5] = [1,1,1,1,1];
let b : [int; 5] = [2,2,2,2,2];
let c : [int; 5] = [2,2,2,2,3];
log(debug, a);
assert!(a < b);
assert!(a <= b);
assert!(a!= b);
assert!(b >= a);
assert!(b > a);
log(debug, b);
assert!(b < c);
assert!(b <= c);
assert!(b!= c);
assert!(c >= b);
assert!(c > b);
assert!(a < c);
assert!(a <= c);
assert!(a!= c);
assert!(c >= a);
assert!(c > a);
log(debug, c);
}
|
main
|
identifier_name
|
evec-internal.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test
// Doesn't work; needs a design decision.
pub fn main() {
let x : [int; 5] = [1,2,3,4,5];
let _y : [int; 5] = [1,2,3,4,5];
let mut z = [1,2,3,4,5];
|
z = x;
assert_eq!(z[0], 1);
assert_eq!(z[4], 5);
let a : [int; 5] = [1,1,1,1,1];
let b : [int; 5] = [2,2,2,2,2];
let c : [int; 5] = [2,2,2,2,3];
log(debug, a);
assert!(a < b);
assert!(a <= b);
assert!(a!= b);
assert!(b >= a);
assert!(b > a);
log(debug, b);
assert!(b < c);
assert!(b <= c);
assert!(b!= c);
assert!(c >= b);
assert!(c > b);
assert!(a < c);
assert!(a <= c);
assert!(a!= c);
assert!(c >= a);
assert!(c > a);
log(debug, c);
}
|
random_line_split
|
|
evec-internal.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-test
// Doesn't work; needs a design decision.
pub fn main()
|
log(debug, b);
assert!(b < c);
assert!(b <= c);
assert!(b!= c);
assert!(c >= b);
assert!(c > b);
assert!(a < c);
assert!(a <= c);
assert!(a!= c);
assert!(c >= a);
assert!(c > a);
log(debug, c);
}
|
{
let x : [int; 5] = [1,2,3,4,5];
let _y : [int; 5] = [1,2,3,4,5];
let mut z = [1,2,3,4,5];
z = x;
assert_eq!(z[0], 1);
assert_eq!(z[4], 5);
let a : [int; 5] = [1,1,1,1,1];
let b : [int; 5] = [2,2,2,2,2];
let c : [int; 5] = [2,2,2,2,3];
log(debug, a);
assert!(a < b);
assert!(a <= b);
assert!(a != b);
assert!(b >= a);
assert!(b > a);
|
identifier_body
|
helper.rs
|
// Just functions that may be useful to many modules.
use std::fs::Metadata;
use std::fs::FileType as StdFileType;
use std::os::unix::fs::{MetadataExt, FileTypeExt, PermissionsExt};
use time::Timespec;
use fuse::{FileAttr, FileType};
pub fn
|
(file_type : StdFileType) -> FileType {
if file_type.is_dir() == true {
FileType::Directory
} else if file_type.is_file() == true {
FileType::RegularFile
} else if file_type.is_symlink() == true {
FileType::Symlink
} else if file_type.is_block_device() == true {
FileType::BlockDevice
} else if file_type.is_fifo() == true {
FileType::NamedPipe
} else if file_type.is_char_device() == true {
FileType::CharDevice
} else {
// Sockets aren't supported apparently.
FileType::RegularFile
}
}
pub fn fill_file_attr(md : &Metadata) -> FileAttr {
FileAttr{
ino : md.ino(),
size : md.size(),
blocks : md.blocks(),
atime : Timespec{ sec : md.atime(), nsec : md.atime_nsec() as i32, },
mtime : Timespec{ sec : md.mtime(), nsec : md.mtime_nsec() as i32, },
ctime : Timespec{ sec : md.ctime(), nsec : md.ctime_nsec() as i32, },
crtime : Timespec{ sec : 0, nsec : 0 }, //unavailable on Linux...
kind : fuse_file_type(md.file_type()),
perm : md.permissions().mode() as u16,
nlink : md.nlink() as u32,
uid : md.uid(),
gid : md.gid(),
rdev : md.rdev() as u32,
flags : 0,
}
}
use std::path::Path;
use fuse::Request;
use libc::EACCES;
use mirrorfs::MirrorFS;
use std::ops::Shl;
// Allows or denies access according to DAC (user/group permissions).
impl MirrorFS {
pub fn u_access(&self, _req: &Request, path: &Path, _mask: u32) -> Result<(), i32> {
let (uid, gid) = self.usermap(_req);
#[cfg(feature="enable_unsecure_features")] {
if self.settings.fullaccess.contains(&uid) {
trace!("User {} is almighty so access is ok!", uid);
return Ok(());
}
}
match path.symlink_metadata() {
Ok(md) => {
if uid == md.uid() {
if md.permissions().mode() | _mask.shl(6) == md.permissions().mode() {
trace!("Access request {:b} as user {} on path {} is ok", _mask.shl(6), uid, path.display());
return Ok(());
} else {
trace!("Access request as user isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(6), md.permissions().mode());
return Err(EACCES);
}
} else if gid == md.gid() {
if md.permissions().mode() | _mask.shl(3) == md.permissions().mode() {
trace!("Access request {:b} as group member of {} on path {} is ok", _mask.shl(3), gid, path.display());
return Ok(());
} else {
trace!("Access request as group member isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(3), md.permissions().mode());
return Err(EACCES);
}
} else {
if md.permissions().mode() | _mask == md.permissions().mode() {
trace!("Access request {:b} as \"other\" on path {} is ok", _mask, path.display());
return Ok(());
} else {
trace!("Access request as \"other\" isn't ok! Request was {:b}, Permissions were {:b}", _mask, md.permissions().mode());
return Err(EACCES);
}
}
},
Err(why) => {
warn!("Could not get metadata to file {} : {:?}", path.display(), why);
return Err(why.raw_os_error().unwrap());
}
}
}
}
|
fuse_file_type
|
identifier_name
|
helper.rs
|
// Just functions that may be useful to many modules.
use std::fs::Metadata;
use std::fs::FileType as StdFileType;
use std::os::unix::fs::{MetadataExt, FileTypeExt, PermissionsExt};
use time::Timespec;
use fuse::{FileAttr, FileType};
pub fn fuse_file_type(file_type : StdFileType) -> FileType {
if file_type.is_dir() == true {
FileType::Directory
} else if file_type.is_file() == true {
FileType::RegularFile
} else if file_type.is_symlink() == true {
FileType::Symlink
} else if file_type.is_block_device() == true {
FileType::BlockDevice
} else if file_type.is_fifo() == true {
FileType::NamedPipe
} else if file_type.is_char_device() == true {
FileType::CharDevice
} else {
// Sockets aren't supported apparently.
FileType::RegularFile
}
}
pub fn fill_file_attr(md : &Metadata) -> FileAttr
|
use std::path::Path;
use fuse::Request;
use libc::EACCES;
use mirrorfs::MirrorFS;
use std::ops::Shl;
// Allows or denies access according to DAC (user/group permissions).
impl MirrorFS {
pub fn u_access(&self, _req: &Request, path: &Path, _mask: u32) -> Result<(), i32> {
let (uid, gid) = self.usermap(_req);
#[cfg(feature="enable_unsecure_features")] {
if self.settings.fullaccess.contains(&uid) {
trace!("User {} is almighty so access is ok!", uid);
return Ok(());
}
}
match path.symlink_metadata() {
Ok(md) => {
if uid == md.uid() {
if md.permissions().mode() | _mask.shl(6) == md.permissions().mode() {
trace!("Access request {:b} as user {} on path {} is ok", _mask.shl(6), uid, path.display());
return Ok(());
} else {
trace!("Access request as user isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(6), md.permissions().mode());
return Err(EACCES);
}
} else if gid == md.gid() {
if md.permissions().mode() | _mask.shl(3) == md.permissions().mode() {
trace!("Access request {:b} as group member of {} on path {} is ok", _mask.shl(3), gid, path.display());
return Ok(());
} else {
trace!("Access request as group member isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(3), md.permissions().mode());
return Err(EACCES);
}
} else {
if md.permissions().mode() | _mask == md.permissions().mode() {
trace!("Access request {:b} as \"other\" on path {} is ok", _mask, path.display());
return Ok(());
} else {
trace!("Access request as \"other\" isn't ok! Request was {:b}, Permissions were {:b}", _mask, md.permissions().mode());
return Err(EACCES);
}
}
},
Err(why) => {
warn!("Could not get metadata to file {} : {:?}", path.display(), why);
return Err(why.raw_os_error().unwrap());
}
}
}
}
|
{
FileAttr{
ino : md.ino(),
size : md.size(),
blocks : md.blocks(),
atime : Timespec{ sec : md.atime(), nsec : md.atime_nsec() as i32, },
mtime : Timespec{ sec : md.mtime(), nsec : md.mtime_nsec() as i32, },
ctime : Timespec{ sec : md.ctime(), nsec : md.ctime_nsec() as i32, },
crtime : Timespec{ sec : 0, nsec : 0 }, //unavailable on Linux...
kind : fuse_file_type(md.file_type()),
perm : md.permissions().mode() as u16,
nlink : md.nlink() as u32,
uid : md.uid(),
gid : md.gid(),
rdev : md.rdev() as u32,
flags : 0,
}
}
|
identifier_body
|
helper.rs
|
// Just functions that may be useful to many modules.
use std::fs::Metadata;
use std::fs::FileType as StdFileType;
use std::os::unix::fs::{MetadataExt, FileTypeExt, PermissionsExt};
use time::Timespec;
use fuse::{FileAttr, FileType};
pub fn fuse_file_type(file_type : StdFileType) -> FileType {
if file_type.is_dir() == true {
FileType::Directory
} else if file_type.is_file() == true {
FileType::RegularFile
} else if file_type.is_symlink() == true {
FileType::Symlink
} else if file_type.is_block_device() == true {
FileType::BlockDevice
} else if file_type.is_fifo() == true {
FileType::NamedPipe
} else if file_type.is_char_device() == true {
FileType::CharDevice
} else {
// Sockets aren't supported apparently.
FileType::RegularFile
}
}
pub fn fill_file_attr(md : &Metadata) -> FileAttr {
FileAttr{
ino : md.ino(),
size : md.size(),
blocks : md.blocks(),
atime : Timespec{ sec : md.atime(), nsec : md.atime_nsec() as i32, },
mtime : Timespec{ sec : md.mtime(), nsec : md.mtime_nsec() as i32, },
ctime : Timespec{ sec : md.ctime(), nsec : md.ctime_nsec() as i32, },
crtime : Timespec{ sec : 0, nsec : 0 }, //unavailable on Linux...
kind : fuse_file_type(md.file_type()),
perm : md.permissions().mode() as u16,
nlink : md.nlink() as u32,
uid : md.uid(),
gid : md.gid(),
rdev : md.rdev() as u32,
flags : 0,
}
}
use std::path::Path;
use fuse::Request;
use libc::EACCES;
use mirrorfs::MirrorFS;
use std::ops::Shl;
// Allows or denies access according to DAC (user/group permissions).
impl MirrorFS {
pub fn u_access(&self, _req: &Request, path: &Path, _mask: u32) -> Result<(), i32> {
let (uid, gid) = self.usermap(_req);
#[cfg(feature="enable_unsecure_features")] {
if self.settings.fullaccess.contains(&uid) {
trace!("User {} is almighty so access is ok!", uid);
return Ok(());
}
}
match path.symlink_metadata() {
Ok(md) => {
if uid == md.uid() {
if md.permissions().mode() | _mask.shl(6) == md.permissions().mode() {
trace!("Access request {:b} as user {} on path {} is ok", _mask.shl(6), uid, path.display());
return Ok(());
} else {
trace!("Access request as user isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(6), md.permissions().mode());
return Err(EACCES);
}
} else if gid == md.gid() {
if md.permissions().mode() | _mask.shl(3) == md.permissions().mode() {
trace!("Access request {:b} as group member of {} on path {} is ok", _mask.shl(3), gid, path.display());
return Ok(());
} else {
trace!("Access request as group member isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(3), md.permissions().mode());
return Err(EACCES);
}
} else
|
},
Err(why) => {
warn!("Could not get metadata to file {} : {:?}", path.display(), why);
return Err(why.raw_os_error().unwrap());
}
}
}
}
|
{
if md.permissions().mode() | _mask == md.permissions().mode() {
trace!("Access request {:b} as \"other\" on path {} is ok", _mask, path.display());
return Ok(());
} else {
trace!("Access request as \"other\" isn't ok! Request was {:b}, Permissions were {:b}", _mask, md.permissions().mode());
return Err(EACCES);
}
}
|
conditional_block
|
helper.rs
|
// Just functions that may be useful to many modules.
use std::fs::Metadata;
use std::fs::FileType as StdFileType;
use std::os::unix::fs::{MetadataExt, FileTypeExt, PermissionsExt};
use time::Timespec;
use fuse::{FileAttr, FileType};
pub fn fuse_file_type(file_type : StdFileType) -> FileType {
if file_type.is_dir() == true {
FileType::Directory
} else if file_type.is_file() == true {
FileType::RegularFile
} else if file_type.is_symlink() == true {
FileType::Symlink
} else if file_type.is_block_device() == true {
FileType::BlockDevice
} else if file_type.is_fifo() == true {
FileType::NamedPipe
} else if file_type.is_char_device() == true {
FileType::CharDevice
} else {
// Sockets aren't supported apparently.
FileType::RegularFile
}
}
pub fn fill_file_attr(md : &Metadata) -> FileAttr {
FileAttr{
ino : md.ino(),
size : md.size(),
blocks : md.blocks(),
atime : Timespec{ sec : md.atime(), nsec : md.atime_nsec() as i32, },
mtime : Timespec{ sec : md.mtime(), nsec : md.mtime_nsec() as i32, },
ctime : Timespec{ sec : md.ctime(), nsec : md.ctime_nsec() as i32, },
crtime : Timespec{ sec : 0, nsec : 0 }, //unavailable on Linux...
kind : fuse_file_type(md.file_type()),
perm : md.permissions().mode() as u16,
nlink : md.nlink() as u32,
uid : md.uid(),
gid : md.gid(),
rdev : md.rdev() as u32,
flags : 0,
}
}
use std::path::Path;
use fuse::Request;
use libc::EACCES;
use mirrorfs::MirrorFS;
use std::ops::Shl;
// Allows or denies access according to DAC (user/group permissions).
impl MirrorFS {
pub fn u_access(&self, _req: &Request, path: &Path, _mask: u32) -> Result<(), i32> {
let (uid, gid) = self.usermap(_req);
#[cfg(feature="enable_unsecure_features")] {
if self.settings.fullaccess.contains(&uid) {
trace!("User {} is almighty so access is ok!", uid);
return Ok(());
}
}
match path.symlink_metadata() {
Ok(md) => {
if uid == md.uid() {
if md.permissions().mode() | _mask.shl(6) == md.permissions().mode() {
trace!("Access request {:b} as user {} on path {} is ok", _mask.shl(6), uid, path.display());
return Ok(());
} else {
trace!("Access request as user isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(6), md.permissions().mode());
return Err(EACCES);
}
} else if gid == md.gid() {
if md.permissions().mode() | _mask.shl(3) == md.permissions().mode() {
trace!("Access request {:b} as group member of {} on path {} is ok", _mask.shl(3), gid, path.display());
return Ok(());
} else {
trace!("Access request as group member isn't ok! Request was {:b}, Permissions were {:b}", _mask.shl(3), md.permissions().mode());
return Err(EACCES);
}
} else {
if md.permissions().mode() | _mask == md.permissions().mode() {
trace!("Access request {:b} as \"other\" on path {} is ok", _mask, path.display());
return Ok(());
} else {
trace!("Access request as \"other\" isn't ok! Request was {:b}, Permissions were {:b}", _mask, md.permissions().mode());
return Err(EACCES);
}
}
|
}
}
}
}
|
},
Err(why) => {
warn!("Could not get metadata to file {} : {:?}", path.display(), why);
return Err(why.raw_os_error().unwrap());
|
random_line_split
|
solution.rs
|
use std::io;
macro_rules! read_n {
( $name : ident, $typ : ty ) => {
let mut line : String = String::new();
io::stdin().read_line(&mut line);
let $name : $typ = line.trim().parse::<$typ>().expect("invalid data type");
};
}
fn main() {
read_n!(x, u32);
read_n!(n, u32);
let mut powers : Vec<u32> = (1..32).map(|x: u32| x.pow(n)).collect();
let mut no_of_ways: u32 = 0;
permut(x, 0, &mut powers, 0, &mut no_of_ways);
println!("{}", no_of_ways);
}
fn permut(x: u32, sum: u32, mut powers: &mut Vec<u32>, start: usize, mut no_of_ways: &mut u32) {
if sum == x
|
if powers.len() == 0 || sum > x {
return;
}
for i in start..powers.len() {
permut(x, sum + powers[i], &mut powers, i + 1, &mut no_of_ways);
}
}
|
{
*no_of_ways += 1;
return;
}
|
conditional_block
|
solution.rs
|
use std::io;
macro_rules! read_n {
( $name : ident, $typ : ty ) => {
let mut line : String = String::new();
io::stdin().read_line(&mut line);
let $name : $typ = line.trim().parse::<$typ>().expect("invalid data type");
};
}
fn main() {
read_n!(x, u32);
read_n!(n, u32);
let mut powers : Vec<u32> = (1..32).map(|x: u32| x.pow(n)).collect();
let mut no_of_ways: u32 = 0;
permut(x, 0, &mut powers, 0, &mut no_of_ways);
println!("{}", no_of_ways);
}
fn permut(x: u32, sum: u32, mut powers: &mut Vec<u32>, start: usize, mut no_of_ways: &mut u32)
|
{
if sum == x {
*no_of_ways += 1;
return;
}
if powers.len() == 0 || sum > x {
return;
}
for i in start..powers.len() {
permut(x, sum + powers[i], &mut powers, i + 1, &mut no_of_ways);
}
}
|
identifier_body
|
|
solution.rs
|
use std::io;
macro_rules! read_n {
( $name : ident, $typ : ty ) => {
let mut line : String = String::new();
io::stdin().read_line(&mut line);
let $name : $typ = line.trim().parse::<$typ>().expect("invalid data type");
};
}
fn
|
() {
read_n!(x, u32);
read_n!(n, u32);
let mut powers : Vec<u32> = (1..32).map(|x: u32| x.pow(n)).collect();
let mut no_of_ways: u32 = 0;
permut(x, 0, &mut powers, 0, &mut no_of_ways);
println!("{}", no_of_ways);
}
fn permut(x: u32, sum: u32, mut powers: &mut Vec<u32>, start: usize, mut no_of_ways: &mut u32) {
if sum == x {
*no_of_ways += 1;
return;
}
if powers.len() == 0 || sum > x {
return;
}
for i in start..powers.len() {
permut(x, sum + powers[i], &mut powers, i + 1, &mut no_of_ways);
}
}
|
main
|
identifier_name
|
solution.rs
|
use std::io;
macro_rules! read_n {
( $name : ident, $typ : ty ) => {
let mut line : String = String::new();
io::stdin().read_line(&mut line);
let $name : $typ = line.trim().parse::<$typ>().expect("invalid data type");
};
}
fn main() {
read_n!(x, u32);
read_n!(n, u32);
let mut powers : Vec<u32> = (1..32).map(|x: u32| x.pow(n)).collect();
let mut no_of_ways: u32 = 0;
permut(x, 0, &mut powers, 0, &mut no_of_ways);
println!("{}", no_of_ways);
|
fn permut(x: u32, sum: u32, mut powers: &mut Vec<u32>, start: usize, mut no_of_ways: &mut u32) {
if sum == x {
*no_of_ways += 1;
return;
}
if powers.len() == 0 || sum > x {
return;
}
for i in start..powers.len() {
permut(x, sum + powers[i], &mut powers, i + 1, &mut no_of_ways);
}
}
|
}
|
random_line_split
|
use-from-trait-xc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:use_from_trait_xc.rs
extern crate use_from_trait_xc;
use use_from_trait_xc::Trait::foo;
//~^ ERROR unresolved import `use_from_trait_xc::Trait::foo`. Cannot import from a trait or type imp
use use_from_trait_xc::Foo::new;
//~^ ERROR unresolved import `use_from_trait_xc::Foo::new`. Cannot import from a trait or type imple
fn main() {}
|
//
|
random_line_split
|
use-from-trait-xc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:use_from_trait_xc.rs
extern crate use_from_trait_xc;
use use_from_trait_xc::Trait::foo;
//~^ ERROR unresolved import `use_from_trait_xc::Trait::foo`. Cannot import from a trait or type imp
use use_from_trait_xc::Foo::new;
//~^ ERROR unresolved import `use_from_trait_xc::Foo::new`. Cannot import from a trait or type imple
fn
|
() {}
|
main
|
identifier_name
|
use-from-trait-xc.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:use_from_trait_xc.rs
extern crate use_from_trait_xc;
use use_from_trait_xc::Trait::foo;
//~^ ERROR unresolved import `use_from_trait_xc::Trait::foo`. Cannot import from a trait or type imp
use use_from_trait_xc::Foo::new;
//~^ ERROR unresolved import `use_from_trait_xc::Foo::new`. Cannot import from a trait or type imple
fn main()
|
{}
|
identifier_body
|
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
#![doc(primitive = "f32")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
unsafe { intrinsics::floorf32(self) }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
unsafe { intrinsics::ceilf32(self) }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan()
|
else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
unsafe { intrinsics::powf32(self, n) }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
unsafe { intrinsics::expf32(self) }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
unsafe { intrinsics::logf32(self) }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
unsafe { intrinsics::log10f32(self) }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
{
Float::nan()
}
|
conditional_block
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
#![doc(primitive = "f32")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
|
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
unsafe { intrinsics::floorf32(self) }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
unsafe { intrinsics::ceilf32(self) }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
unsafe { intrinsics::powf32(self, n) }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
unsafe { intrinsics::expf32(self) }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
unsafe { intrinsics::logf32(self) }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
unsafe { intrinsics::log10f32(self) }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
random_line_split
|
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
#![doc(primitive = "f32")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
unsafe { intrinsics::floorf32(self) }
}
/// Rounds towards plus infinity.
#[inline]
fn
|
(self) -> f32 {
unsafe { intrinsics::ceilf32(self) }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool {
self > 0.0 || (1.0 / self) == Float::infinity()
}
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
unsafe { intrinsics::powf32(self, n) }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
unsafe { intrinsics::expf32(self) }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
unsafe { intrinsics::logf32(self) }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
unsafe { intrinsics::log10f32(self) }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
ceil
|
identifier_name
|
f32.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Operations and constants for 32-bits floats (`f32` type)
#![doc(primitive = "f32")]
// FIXME: MIN_VALUE and MAX_VALUE literals are parsed as -inf and inf #14353
#![allow(overflowing_literals)]
#![stable(feature = "rust1", since = "1.0.0")]
use prelude::*;
use intrinsics;
use mem;
use num::{Float, ParseFloatError};
use num::FpCategory as Fp;
#[stable(feature = "rust1", since = "1.0.0")]
pub const RADIX: u32 = 2;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MANTISSA_DIGITS: u32 = 24;
#[stable(feature = "rust1", since = "1.0.0")]
pub const DIGITS: u32 = 6;
#[stable(feature = "rust1", since = "1.0.0")]
pub const EPSILON: f32 = 1.19209290e-07_f32;
/// Smallest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN: f32 = -3.40282347e+38_f32;
/// Smallest positive, normalized f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_POSITIVE: f32 = 1.17549435e-38_f32;
/// Largest finite f32 value
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX: f32 = 3.40282347e+38_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_EXP: i32 = -125;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_EXP: i32 = 128;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MIN_10_EXP: i32 = -37;
#[stable(feature = "rust1", since = "1.0.0")]
pub const MAX_10_EXP: i32 = 38;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NAN: f32 = 0.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const INFINITY: f32 = 1.0_f32/0.0_f32;
#[stable(feature = "rust1", since = "1.0.0")]
pub const NEG_INFINITY: f32 = -1.0_f32/0.0_f32;
/// Basic mathematial constants.
#[stable(feature = "rust1", since = "1.0.0")]
pub mod consts {
// FIXME: replace with mathematical constants from cmath.
/// Archimedes' constant
#[stable(feature = "rust1", since = "1.0.0")]
pub const PI: f32 = 3.14159265358979323846264338327950288_f32;
/// pi * 2.0
#[unstable(feature = "float_consts",
reason = "unclear naming convention/usefulness")]
#[deprecated(since = "1.2.0", reason = "unclear on usefulness")]
pub const PI_2: f32 = 6.28318530717958647692528676655900576_f32;
/// pi/2.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_2: f32 = 1.57079632679489661923132169163975144_f32;
/// pi/3.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_3: f32 = 1.04719755119659774615421446109316763_f32;
/// pi/4.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_4: f32 = 0.785398163397448309615660845819875721_f32;
/// pi/6.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_6: f32 = 0.52359877559829887307710723054658381_f32;
/// pi/8.0
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_PI_8: f32 = 0.39269908169872415480783042290993786_f32;
/// 1.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_PI: f32 = 0.318309886183790671537767526745028724_f32;
/// 2.0/pi
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_PI: f32 = 0.636619772367581343075535053490057448_f32;
/// 2.0/sqrt(pi)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_2_SQRT_PI: f32 = 1.12837916709551257389615890312154517_f32;
/// sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const SQRT_2: f32 = 1.41421356237309504880168872420969808_f32;
/// 1.0/sqrt(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const FRAC_1_SQRT_2: f32 = 0.707106781186547524400844362104849039_f32;
/// Euler's number
#[stable(feature = "rust1", since = "1.0.0")]
pub const E: f32 = 2.71828182845904523536028747135266250_f32;
/// log2(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG2_E: f32 = 1.44269504088896340735992468100189214_f32;
/// log10(e)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LOG10_E: f32 = 0.434294481903251827651128918916605082_f32;
/// ln(2.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_2: f32 = 0.693147180559945309417232121458176568_f32;
/// ln(10.0)
#[stable(feature = "rust1", since = "1.0.0")]
pub const LN_10: f32 = 2.30258509299404568401799145468436421_f32;
}
impl Float for f32 {
#[inline]
fn nan() -> f32 { NAN }
#[inline]
fn infinity() -> f32 { INFINITY }
#[inline]
fn neg_infinity() -> f32 { NEG_INFINITY }
#[inline]
fn zero() -> f32 { 0.0 }
#[inline]
fn neg_zero() -> f32 { -0.0 }
#[inline]
fn one() -> f32 { 1.0 }
from_str_radix_float_impl! { f32 }
/// Returns `true` if the number is NaN.
#[inline]
fn is_nan(self) -> bool { self!= self }
/// Returns `true` if the number is infinite.
#[inline]
fn is_infinite(self) -> bool {
self == Float::infinity() || self == Float::neg_infinity()
}
/// Returns `true` if the number is neither infinite or NaN.
#[inline]
fn is_finite(self) -> bool {
!(self.is_nan() || self.is_infinite())
}
/// Returns `true` if the number is neither zero, infinite, subnormal or NaN.
#[inline]
fn is_normal(self) -> bool {
self.classify() == Fp::Normal
}
/// Returns the floating point category of the number. If only one property
/// is going to be tested, it is generally faster to use the specific
/// predicate instead.
fn classify(self) -> Fp {
const EXP_MASK: u32 = 0x7f800000;
const MAN_MASK: u32 = 0x007fffff;
let bits: u32 = unsafe { mem::transmute(self) };
match (bits & MAN_MASK, bits & EXP_MASK) {
(0, 0) => Fp::Zero,
(_, 0) => Fp::Subnormal,
(0, EXP_MASK) => Fp::Infinite,
(_, EXP_MASK) => Fp::Nan,
_ => Fp::Normal,
}
}
/// Returns the mantissa, exponent and sign as integers.
fn integer_decode(self) -> (u64, i16, i8) {
let bits: u32 = unsafe { mem::transmute(self) };
let sign: i8 = if bits >> 31 == 0 { 1 } else { -1 };
let mut exponent: i16 = ((bits >> 23) & 0xff) as i16;
let mantissa = if exponent == 0 {
(bits & 0x7fffff) << 1
} else {
(bits & 0x7fffff) | 0x800000
};
// Exponent bias + mantissa shift
exponent -= 127 + 23;
(mantissa as u64, exponent, sign)
}
/// Rounds towards minus infinity.
#[inline]
fn floor(self) -> f32 {
unsafe { intrinsics::floorf32(self) }
}
/// Rounds towards plus infinity.
#[inline]
fn ceil(self) -> f32 {
unsafe { intrinsics::ceilf32(self) }
}
/// Rounds to nearest integer. Rounds half-way cases away from zero.
#[inline]
fn round(self) -> f32 {
unsafe { intrinsics::roundf32(self) }
}
/// Returns the integer part of the number (rounds towards zero).
#[inline]
fn trunc(self) -> f32 {
unsafe { intrinsics::truncf32(self) }
}
/// The fractional part of the number, satisfying:
///
/// ```
/// let x = 1.65f32;
/// assert!(x == x.trunc() + x.fract())
/// ```
#[inline]
fn fract(self) -> f32 { self - self.trunc() }
/// Computes the absolute value of `self`. Returns `Float::nan()` if the
/// number is `Float::nan()`.
#[inline]
fn abs(self) -> f32 {
unsafe { intrinsics::fabsf32(self) }
}
/// Returns a number that represents the sign of `self`.
///
/// - `1.0` if the number is positive, `+0.0` or `Float::infinity()`
/// - `-1.0` if the number is negative, `-0.0` or `Float::neg_infinity()`
/// - `Float::nan()` if the number is `Float::nan()`
#[inline]
fn signum(self) -> f32 {
if self.is_nan() {
Float::nan()
} else {
unsafe { intrinsics::copysignf32(1.0, self) }
}
}
/// Returns `true` if `self` is positive, including `+0.0` and
/// `Float::infinity()`.
#[inline]
fn is_positive(self) -> bool
|
/// Returns `true` if `self` is negative, including `-0.0` and
/// `Float::neg_infinity()`.
#[inline]
fn is_negative(self) -> bool {
self < 0.0 || (1.0 / self) == Float::neg_infinity()
}
/// Fused multiply-add. Computes `(self * a) + b` with only one rounding
/// error. This produces a more accurate result with better performance than
/// a separate multiplication operation followed by an add.
#[inline]
fn mul_add(self, a: f32, b: f32) -> f32 {
unsafe { intrinsics::fmaf32(self, a, b) }
}
/// Returns the reciprocal (multiplicative inverse) of the number.
#[inline]
fn recip(self) -> f32 { 1.0 / self }
#[inline]
fn powi(self, n: i32) -> f32 {
unsafe { intrinsics::powif32(self, n) }
}
#[inline]
fn powf(self, n: f32) -> f32 {
unsafe { intrinsics::powf32(self, n) }
}
#[inline]
fn sqrt(self) -> f32 {
if self < 0.0 {
NAN
} else {
unsafe { intrinsics::sqrtf32(self) }
}
}
#[inline]
fn rsqrt(self) -> f32 { self.sqrt().recip() }
/// Returns the exponential of the number.
#[inline]
fn exp(self) -> f32 {
unsafe { intrinsics::expf32(self) }
}
/// Returns 2 raised to the power of the number.
#[inline]
fn exp2(self) -> f32 {
unsafe { intrinsics::exp2f32(self) }
}
/// Returns the natural logarithm of the number.
#[inline]
fn ln(self) -> f32 {
unsafe { intrinsics::logf32(self) }
}
/// Returns the logarithm of the number with respect to an arbitrary base.
#[inline]
fn log(self, base: f32) -> f32 { self.ln() / base.ln() }
/// Returns the base 2 logarithm of the number.
#[inline]
fn log2(self) -> f32 {
unsafe { intrinsics::log2f32(self) }
}
/// Returns the base 10 logarithm of the number.
#[inline]
fn log10(self) -> f32 {
unsafe { intrinsics::log10f32(self) }
}
/// Converts to degrees, assuming the number is in radians.
#[inline]
fn to_degrees(self) -> f32 { self * (180.0f32 / consts::PI) }
/// Converts to radians, assuming the number is in degrees.
#[inline]
fn to_radians(self) -> f32 {
let value: f32 = consts::PI;
self * (value / 180.0f32)
}
}
|
{
self > 0.0 || (1.0 / self) == Float::infinity()
}
|
identifier_body
|
brainfuck.rs
|
extern crate zaldinar_core;
use std::fmt;
use zaldinar_core::client::PluginRegister;
use zaldinar_core::events::CommandEvent;
const MAX_ITERATIONS: u32 = 134217728u32;
const MAX_OUTPUT: usize = 256usize;
#[derive(Debug)]
pub enum Error {
/// A right bracket was found with no unmatched left brackets preceding it.
UnbalancedRightBracket,
/// The input ended before right brackets were found to match all left brackets.
UnbalancedLeftBracket,
/// `,` is unsupported
CommaUnsupported,
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
&Error::UnbalancedRightBracket => {
write!(formatter, "Expected matching `[` before `]`, found lone `]` first.")
},
&Error::UnbalancedLeftBracket => {
write!(formatter, "Unbalanced `[`. Expected matching `]`, found end of file.")
},
&Error::CommaUnsupported => {
write!(formatter, "Unsupported command: `,`.")
}
}
}
}
#[derive(Debug)]
enum Instruction {
/// Increment the memory pointer by one
MoveRight,
/// Decrement the memory pointer by one
MoveLeft,
/// Increment the memory value at the memory pointer by one
Increment,
/// Decrement the memory value at the memory pointer by one
Decrement,
/// Output the value of the current memory pointer as a char
Output,
/// This is the left side of a loop.
/// If the memory value at the memory pointer is zero, set the next instruction to the
/// contained value.
JumpToLeft(usize),
/// This is the right side of a loop.
/// If the memory value at the memory pointer is non-zero, set the next instruction to the
/// contained value.
JumpToRight(usize),
}
fn parse_instructions(event: &CommandEvent) -> Result<Vec<Instruction>, Error> {
// Vec of opening jumps waiting for a closing jump to find
// each u16 is a position in the instructions vec.
let mut waiting_opening_jumps = Vec::new();
let mut instructions = Vec::new();
for arg in &event.args {
for c in arg.chars() {
let instruction = match c {
'>' => Instruction::MoveRight,
'<' => Instruction::MoveLeft,
'+' => Instruction::Increment,
'-' => Instruction::Decrement,
'.' => Instruction::Output,
',' => {
return Err(Error::CommaUnsupported);
},
'[' => {
// instructions.len() is the position where JumpTo is going to end up
waiting_opening_jumps.push(instructions.len());
// This is a placeholder, this is guaranteed to be replaced when the
// corresponding `]` is found.
Instruction::JumpToLeft(0usize)
},
']' => {
match waiting_opening_jumps.pop() {
Some(left_jump) => {
// instructions.len() is the position where the right JumpTo
instructions[left_jump] = Instruction::JumpToLeft(instructions.len());
Instruction::JumpToRight(left_jump)
},
None => {
return Err(Error::UnbalancedRightBracket);
}
}
},
_ => continue, // treat invalid characters as comments
};
instructions.push(instruction);
}
}
if!waiting_opening_jumps.is_empty() {
return Err(Error::UnbalancedLeftBracket);
}
return Ok(instructions);
}
fn brainfuck(event: &CommandEvent) {
let instructions = match parse_instructions(event) {
Ok(instructions) => instructions,
Err(e) => {
event.client.send_message(event.channel(), format!("Error: {}", e));
return;
}
};
// Program memory, max size is 2^15
let mut memory = [0u8; 32768];
// Current position in memory
let mut memory_position = 0u16;
// Next instruction to run
let mut next_instruction = 0usize;
// Output string buffer
let mut output = String::new();
// Whether or not we finished cleanly (if false, output error for maximum iterations reached)
let mut done = false;
// u32::MAX as a limit to the number of iterations to run for a single program.
for _ in 0..MAX_ITERATIONS {
if next_instruction >= instructions.len() {
done = true;
break;
}
match instructions[next_instruction] {
Instruction::MoveRight => {
memory_position += 1;
memory_position %= 32768;
},
Instruction::MoveLeft => {
memory_position -= 1;
memory_position %= 32768;
},
Instruction::Increment => memory[memory_position as usize] += 1,
Instruction::Decrement => memory[memory_position as usize] -= 1,
Instruction::Output => {
output.push(memory[memory_position as usize] as char);
if output.len() > MAX_OUTPUT {
event.client.send_message(event.channel(),
"Reached maximum output length. (256)");
done = true;
break;
}
},
Instruction::JumpToLeft(target_position) => {
if memory[memory_position as usize] == 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
Instruction::JumpToRight(target_position) => {
if memory[memory_position as usize]!= 0 {
|
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
}
next_instruction += 1;
}
if!done {
event.client.send_message(event.channel(), "Reached maximum iterations. (134217728)");
}
if output.is_empty() {
event.client.send_message(event.channel(), "No output produced.");
} else {
event.client.send_message(event.channel(), format!("Output: {}", escape_output(&output)));
}
}
pub fn register(register: &mut PluginRegister) {
register.register_command("brainfuck", brainfuck);
}
fn escape_output(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
'\t' => result.push_str("\\t"),
'\r' => result.push_str("\\r"),
'\n' => result.push_str("\\n"),
'\\' => result.push_str("\\\\"),
v @ '\x20'... '\x7e' => result.push(v),
v @ _ => result.extend(v.escape_unicode()),
}
}
return result;
}
|
random_line_split
|
|
brainfuck.rs
|
extern crate zaldinar_core;
use std::fmt;
use zaldinar_core::client::PluginRegister;
use zaldinar_core::events::CommandEvent;
const MAX_ITERATIONS: u32 = 134217728u32;
const MAX_OUTPUT: usize = 256usize;
#[derive(Debug)]
pub enum Error {
/// A right bracket was found with no unmatched left brackets preceding it.
UnbalancedRightBracket,
/// The input ended before right brackets were found to match all left brackets.
UnbalancedLeftBracket,
/// `,` is unsupported
CommaUnsupported,
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
&Error::UnbalancedRightBracket => {
write!(formatter, "Expected matching `[` before `]`, found lone `]` first.")
},
&Error::UnbalancedLeftBracket => {
write!(formatter, "Unbalanced `[`. Expected matching `]`, found end of file.")
},
&Error::CommaUnsupported => {
write!(formatter, "Unsupported command: `,`.")
}
}
}
}
#[derive(Debug)]
enum Instruction {
/// Increment the memory pointer by one
MoveRight,
/// Decrement the memory pointer by one
MoveLeft,
/// Increment the memory value at the memory pointer by one
Increment,
/// Decrement the memory value at the memory pointer by one
Decrement,
/// Output the value of the current memory pointer as a char
Output,
/// This is the left side of a loop.
/// If the memory value at the memory pointer is zero, set the next instruction to the
/// contained value.
JumpToLeft(usize),
/// This is the right side of a loop.
/// If the memory value at the memory pointer is non-zero, set the next instruction to the
/// contained value.
JumpToRight(usize),
}
fn parse_instructions(event: &CommandEvent) -> Result<Vec<Instruction>, Error> {
// Vec of opening jumps waiting for a closing jump to find
// each u16 is a position in the instructions vec.
let mut waiting_opening_jumps = Vec::new();
let mut instructions = Vec::new();
for arg in &event.args {
for c in arg.chars() {
let instruction = match c {
'>' => Instruction::MoveRight,
'<' => Instruction::MoveLeft,
'+' => Instruction::Increment,
'-' => Instruction::Decrement,
'.' => Instruction::Output,
',' => {
return Err(Error::CommaUnsupported);
},
'[' => {
// instructions.len() is the position where JumpTo is going to end up
waiting_opening_jumps.push(instructions.len());
// This is a placeholder, this is guaranteed to be replaced when the
// corresponding `]` is found.
Instruction::JumpToLeft(0usize)
},
']' => {
match waiting_opening_jumps.pop() {
Some(left_jump) => {
// instructions.len() is the position where the right JumpTo
instructions[left_jump] = Instruction::JumpToLeft(instructions.len());
Instruction::JumpToRight(left_jump)
},
None => {
return Err(Error::UnbalancedRightBracket);
}
}
},
_ => continue, // treat invalid characters as comments
};
instructions.push(instruction);
}
}
if!waiting_opening_jumps.is_empty() {
return Err(Error::UnbalancedLeftBracket);
}
return Ok(instructions);
}
fn
|
(event: &CommandEvent) {
let instructions = match parse_instructions(event) {
Ok(instructions) => instructions,
Err(e) => {
event.client.send_message(event.channel(), format!("Error: {}", e));
return;
}
};
// Program memory, max size is 2^15
let mut memory = [0u8; 32768];
// Current position in memory
let mut memory_position = 0u16;
// Next instruction to run
let mut next_instruction = 0usize;
// Output string buffer
let mut output = String::new();
// Whether or not we finished cleanly (if false, output error for maximum iterations reached)
let mut done = false;
// u32::MAX as a limit to the number of iterations to run for a single program.
for _ in 0..MAX_ITERATIONS {
if next_instruction >= instructions.len() {
done = true;
break;
}
match instructions[next_instruction] {
Instruction::MoveRight => {
memory_position += 1;
memory_position %= 32768;
},
Instruction::MoveLeft => {
memory_position -= 1;
memory_position %= 32768;
},
Instruction::Increment => memory[memory_position as usize] += 1,
Instruction::Decrement => memory[memory_position as usize] -= 1,
Instruction::Output => {
output.push(memory[memory_position as usize] as char);
if output.len() > MAX_OUTPUT {
event.client.send_message(event.channel(),
"Reached maximum output length. (256)");
done = true;
break;
}
},
Instruction::JumpToLeft(target_position) => {
if memory[memory_position as usize] == 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
Instruction::JumpToRight(target_position) => {
if memory[memory_position as usize]!= 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
}
next_instruction += 1;
}
if!done {
event.client.send_message(event.channel(), "Reached maximum iterations. (134217728)");
}
if output.is_empty() {
event.client.send_message(event.channel(), "No output produced.");
} else {
event.client.send_message(event.channel(), format!("Output: {}", escape_output(&output)));
}
}
pub fn register(register: &mut PluginRegister) {
register.register_command("brainfuck", brainfuck);
}
fn escape_output(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
'\t' => result.push_str("\\t"),
'\r' => result.push_str("\\r"),
'\n' => result.push_str("\\n"),
'\\' => result.push_str("\\\\"),
v @ '\x20'... '\x7e' => result.push(v),
v @ _ => result.extend(v.escape_unicode()),
}
}
return result;
}
|
brainfuck
|
identifier_name
|
brainfuck.rs
|
extern crate zaldinar_core;
use std::fmt;
use zaldinar_core::client::PluginRegister;
use zaldinar_core::events::CommandEvent;
const MAX_ITERATIONS: u32 = 134217728u32;
const MAX_OUTPUT: usize = 256usize;
#[derive(Debug)]
pub enum Error {
/// A right bracket was found with no unmatched left brackets preceding it.
UnbalancedRightBracket,
/// The input ended before right brackets were found to match all left brackets.
UnbalancedLeftBracket,
/// `,` is unsupported
CommaUnsupported,
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
&Error::UnbalancedRightBracket => {
write!(formatter, "Expected matching `[` before `]`, found lone `]` first.")
},
&Error::UnbalancedLeftBracket => {
write!(formatter, "Unbalanced `[`. Expected matching `]`, found end of file.")
},
&Error::CommaUnsupported => {
write!(formatter, "Unsupported command: `,`.")
}
}
}
}
#[derive(Debug)]
enum Instruction {
/// Increment the memory pointer by one
MoveRight,
/// Decrement the memory pointer by one
MoveLeft,
/// Increment the memory value at the memory pointer by one
Increment,
/// Decrement the memory value at the memory pointer by one
Decrement,
/// Output the value of the current memory pointer as a char
Output,
/// This is the left side of a loop.
/// If the memory value at the memory pointer is zero, set the next instruction to the
/// contained value.
JumpToLeft(usize),
/// This is the right side of a loop.
/// If the memory value at the memory pointer is non-zero, set the next instruction to the
/// contained value.
JumpToRight(usize),
}
fn parse_instructions(event: &CommandEvent) -> Result<Vec<Instruction>, Error> {
// Vec of opening jumps waiting for a closing jump to find
// each u16 is a position in the instructions vec.
let mut waiting_opening_jumps = Vec::new();
let mut instructions = Vec::new();
for arg in &event.args {
for c in arg.chars() {
let instruction = match c {
'>' => Instruction::MoveRight,
'<' => Instruction::MoveLeft,
'+' => Instruction::Increment,
'-' => Instruction::Decrement,
'.' => Instruction::Output,
',' => {
return Err(Error::CommaUnsupported);
},
'[' => {
// instructions.len() is the position where JumpTo is going to end up
waiting_opening_jumps.push(instructions.len());
// This is a placeholder, this is guaranteed to be replaced when the
// corresponding `]` is found.
Instruction::JumpToLeft(0usize)
},
']' => {
match waiting_opening_jumps.pop() {
Some(left_jump) => {
// instructions.len() is the position where the right JumpTo
instructions[left_jump] = Instruction::JumpToLeft(instructions.len());
Instruction::JumpToRight(left_jump)
},
None => {
return Err(Error::UnbalancedRightBracket);
}
}
},
_ => continue, // treat invalid characters as comments
};
instructions.push(instruction);
}
}
if!waiting_opening_jumps.is_empty() {
return Err(Error::UnbalancedLeftBracket);
}
return Ok(instructions);
}
fn brainfuck(event: &CommandEvent)
|
// u32::MAX as a limit to the number of iterations to run for a single program.
for _ in 0..MAX_ITERATIONS {
if next_instruction >= instructions.len() {
done = true;
break;
}
match instructions[next_instruction] {
Instruction::MoveRight => {
memory_position += 1;
memory_position %= 32768;
},
Instruction::MoveLeft => {
memory_position -= 1;
memory_position %= 32768;
},
Instruction::Increment => memory[memory_position as usize] += 1,
Instruction::Decrement => memory[memory_position as usize] -= 1,
Instruction::Output => {
output.push(memory[memory_position as usize] as char);
if output.len() > MAX_OUTPUT {
event.client.send_message(event.channel(),
"Reached maximum output length. (256)");
done = true;
break;
}
},
Instruction::JumpToLeft(target_position) => {
if memory[memory_position as usize] == 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
Instruction::JumpToRight(target_position) => {
if memory[memory_position as usize]!= 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
}
next_instruction += 1;
}
if!done {
event.client.send_message(event.channel(), "Reached maximum iterations. (134217728)");
}
if output.is_empty() {
event.client.send_message(event.channel(), "No output produced.");
} else {
event.client.send_message(event.channel(), format!("Output: {}", escape_output(&output)));
}
}
pub fn register(register: &mut PluginRegister) {
register.register_command("brainfuck", brainfuck);
}
fn escape_output(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
'\t' => result.push_str("\\t"),
'\r' => result.push_str("\\r"),
'\n' => result.push_str("\\n"),
'\\' => result.push_str("\\\\"),
v @ '\x20'... '\x7e' => result.push(v),
v @ _ => result.extend(v.escape_unicode()),
}
}
return result;
}
|
{
let instructions = match parse_instructions(event) {
Ok(instructions) => instructions,
Err(e) => {
event.client.send_message(event.channel(), format!("Error: {}", e));
return;
}
};
// Program memory, max size is 2^15
let mut memory = [0u8; 32768];
// Current position in memory
let mut memory_position = 0u16;
// Next instruction to run
let mut next_instruction = 0usize;
// Output string buffer
let mut output = String::new();
// Whether or not we finished cleanly (if false, output error for maximum iterations reached)
let mut done = false;
|
identifier_body
|
brainfuck.rs
|
extern crate zaldinar_core;
use std::fmt;
use zaldinar_core::client::PluginRegister;
use zaldinar_core::events::CommandEvent;
const MAX_ITERATIONS: u32 = 134217728u32;
const MAX_OUTPUT: usize = 256usize;
#[derive(Debug)]
pub enum Error {
/// A right bracket was found with no unmatched left brackets preceding it.
UnbalancedRightBracket,
/// The input ended before right brackets were found to match all left brackets.
UnbalancedLeftBracket,
/// `,` is unsupported
CommaUnsupported,
}
impl fmt::Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self {
&Error::UnbalancedRightBracket => {
write!(formatter, "Expected matching `[` before `]`, found lone `]` first.")
},
&Error::UnbalancedLeftBracket => {
write!(formatter, "Unbalanced `[`. Expected matching `]`, found end of file.")
},
&Error::CommaUnsupported => {
write!(formatter, "Unsupported command: `,`.")
}
}
}
}
#[derive(Debug)]
enum Instruction {
/// Increment the memory pointer by one
MoveRight,
/// Decrement the memory pointer by one
MoveLeft,
/// Increment the memory value at the memory pointer by one
Increment,
/// Decrement the memory value at the memory pointer by one
Decrement,
/// Output the value of the current memory pointer as a char
Output,
/// This is the left side of a loop.
/// If the memory value at the memory pointer is zero, set the next instruction to the
/// contained value.
JumpToLeft(usize),
/// This is the right side of a loop.
/// If the memory value at the memory pointer is non-zero, set the next instruction to the
/// contained value.
JumpToRight(usize),
}
fn parse_instructions(event: &CommandEvent) -> Result<Vec<Instruction>, Error> {
// Vec of opening jumps waiting for a closing jump to find
// each u16 is a position in the instructions vec.
let mut waiting_opening_jumps = Vec::new();
let mut instructions = Vec::new();
for arg in &event.args {
for c in arg.chars() {
let instruction = match c {
'>' => Instruction::MoveRight,
'<' => Instruction::MoveLeft,
'+' => Instruction::Increment,
'-' => Instruction::Decrement,
'.' => Instruction::Output,
',' => {
return Err(Error::CommaUnsupported);
},
'[' => {
// instructions.len() is the position where JumpTo is going to end up
waiting_opening_jumps.push(instructions.len());
// This is a placeholder, this is guaranteed to be replaced when the
// corresponding `]` is found.
Instruction::JumpToLeft(0usize)
},
']' => {
match waiting_opening_jumps.pop() {
Some(left_jump) => {
// instructions.len() is the position where the right JumpTo
instructions[left_jump] = Instruction::JumpToLeft(instructions.len());
Instruction::JumpToRight(left_jump)
},
None => {
return Err(Error::UnbalancedRightBracket);
}
}
},
_ => continue, // treat invalid characters as comments
};
instructions.push(instruction);
}
}
if!waiting_opening_jumps.is_empty() {
return Err(Error::UnbalancedLeftBracket);
}
return Ok(instructions);
}
fn brainfuck(event: &CommandEvent) {
let instructions = match parse_instructions(event) {
Ok(instructions) => instructions,
Err(e) => {
event.client.send_message(event.channel(), format!("Error: {}", e));
return;
}
};
// Program memory, max size is 2^15
let mut memory = [0u8; 32768];
// Current position in memory
let mut memory_position = 0u16;
// Next instruction to run
let mut next_instruction = 0usize;
// Output string buffer
let mut output = String::new();
// Whether or not we finished cleanly (if false, output error for maximum iterations reached)
let mut done = false;
// u32::MAX as a limit to the number of iterations to run for a single program.
for _ in 0..MAX_ITERATIONS {
if next_instruction >= instructions.len() {
done = true;
break;
}
match instructions[next_instruction] {
Instruction::MoveRight => {
memory_position += 1;
memory_position %= 32768;
},
Instruction::MoveLeft => {
memory_position -= 1;
memory_position %= 32768;
},
Instruction::Increment => memory[memory_position as usize] += 1,
Instruction::Decrement => memory[memory_position as usize] -= 1,
Instruction::Output => {
output.push(memory[memory_position as usize] as char);
if output.len() > MAX_OUTPUT {
event.client.send_message(event.channel(),
"Reached maximum output length. (256)");
done = true;
break;
}
},
Instruction::JumpToLeft(target_position) => {
if memory[memory_position as usize] == 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
Instruction::JumpToRight(target_position) => {
if memory[memory_position as usize]!= 0 {
next_instruction = target_position;
continue; // this avoids the automatic incrementing of next_instruction below.
}
},
}
next_instruction += 1;
}
if!done {
event.client.send_message(event.channel(), "Reached maximum iterations. (134217728)");
}
if output.is_empty() {
event.client.send_message(event.channel(), "No output produced.");
} else
|
}
pub fn register(register: &mut PluginRegister) {
register.register_command("brainfuck", brainfuck);
}
fn escape_output(input: &str) -> String {
let mut result = String::with_capacity(input.len());
for c in input.chars() {
match c {
'\t' => result.push_str("\\t"),
'\r' => result.push_str("\\r"),
'\n' => result.push_str("\\n"),
'\\' => result.push_str("\\\\"),
v @ '\x20'... '\x7e' => result.push(v),
v @ _ => result.extend(v.escape_unicode()),
}
}
return result;
}
|
{
event.client.send_message(event.channel(), format!("Output: {}", escape_output(&output)));
}
|
conditional_block
|
task_queue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery for [task-queue](https://html.spec.whatwg.org/multipage/#task-queue).
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::worker::TrustedWorkerAddress;
use crate::script_runtime::ScriptThreadEventCategory;
use crate::task::TaskBox;
use crate::task_source::TaskSourceName;
use crossbeam_channel::{self, Receiver, Sender};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::collections::{HashMap, VecDeque};
use std::default::Default;
pub type QueuedTask = (
Option<TrustedWorkerAddress>,
ScriptThreadEventCategory,
Box<dyn TaskBox>,
Option<PipelineId>,
TaskSourceName,
);
/// Defining the operations used to convert from a msg T to a QueuedTask.
pub trait QueuedTaskConversion {
fn task_source_name(&self) -> Option<&TaskSourceName>;
fn into_queued_task(self) -> Option<QueuedTask>;
fn from_queued_task(queued_task: QueuedTask) -> Self;
fn wake_up_msg() -> Self;
fn is_wake_up(&self) -> bool;
}
pub struct TaskQueue<T> {
/// The original port on which the task-sources send tasks as messages.
port: Receiver<T>,
/// A sender to ensure the port doesn't block on select while there are throttled tasks.
wake_up_sender: Sender<T>,
/// A queue from which the event-loop can drain tasks.
msg_queue: DomRefCell<VecDeque<T>>,
/// A "business" counter, reset for each iteration of the event-loop
taken_task_counter: Cell<u64>,
/// Tasks that will be throttled for as long as we are "busy".
throttled: DomRefCell<HashMap<TaskSourceName, VecDeque<QueuedTask>>>,
}
impl<T: QueuedTaskConversion> TaskQueue<T> {
pub fn new(port: Receiver<T>, wake_up_sender: Sender<T>) -> TaskQueue<T> {
TaskQueue {
port,
wake_up_sender,
msg_queue: DomRefCell::new(VecDeque::new()),
taken_task_counter: Default::default(),
throttled: Default::default(),
}
}
/// Process incoming tasks, immediately sending priority ones downstream,
/// and categorizing potential throttles.
fn process_incoming_tasks(&self, first_msg: T) {
let mut incoming = Vec::with_capacity(self.port.len() + 1);
if!first_msg.is_wake_up() {
incoming.push(first_msg);
}
while let Ok(msg) = self.port.try_recv() {
if!msg.is_wake_up() {
incoming.push(msg);
}
}
let to_be_throttled: Vec<T> = incoming
.drain_filter(|msg| {
let task_source = match msg.task_source_name() {
Some(task_source) => task_source,
None => return false,
};
match task_source {
TaskSourceName::PerformanceTimeline => return true,
_ => {
// A task that will not be throttled, start counting "business"
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
return false;
},
}
})
.collect();
for msg in incoming {
// Immediately send non-throttled tasks for processing.
let _ = self.msg_queue.borrow_mut().push_back(msg);
}
for msg in to_be_throttled {
// Categorize tasks per task queue.
let (worker, category, boxed, pipeline_id, task_source) = match msg.into_queued_task() {
Some(queued_task) => queued_task,
None => unreachable!(
"A message to be throttled should always be convertible into a queued task"
),
};
let mut throttled_tasks = self.throttled.borrow_mut();
throttled_tasks
.entry(task_source.clone())
.or_insert(VecDeque::new())
.push_back((worker, category, boxed, pipeline_id, task_source));
}
}
/// Reset the queue for a new iteration of the event-loop,
/// returning the port about whose readiness we want to be notified.
pub fn
|
(&self) -> &crossbeam_channel::Receiver<T> {
// This is a new iteration of the event-loop, so we reset the "business" counter.
self.taken_task_counter.set(0);
// We want to be notified when the script-port is ready to receive.
// Hence that's the one we need to include in the select.
&self.port
}
/// Take a message from the front of the queue, without waiting if empty.
pub fn recv(&self) -> Result<T, ()> {
self.msg_queue.borrow_mut().pop_front().ok_or(())
}
/// Same as recv.
pub fn try_recv(&self) -> Result<T, ()> {
self.recv()
}
/// Drain the queue for the current iteration of the event-loop.
/// Holding-back throttles above a given high-water mark.
pub fn take_tasks(&self, first_msg: T) {
// High-watermark: once reached, throttled tasks will be held-back.
const PER_ITERATION_MAX: u64 = 5;
// Always first check for new tasks, but don't reset 'taken_task_counter'.
self.process_incoming_tasks(first_msg);
let mut throttled = self.throttled.borrow_mut();
let mut throttled_length: usize = throttled.values().map(|queue| queue.len()).sum();
let task_source_names = TaskSourceName::all();
let mut task_source_cycler = task_source_names.iter().cycle();
// "being busy", is defined as having more than x tasks for this loop's iteration.
// As long as we're not busy, and there are throttled tasks left:
loop {
let max_reached = self.taken_task_counter.get() > PER_ITERATION_MAX;
let none_left = throttled_length == 0;
match (max_reached, none_left) {
(_, true) => break,
(true, false) => {
// We have reached the high-watermark for this iteration of the event-loop,
// yet also have throttled messages left in the queue.
// Ensure the select wakes up in the next iteration of the event-loop
let _ = self.wake_up_sender.send(T::wake_up_msg());
break;
},
(false, false) => {
// Cycle through non-priority task sources, taking one throttled task from each.
let task_source = task_source_cycler.next().unwrap();
let throttled_queue = match throttled.get_mut(&task_source) {
Some(queue) => queue,
None => continue,
};
let queued_task = match throttled_queue.pop_front() {
Some(queued_task) => queued_task,
None => continue,
};
let msg = T::from_queued_task(queued_task);
let _ = self.msg_queue.borrow_mut().push_back(msg);
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
throttled_length = throttled_length - 1;
},
}
}
}
}
|
select
|
identifier_name
|
task_queue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery for [task-queue](https://html.spec.whatwg.org/multipage/#task-queue).
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::worker::TrustedWorkerAddress;
use crate::script_runtime::ScriptThreadEventCategory;
use crate::task::TaskBox;
use crate::task_source::TaskSourceName;
use crossbeam_channel::{self, Receiver, Sender};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::collections::{HashMap, VecDeque};
use std::default::Default;
pub type QueuedTask = (
Option<TrustedWorkerAddress>,
ScriptThreadEventCategory,
Box<dyn TaskBox>,
Option<PipelineId>,
TaskSourceName,
);
/// Defining the operations used to convert from a msg T to a QueuedTask.
pub trait QueuedTaskConversion {
fn task_source_name(&self) -> Option<&TaskSourceName>;
fn into_queued_task(self) -> Option<QueuedTask>;
fn from_queued_task(queued_task: QueuedTask) -> Self;
fn wake_up_msg() -> Self;
fn is_wake_up(&self) -> bool;
}
pub struct TaskQueue<T> {
/// The original port on which the task-sources send tasks as messages.
port: Receiver<T>,
/// A sender to ensure the port doesn't block on select while there are throttled tasks.
wake_up_sender: Sender<T>,
/// A queue from which the event-loop can drain tasks.
msg_queue: DomRefCell<VecDeque<T>>,
/// A "business" counter, reset for each iteration of the event-loop
taken_task_counter: Cell<u64>,
/// Tasks that will be throttled for as long as we are "busy".
throttled: DomRefCell<HashMap<TaskSourceName, VecDeque<QueuedTask>>>,
}
impl<T: QueuedTaskConversion> TaskQueue<T> {
pub fn new(port: Receiver<T>, wake_up_sender: Sender<T>) -> TaskQueue<T> {
TaskQueue {
port,
wake_up_sender,
msg_queue: DomRefCell::new(VecDeque::new()),
taken_task_counter: Default::default(),
throttled: Default::default(),
}
}
/// Process incoming tasks, immediately sending priority ones downstream,
/// and categorizing potential throttles.
fn process_incoming_tasks(&self, first_msg: T) {
let mut incoming = Vec::with_capacity(self.port.len() + 1);
if!first_msg.is_wake_up() {
incoming.push(first_msg);
}
while let Ok(msg) = self.port.try_recv() {
if!msg.is_wake_up() {
incoming.push(msg);
}
}
let to_be_throttled: Vec<T> = incoming
.drain_filter(|msg| {
let task_source = match msg.task_source_name() {
Some(task_source) => task_source,
None => return false,
|
_ => {
// A task that will not be throttled, start counting "business"
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
return false;
},
}
})
.collect();
for msg in incoming {
// Immediately send non-throttled tasks for processing.
let _ = self.msg_queue.borrow_mut().push_back(msg);
}
for msg in to_be_throttled {
// Categorize tasks per task queue.
let (worker, category, boxed, pipeline_id, task_source) = match msg.into_queued_task() {
Some(queued_task) => queued_task,
None => unreachable!(
"A message to be throttled should always be convertible into a queued task"
),
};
let mut throttled_tasks = self.throttled.borrow_mut();
throttled_tasks
.entry(task_source.clone())
.or_insert(VecDeque::new())
.push_back((worker, category, boxed, pipeline_id, task_source));
}
}
/// Reset the queue for a new iteration of the event-loop,
/// returning the port about whose readiness we want to be notified.
pub fn select(&self) -> &crossbeam_channel::Receiver<T> {
// This is a new iteration of the event-loop, so we reset the "business" counter.
self.taken_task_counter.set(0);
// We want to be notified when the script-port is ready to receive.
// Hence that's the one we need to include in the select.
&self.port
}
/// Take a message from the front of the queue, without waiting if empty.
pub fn recv(&self) -> Result<T, ()> {
self.msg_queue.borrow_mut().pop_front().ok_or(())
}
/// Same as recv.
pub fn try_recv(&self) -> Result<T, ()> {
self.recv()
}
/// Drain the queue for the current iteration of the event-loop.
/// Holding-back throttles above a given high-water mark.
pub fn take_tasks(&self, first_msg: T) {
// High-watermark: once reached, throttled tasks will be held-back.
const PER_ITERATION_MAX: u64 = 5;
// Always first check for new tasks, but don't reset 'taken_task_counter'.
self.process_incoming_tasks(first_msg);
let mut throttled = self.throttled.borrow_mut();
let mut throttled_length: usize = throttled.values().map(|queue| queue.len()).sum();
let task_source_names = TaskSourceName::all();
let mut task_source_cycler = task_source_names.iter().cycle();
// "being busy", is defined as having more than x tasks for this loop's iteration.
// As long as we're not busy, and there are throttled tasks left:
loop {
let max_reached = self.taken_task_counter.get() > PER_ITERATION_MAX;
let none_left = throttled_length == 0;
match (max_reached, none_left) {
(_, true) => break,
(true, false) => {
// We have reached the high-watermark for this iteration of the event-loop,
// yet also have throttled messages left in the queue.
// Ensure the select wakes up in the next iteration of the event-loop
let _ = self.wake_up_sender.send(T::wake_up_msg());
break;
},
(false, false) => {
// Cycle through non-priority task sources, taking one throttled task from each.
let task_source = task_source_cycler.next().unwrap();
let throttled_queue = match throttled.get_mut(&task_source) {
Some(queue) => queue,
None => continue,
};
let queued_task = match throttled_queue.pop_front() {
Some(queued_task) => queued_task,
None => continue,
};
let msg = T::from_queued_task(queued_task);
let _ = self.msg_queue.borrow_mut().push_back(msg);
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
throttled_length = throttled_length - 1;
},
}
}
}
}
|
};
match task_source {
TaskSourceName::PerformanceTimeline => return true,
|
random_line_split
|
task_queue.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! Machinery for [task-queue](https://html.spec.whatwg.org/multipage/#task-queue).
use crate::dom::bindings::cell::DomRefCell;
use crate::dom::worker::TrustedWorkerAddress;
use crate::script_runtime::ScriptThreadEventCategory;
use crate::task::TaskBox;
use crate::task_source::TaskSourceName;
use crossbeam_channel::{self, Receiver, Sender};
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::collections::{HashMap, VecDeque};
use std::default::Default;
pub type QueuedTask = (
Option<TrustedWorkerAddress>,
ScriptThreadEventCategory,
Box<dyn TaskBox>,
Option<PipelineId>,
TaskSourceName,
);
/// Defining the operations used to convert from a msg T to a QueuedTask.
pub trait QueuedTaskConversion {
fn task_source_name(&self) -> Option<&TaskSourceName>;
fn into_queued_task(self) -> Option<QueuedTask>;
fn from_queued_task(queued_task: QueuedTask) -> Self;
fn wake_up_msg() -> Self;
fn is_wake_up(&self) -> bool;
}
pub struct TaskQueue<T> {
/// The original port on which the task-sources send tasks as messages.
port: Receiver<T>,
/// A sender to ensure the port doesn't block on select while there are throttled tasks.
wake_up_sender: Sender<T>,
/// A queue from which the event-loop can drain tasks.
msg_queue: DomRefCell<VecDeque<T>>,
/// A "business" counter, reset for each iteration of the event-loop
taken_task_counter: Cell<u64>,
/// Tasks that will be throttled for as long as we are "busy".
throttled: DomRefCell<HashMap<TaskSourceName, VecDeque<QueuedTask>>>,
}
impl<T: QueuedTaskConversion> TaskQueue<T> {
pub fn new(port: Receiver<T>, wake_up_sender: Sender<T>) -> TaskQueue<T>
|
/// Process incoming tasks, immediately sending priority ones downstream,
/// and categorizing potential throttles.
fn process_incoming_tasks(&self, first_msg: T) {
let mut incoming = Vec::with_capacity(self.port.len() + 1);
if!first_msg.is_wake_up() {
incoming.push(first_msg);
}
while let Ok(msg) = self.port.try_recv() {
if!msg.is_wake_up() {
incoming.push(msg);
}
}
let to_be_throttled: Vec<T> = incoming
.drain_filter(|msg| {
let task_source = match msg.task_source_name() {
Some(task_source) => task_source,
None => return false,
};
match task_source {
TaskSourceName::PerformanceTimeline => return true,
_ => {
// A task that will not be throttled, start counting "business"
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
return false;
},
}
})
.collect();
for msg in incoming {
// Immediately send non-throttled tasks for processing.
let _ = self.msg_queue.borrow_mut().push_back(msg);
}
for msg in to_be_throttled {
// Categorize tasks per task queue.
let (worker, category, boxed, pipeline_id, task_source) = match msg.into_queued_task() {
Some(queued_task) => queued_task,
None => unreachable!(
"A message to be throttled should always be convertible into a queued task"
),
};
let mut throttled_tasks = self.throttled.borrow_mut();
throttled_tasks
.entry(task_source.clone())
.or_insert(VecDeque::new())
.push_back((worker, category, boxed, pipeline_id, task_source));
}
}
/// Reset the queue for a new iteration of the event-loop,
/// returning the port about whose readiness we want to be notified.
pub fn select(&self) -> &crossbeam_channel::Receiver<T> {
// This is a new iteration of the event-loop, so we reset the "business" counter.
self.taken_task_counter.set(0);
// We want to be notified when the script-port is ready to receive.
// Hence that's the one we need to include in the select.
&self.port
}
/// Take a message from the front of the queue, without waiting if empty.
pub fn recv(&self) -> Result<T, ()> {
self.msg_queue.borrow_mut().pop_front().ok_or(())
}
/// Same as recv.
pub fn try_recv(&self) -> Result<T, ()> {
self.recv()
}
/// Drain the queue for the current iteration of the event-loop.
/// Holding-back throttles above a given high-water mark.
pub fn take_tasks(&self, first_msg: T) {
// High-watermark: once reached, throttled tasks will be held-back.
const PER_ITERATION_MAX: u64 = 5;
// Always first check for new tasks, but don't reset 'taken_task_counter'.
self.process_incoming_tasks(first_msg);
let mut throttled = self.throttled.borrow_mut();
let mut throttled_length: usize = throttled.values().map(|queue| queue.len()).sum();
let task_source_names = TaskSourceName::all();
let mut task_source_cycler = task_source_names.iter().cycle();
// "being busy", is defined as having more than x tasks for this loop's iteration.
// As long as we're not busy, and there are throttled tasks left:
loop {
let max_reached = self.taken_task_counter.get() > PER_ITERATION_MAX;
let none_left = throttled_length == 0;
match (max_reached, none_left) {
(_, true) => break,
(true, false) => {
// We have reached the high-watermark for this iteration of the event-loop,
// yet also have throttled messages left in the queue.
// Ensure the select wakes up in the next iteration of the event-loop
let _ = self.wake_up_sender.send(T::wake_up_msg());
break;
},
(false, false) => {
// Cycle through non-priority task sources, taking one throttled task from each.
let task_source = task_source_cycler.next().unwrap();
let throttled_queue = match throttled.get_mut(&task_source) {
Some(queue) => queue,
None => continue,
};
let queued_task = match throttled_queue.pop_front() {
Some(queued_task) => queued_task,
None => continue,
};
let msg = T::from_queued_task(queued_task);
let _ = self.msg_queue.borrow_mut().push_back(msg);
self.taken_task_counter
.set(self.taken_task_counter.get() + 1);
throttled_length = throttled_length - 1;
},
}
}
}
}
|
{
TaskQueue {
port,
wake_up_sender,
msg_queue: DomRefCell::new(VecDeque::new()),
taken_task_counter: Default::default(),
throttled: Default::default(),
}
}
|
identifier_body
|
issue-35668.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn func<'a, T>(a: &'a [T]) -> impl Iterator<Item=&'a T> {
a.iter().map(|a| a*a)
//~^ ERROR binary operation `*` cannot be applied to type `&T`
}
fn
|
() {
let a = (0..30).collect::<Vec<_>>();
for k in func(&a) {
println!("{}", k);
}
}
|
main
|
identifier_name
|
issue-35668.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn func<'a, T>(a: &'a [T]) -> impl Iterator<Item=&'a T> {
a.iter().map(|a| a*a)
//~^ ERROR binary operation `*` cannot be applied to type `&T`
}
fn main()
|
{
let a = (0..30).collect::<Vec<_>>();
for k in func(&a) {
println!("{}", k);
}
}
|
identifier_body
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.