file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
query04.rs
|
use timely::order::TotalOrder;
use timely::dataflow::*;
use timely::dataflow::operators::probe::Handle as ProbeHandle;
use differential_dataflow::operators::*;
use differential_dataflow::lattice::Lattice;
use {Arrangements, Experiment, Collections};
// -- $ID$
// -- TPC-H/TPC-R Order Priority Checking Query (Q4)
// -- Functional Query Definition
// -- Approved February 1998
// :x
// :o
// select
// o_orderpriority,
// count(*) as order_count
// from
// orders
// where
// o_orderdate >= date ':1'
// and o_orderdate < date ':1' + interval '3' month
// and exists (
// select
// *
// from
// lineitem
// where
// l_orderkey = o_orderkey
// and l_commitdate < l_receiptdate
// )
// group by
// o_orderpriority
// order by
// o_orderpriority;
// :n -1
pub fn query<G: Scope>(collections: &mut Collections<G>, probe: &mut ProbeHandle<G::Timestamp>)
where G::Timestamp: Lattice+TotalOrder+Ord {
let lineitems =
collections
.lineitems()
.flat_map(|l| if l.commit_date < l.receipt_date { Some(l.order_key) } else { None })
.distinct_total();
collections
.orders()
.flat_map(|o|
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some((o.order_key, o.order_priority))
}
else { None }
)
.semijoin(&lineitems)
.map(|(_k,v)| v)
.count_total()
//.inspect(|x| println!("{:?}", x))
.probe_with(probe);
}
pub fn query_arranged<G: Scope<Timestamp=usize>>(
scope: &mut G,
probe: &mut ProbeHandle<usize>,
experiment: &mut Experiment,
arrangements: &mut Arrangements,
)
where
G::Timestamp: Lattice+TotalOrder+Ord
{
let arrangements = arrangements.in_scope(scope, experiment);
experiment
.lineitem(scope)
.flat_map(|l| if l.commit_date < l.receipt_date { Some((l.order_key, ())) } else { None })
.distinct_total()
.join_core(&arrangements.order, |_k,&(),o| {
if o.order_date >= ::types::create_date(1993, 7, 1) && o.order_date < ::types::create_date(1993, 10, 1) {
Some(o.order_priority)
}
else
|
})
.count_total()
.probe_with(probe);
}
|
{
None
}
|
conditional_block
|
backend_termion.rs
|
#[cfg(feature = "termion")]
#[test]
fn backend_termion_should_only_write_diffs() -> Result<(), Box<dyn std::error::Error>>
|
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("ab"), area);
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("abc"), area);
})?;
}
let expected = {
use termion::{color, cursor, style};
let mut s = String::new();
// First draw
write!(s, "{}", cursor::Goto(1, 1))?;
s.push('a');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Second draw
write!(s, "{}", cursor::Goto(2, 1))?;
s.push('b');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Third draw
write!(s, "{}", cursor::Goto(3, 1))?;
s.push('c');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Terminal drop
write!(s, "{}", cursor::Show)?;
s
};
assert_eq!(std::str::from_utf8(&bytes)?, expected);
Ok(())
}
|
{
use std::{fmt::Write, io::Cursor};
let mut bytes = Vec::new();
let mut stdout = Cursor::new(&mut bytes);
{
use tui::{
backend::TermionBackend, layout::Rect, widgets::Paragraph, Terminal, TerminalOptions,
Viewport,
};
let backend = TermionBackend::new(&mut stdout);
let area = Rect::new(0, 0, 3, 1);
let mut terminal = Terminal::with_options(
backend,
TerminalOptions {
viewport: Viewport::fixed(area),
},
)?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("a"), area);
|
identifier_body
|
backend_termion.rs
|
#[cfg(feature = "termion")]
#[test]
fn backend_termion_should_only_write_diffs() -> Result<(), Box<dyn std::error::Error>> {
use std::{fmt::Write, io::Cursor};
let mut bytes = Vec::new();
let mut stdout = Cursor::new(&mut bytes);
{
use tui::{
backend::TermionBackend, layout::Rect, widgets::Paragraph, Terminal, TerminalOptions,
Viewport,
};
let backend = TermionBackend::new(&mut stdout);
let area = Rect::new(0, 0, 3, 1);
let mut terminal = Terminal::with_options(
backend,
TerminalOptions {
viewport: Viewport::fixed(area),
},
)?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("a"), area);
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("ab"), area);
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("abc"), area);
})?;
}
let expected = {
use termion::{color, cursor, style};
let mut s = String::new();
// First draw
write!(s, "{}", cursor::Goto(1, 1))?;
s.push('a');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Second draw
write!(s, "{}", cursor::Goto(2, 1))?;
s.push('b');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Third draw
write!(s, "{}", cursor::Goto(3, 1))?;
s.push('c');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Terminal drop
write!(s, "{}", cursor::Show)?;
|
};
assert_eq!(std::str::from_utf8(&bytes)?, expected);
Ok(())
}
|
s
|
random_line_split
|
backend_termion.rs
|
#[cfg(feature = "termion")]
#[test]
fn
|
() -> Result<(), Box<dyn std::error::Error>> {
use std::{fmt::Write, io::Cursor};
let mut bytes = Vec::new();
let mut stdout = Cursor::new(&mut bytes);
{
use tui::{
backend::TermionBackend, layout::Rect, widgets::Paragraph, Terminal, TerminalOptions,
Viewport,
};
let backend = TermionBackend::new(&mut stdout);
let area = Rect::new(0, 0, 3, 1);
let mut terminal = Terminal::with_options(
backend,
TerminalOptions {
viewport: Viewport::fixed(area),
},
)?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("a"), area);
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("ab"), area);
})?;
terminal.draw(|f| {
f.render_widget(Paragraph::new("abc"), area);
})?;
}
let expected = {
use termion::{color, cursor, style};
let mut s = String::new();
// First draw
write!(s, "{}", cursor::Goto(1, 1))?;
s.push('a');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Second draw
write!(s, "{}", cursor::Goto(2, 1))?;
s.push('b');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Third draw
write!(s, "{}", cursor::Goto(3, 1))?;
s.push('c');
write!(s, "{}", color::Fg(color::Reset))?;
write!(s, "{}", color::Bg(color::Reset))?;
write!(s, "{}", style::Reset)?;
write!(s, "{}", cursor::Hide)?;
// Terminal drop
write!(s, "{}", cursor::Show)?;
s
};
assert_eq!(std::str::from_utf8(&bytes)?, expected);
Ok(())
}
|
backend_termion_should_only_write_diffs
|
identifier_name
|
const-block-non-item-statement-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
const A: usize = { 1; 2 };
//~^ ERROR statements in constants are unstable
const B: usize = { { } 2 };
//~^ ERROR statements in constants are unstable
macro_rules! foo {
() => (()) //~ ERROR statements in constants are unstable
}
const C: usize = { foo!(); 2 };
const D: usize = { let x = 4; 2 };
//~^ ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
//~| ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
pub fn
|
() {}
|
main
|
identifier_name
|
const-block-non-item-statement-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
const A: usize = { 1; 2 };
//~^ ERROR statements in constants are unstable
const B: usize = { { } 2 };
//~^ ERROR statements in constants are unstable
macro_rules! foo {
() => (()) //~ ERROR statements in constants are unstable
}
const C: usize = { foo!(); 2 };
const D: usize = { let x = 4; 2 };
//~^ ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
//~| ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
pub fn main()
|
{}
|
identifier_body
|
|
const-block-non-item-statement-2.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
const A: usize = { 1; 2 };
//~^ ERROR statements in constants are unstable
const B: usize = { { } 2 };
//~^ ERROR statements in constants are unstable
macro_rules! foo {
() => (()) //~ ERROR statements in constants are unstable
}
const C: usize = { foo!(); 2 };
const D: usize = { let x = 4; 2 };
//~^ ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
//~| ERROR let bindings in constants are unstable
//~| ERROR statements in constants are unstable
pub fn main() {}
|
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
random_line_split
|
lib.rs
|
// Copyright © 2015, Peter Atashian
// Licensed under the MIT License <LICENSE.md>
//! FFI bindings to d3d9.
#![cfg(windows)]
extern crate winapi;
use winapi::*;
#[cfg(any(target_arch = "x86", target_arch = "x86_64", target_arch = "arm"))]
|
extern "system" {
pub fn D3DPERF_BeginEvent(col: D3DCOLOR, wszName: LPCWSTR) -> INT;
pub fn D3DPERF_EndEvent() -> INT;
pub fn D3DPERF_GetStatus() -> DWORD;
pub fn D3DPERF_QueryRepeatFrame() -> BOOL;
pub fn D3DPERF_SetMarker(col: D3DCOLOR, wszName: LPCWSTR) -> ();
pub fn D3DPERF_SetOptions(dwOptions: DWORD) -> ();
pub fn D3DPERF_SetRegion(col: D3DCOLOR, wszName: LPCWSTR) -> ();
pub fn Direct3DCreate9(SDKVersion: UINT) -> *mut IDirect3D9;
pub fn Direct3DCreate9Ex(SDKVersion: UINT, arg1: *mut *mut IDirect3D9Ex) -> HRESULT;
}
|
random_line_split
|
|
logging-right-crate.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:logging_right_crate.rs
// exec-env:RUST_LOG=logging-right-crate=debug
// This is a test for issue #3046 to make sure that when we monomorphize a
// function from one crate to another the right top-level logging name is
// preserved.
//
// It used to be the case that if logging were turned on for this crate, all
// monomorphized functions from other crates had logging turned on (their
// logging module names were all incorrect). This test ensures that this no
// longer happens by enabling logging for *this* crate and then invoking a
// function in an external crate which will panic when logging is enabled.
|
extern crate logging_right_crate;
pub fn main() {
// this function panicks if logging is turned on
logging_right_crate::foo::<int>();
}
|
random_line_split
|
|
logging-right-crate.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:logging_right_crate.rs
// exec-env:RUST_LOG=logging-right-crate=debug
// This is a test for issue #3046 to make sure that when we monomorphize a
// function from one crate to another the right top-level logging name is
// preserved.
//
// It used to be the case that if logging were turned on for this crate, all
// monomorphized functions from other crates had logging turned on (their
// logging module names were all incorrect). This test ensures that this no
// longer happens by enabling logging for *this* crate and then invoking a
// function in an external crate which will panic when logging is enabled.
extern crate logging_right_crate;
pub fn
|
() {
// this function panicks if logging is turned on
logging_right_crate::foo::<int>();
}
|
main
|
identifier_name
|
logging-right-crate.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// aux-build:logging_right_crate.rs
// exec-env:RUST_LOG=logging-right-crate=debug
// This is a test for issue #3046 to make sure that when we monomorphize a
// function from one crate to another the right top-level logging name is
// preserved.
//
// It used to be the case that if logging were turned on for this crate, all
// monomorphized functions from other crates had logging turned on (their
// logging module names were all incorrect). This test ensures that this no
// longer happens by enabling logging for *this* crate and then invoking a
// function in an external crate which will panic when logging is enabled.
extern crate logging_right_crate;
pub fn main()
|
{
// this function panicks if logging is turned on
logging_right_crate::foo::<int>();
}
|
identifier_body
|
|
mod.rs
|
pub use self::cache::*;
pub use self::combiners::*;
pub use self::generators::*;
pub use self::modifiers::*;
pub use self::selectors::*;
pub use self::transformers::*;
mod cache;
mod combiners;
mod generators;
mod modifiers;
mod selectors;
mod transformers;
/// Base trait for noise functions.
///
/// A noise function is a object that calculates and outputs a value given a
/// n-Dimensional input value, where n is (2,3,4).
///
/// Each type of noise function uses a specific method to calculate an output
/// value. Some of these methods include:
///
/// * Calculating a value using a coherent-noise function or some other
/// mathematical function.
/// * Mathematically changing the output value from another noise function
/// in various ways.
/// * Combining the output values from two noise functions in various ways.
pub trait NoiseFn<T> {
fn get(&self, point: T) -> f64;
}
impl<'a, T, M: NoiseFn<T>> NoiseFn<T> for &'a M {
#[inline]
fn
|
(&self, point: T) -> f64 {
M::get(*self, point)
}
}
/// Trait for functions that require a seed before generating their values
pub trait Seedable {
/// Set the seed for the function implementing the `Seedable` trait
fn set_seed(self, seed: u32) -> Self;
/// Getter to retrieve the seed from the function
fn seed(&self) -> u32;
}
|
get
|
identifier_name
|
mod.rs
|
pub use self::cache::*;
pub use self::combiners::*;
pub use self::generators::*;
pub use self::modifiers::*;
pub use self::selectors::*;
pub use self::transformers::*;
mod cache;
mod combiners;
mod generators;
mod modifiers;
mod selectors;
mod transformers;
/// Base trait for noise functions.
///
/// A noise function is a object that calculates and outputs a value given a
/// n-Dimensional input value, where n is (2,3,4).
///
/// Each type of noise function uses a specific method to calculate an output
/// value. Some of these methods include:
///
/// * Calculating a value using a coherent-noise function or some other
/// mathematical function.
/// * Mathematically changing the output value from another noise function
/// in various ways.
/// * Combining the output values from two noise functions in various ways.
pub trait NoiseFn<T> {
|
#[inline]
fn get(&self, point: T) -> f64 {
M::get(*self, point)
}
}
/// Trait for functions that require a seed before generating their values
pub trait Seedable {
/// Set the seed for the function implementing the `Seedable` trait
fn set_seed(self, seed: u32) -> Self;
/// Getter to retrieve the seed from the function
fn seed(&self) -> u32;
}
|
fn get(&self, point: T) -> f64;
}
impl<'a, T, M: NoiseFn<T>> NoiseFn<T> for &'a M {
|
random_line_split
|
mod.rs
|
pub use self::cache::*;
pub use self::combiners::*;
pub use self::generators::*;
pub use self::modifiers::*;
pub use self::selectors::*;
pub use self::transformers::*;
mod cache;
mod combiners;
mod generators;
mod modifiers;
mod selectors;
mod transformers;
/// Base trait for noise functions.
///
/// A noise function is a object that calculates and outputs a value given a
/// n-Dimensional input value, where n is (2,3,4).
///
/// Each type of noise function uses a specific method to calculate an output
/// value. Some of these methods include:
///
/// * Calculating a value using a coherent-noise function or some other
/// mathematical function.
/// * Mathematically changing the output value from another noise function
/// in various ways.
/// * Combining the output values from two noise functions in various ways.
pub trait NoiseFn<T> {
fn get(&self, point: T) -> f64;
}
impl<'a, T, M: NoiseFn<T>> NoiseFn<T> for &'a M {
#[inline]
fn get(&self, point: T) -> f64
|
}
/// Trait for functions that require a seed before generating their values
pub trait Seedable {
/// Set the seed for the function implementing the `Seedable` trait
fn set_seed(self, seed: u32) -> Self;
/// Getter to retrieve the seed from the function
fn seed(&self) -> u32;
}
|
{
M::get(*self, point)
}
|
identifier_body
|
linkage-visibility.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(dynamic_lib)]
|
// We're testing linkage visibility; the compiler warns us, but we want to
// do the runtime check that these functions aren't exported.
#![allow(private_no_mangle_fns)]
use std::dynamic_lib::DynamicLibrary;
#[no_mangle]
pub fn foo() { bar(); }
pub fn foo2<T>() {
fn bar2() {
bar();
}
bar2();
}
#[no_mangle]
fn bar() { }
#[allow(dead_code)]
#[no_mangle]
fn baz() { }
pub fn test() {
let lib = DynamicLibrary::open(None).unwrap();
unsafe {
assert!(lib.symbol::<isize>("foo").is_ok());
assert!(lib.symbol::<isize>("baz").is_err());
assert!(lib.symbol::<isize>("bar").is_err());
}
}
|
random_line_split
|
|
linkage-visibility.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(dynamic_lib)]
// We're testing linkage visibility; the compiler warns us, but we want to
// do the runtime check that these functions aren't exported.
#![allow(private_no_mangle_fns)]
use std::dynamic_lib::DynamicLibrary;
#[no_mangle]
pub fn foo() { bar(); }
pub fn foo2<T>() {
fn bar2() {
bar();
}
bar2();
}
#[no_mangle]
fn bar() { }
#[allow(dead_code)]
#[no_mangle]
fn baz()
|
pub fn test() {
let lib = DynamicLibrary::open(None).unwrap();
unsafe {
assert!(lib.symbol::<isize>("foo").is_ok());
assert!(lib.symbol::<isize>("baz").is_err());
assert!(lib.symbol::<isize>("bar").is_err());
}
}
|
{ }
|
identifier_body
|
linkage-visibility.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(dynamic_lib)]
// We're testing linkage visibility; the compiler warns us, but we want to
// do the runtime check that these functions aren't exported.
#![allow(private_no_mangle_fns)]
use std::dynamic_lib::DynamicLibrary;
#[no_mangle]
pub fn foo() { bar(); }
pub fn foo2<T>() {
fn bar2() {
bar();
}
bar2();
}
#[no_mangle]
fn
|
() { }
#[allow(dead_code)]
#[no_mangle]
fn baz() { }
pub fn test() {
let lib = DynamicLibrary::open(None).unwrap();
unsafe {
assert!(lib.symbol::<isize>("foo").is_ok());
assert!(lib.symbol::<isize>("baz").is_err());
assert!(lib.symbol::<isize>("bar").is_err());
}
}
|
bar
|
identifier_name
|
weak_lang_items.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Validity checking for weak lang items
use session::config;
use middle::lang_items;
use rustc_data_structures::fx::FxHashSet;
use rustc_target::spec::PanicStrategy;
use syntax::ast;
use syntax::symbol::Symbol;
use syntax_pos::Span;
use hir::def_id::DefId;
use hir::intravisit::{Visitor, NestedVisitorMap};
use hir::intravisit;
use hir;
use ty::TyCtxt;
macro_rules! weak_lang_items {
($($name:ident, $item:ident, $sym:ident;)*) => (
struct Context<'a, 'tcx: 'a> {
tcx: TyCtxt<'a, 'tcx, 'tcx>,
items: &'a mut lang_items::LanguageItems,
}
/// Checks the crate for usage of weak lang items, returning a vector of all the
/// language items required by this crate, but not defined yet.
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
items: &mut lang_items::LanguageItems) {
// These are never called by user code, they're generated by the compiler.
// They will never implicitly be added to the `missing` array unless we do
// so here.
if items.eh_personality().is_none() {
items.missing.push(lang_items::EhPersonalityLangItem);
}
if tcx.sess.target.target.options.custom_unwind_resume &
items.eh_unwind_resume().is_none() {
items.missing.push(lang_items::EhUnwindResumeLangItem);
}
{
let mut cx = Context { tcx, items };
tcx.hir().krate().visit_all_item_likes(&mut cx.as_deep_visitor());
}
verify(tcx, items);
}
pub fn link_name(attrs: &[ast::Attribute]) -> Option<Symbol> {
lang_items::extract(attrs).and_then(|(name, _)| {
$(if name == stringify!($name) {
Some(Symbol::intern(stringify!($sym)))
} else)* {
None
}
})
}
/// Returns whether the specified `lang_item` doesn't actually need to be
/// present for this compilation.
///
/// Not all lang items are always required for each compilation, particularly in
/// the case of panic=abort. In these situations some lang items are injected by
/// crates and don't actually need to be defined in libstd.
pub fn whitelisted(tcx: TyCtxt<'_, '_, '_>, lang_item: lang_items::LangItem) -> bool {
// If we're not compiling with unwinding, we won't actually need these
// symbols. Other panic runtimes ensure that the relevant symbols are
// available to link things together, but they're never exercised.
if tcx.sess.panic_strategy()!= PanicStrategy::Unwind {
return lang_item == lang_items::EhPersonalityLangItem ||
lang_item == lang_items::EhUnwindResumeLangItem
}
false
}
fn verify<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
items: &lang_items::LanguageItems) {
// We only need to check for the presence of weak lang items if we're
// emitting something that's not an rlib.
let needs_check = tcx.sess.crate_types.borrow().iter().any(|kind| {
match *kind {
config::CrateType::Dylib |
config::CrateType::ProcMacro |
config::CrateType::Cdylib |
config::CrateType::Executable |
config::CrateType::Staticlib => true,
config::CrateType::Rlib => false,
}
});
if!needs_check {
return
}
let mut missing = FxHashSet::default();
for &cnum in tcx.crates().iter() {
for &item in tcx.missing_lang_items(cnum).iter() {
missing.insert(item);
}
}
$(
if missing.contains(&lang_items::$item) &&
!whitelisted(tcx, lang_items::$item) &&
items.$name().is_none() {
if lang_items::$item == lang_items::PanicImplLangItem {
tcx.sess.err(&format!("`#[panic_handler]` function required, \
but not found"));
|
tcx.sess.err(&format!("language item required, but not found: `{}`",
stringify!($name)));
}
}
)*
}
impl<'a, 'tcx> Context<'a, 'tcx> {
fn register(&mut self, name: &str, span: Span) {
$(if name == stringify!($name) {
if self.items.$name().is_none() {
self.items.missing.push(lang_items::$item);
}
} else)* {
span_err!(self.tcx.sess, span, E0264,
"unknown external lang item: `{}`",
name);
}
}
}
impl<'a, 'tcx, 'v> Visitor<'v> for Context<'a, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'v> {
NestedVisitorMap::None
}
fn visit_foreign_item(&mut self, i: &hir::ForeignItem) {
if let Some((lang_item, _)) = lang_items::extract(&i.attrs) {
self.register(&lang_item.as_str(), i.span);
}
intravisit::walk_foreign_item(self, i)
}
}
impl<'a, 'tcx, 'gcx> TyCtxt<'a, 'tcx, 'gcx> {
pub fn is_weak_lang_item(&self, item_def_id: DefId) -> bool {
let lang_items = self.lang_items();
let did = Some(item_def_id);
$(lang_items.$name() == did)||+
}
}
) }
weak_lang_items! {
panic_impl, PanicImplLangItem, rust_begin_unwind;
eh_personality, EhPersonalityLangItem, rust_eh_personality;
eh_unwind_resume, EhUnwindResumeLangItem, rust_eh_unwind_resume;
oom, OomLangItem, rust_oom;
}
|
} else if lang_items::$item == lang_items::OomLangItem {
tcx.sess.err(&format!("`#[alloc_error_handler]` function required, \
but not found"));
} else {
|
random_line_split
|
floatfns.rs
|
//! Functions operating on float numbers.
use libc;
use std::mem;
use remacs_macros::lisp_fn;
use remacs_sys::{EmacsDouble, EmacsInt, EmacsUint, Lisp_Object, MOST_NEGATIVE_FIXNUM,
MOST_POSITIVE_FIXNUM};
use remacs_sys::{Qarith_error, Qinteger_or_marker_p, Qnumberp, Qrange_error};
use remacs_sys::build_string;
use remacs_sys::libm;
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use math::ArithOp;
/// Either extracts a floating point number from a lisp number (of any kind) or throws an error
/// TODO this is used from C in a few places; remove afterwards.
#[no_mangle]
pub extern "C" fn extract_float(f: Lisp_Object) -> EmacsDouble {
let f = LispObject::from(f);
f.any_to_float_or_error()
}
/// Calculate the modulus of two elisp floats.
pub fn fmod_float(mut f1: f64, f2: f64) -> LispObject {
f1 %= f2;
// Ensure that the remainder has the correct sign.
if f2 < 0.0 && f1 > 0.0 || f2 > 0.0 && f1 < 0.0 {
f1 += f2;
}
LispObject::from_float(f1)
}
macro_rules! simple_float_op {
($lisp_name:expr, $float_func:ident, $lisp_docs:expr) => {
#[doc = $lisp_docs]
#[lisp_fn(name = $lisp_name, c_name = $lisp_name)]
fn $float_func(arg: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let val = d.$float_func();
LispObject::from_float(val)
}
}
}
simple_float_op!("acos", acos, "Return the inverse cosine of ARG.");
simple_float_op!("asin", asin, "Return the inverse sine of ARG.");
// atan is special, defined later
simple_float_op!("cos", cos, "Return the cosine of ARG.");
simple_float_op!("sin", sin, "Return the sine of ARG.");
simple_float_op!("tan", tan, "Return the tangent of ARG.");
simple_float_op!("exp", exp, "Return the exponential base e of ARG.");
simple_float_op!("sqrt", sqrt, "Return the square root of ARG.");
/// Driver for standard arithmetic operations on floats.
pub fn float_arith_driver(
mut accum: f64,
argstart: usize,
code: ArithOp,
args: &[LispObject],
) -> LispObject {
for (i, &val) in args[argstart..].iter().enumerate() {
let argnum = argstart + i;
let next = match val.as_number_coerce_marker_or_error() {
LispNumber::Float(f) => f,
LispNumber::Fixnum(d) => d as f64,
};
match code {
ArithOp::Add => accum += next,
ArithOp::Sub => {
accum = {
if argnum > 0 {
accum - next
} else if args.len() == 1 {
-next
} else {
next
}
}
}
ArithOp::Mult => accum *= next,
ArithOp::Div => if args.len() > 1 && argnum == 0 {
accum = next;
} else {
if next == 0. {
xsignal!(Qarith_error);
}
accum /= next;
},
ArithOp::Logand | ArithOp::Logior | ArithOp::Logxor => {
wrong_type!(Qinteger_or_marker_p, val)
}
}
}
LispObject::from_float(accum)
}
/// Return non nil if argument X is a NaN.
#[lisp_fn]
pub fn isnan(x: LispObject) -> LispObject {
let f = x.as_float_or_error();
LispObject::from_bool(f.is_nan())
}
/// Return the inverse tangent of the arguments.
/// If only one argument Y is given, return the inverse tangent of Y.
/// If two arguments Y and X are given, return the inverse tangent of Y
/// divided by X, i.e. the angle in radians between the vector (X, Y)
/// and the x-axis
#[lisp_fn(min = "1")]
pub fn atan(y: LispObject, x: LispObject) -> LispObject {
let y = y.any_to_float_or_error();
if x.is_nil() {
LispObject::from_float(y.atan())
} else {
let x = x.any_to_float_or_error();
LispObject::from_float(y.atan2(x))
}
}
/// Return the natural logarithm of ARG.
/// If the optional argument BASE is given, return log ARG using that base.
#[lisp_fn(min = "1")]
pub fn log(arg: LispObject, base: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let res = if base.is_nil() {
d.ln()
} else {
let base = base.any_to_float_or_error();
if base == 10.0 {
d.log10()
} else if base == 2.0 {
d.log2()
} else {
d.log(base)
}
};
LispObject::from_float(res)
}
/* These functions take only floats now. */
/// Return the smallest integer no less than ARG, as a float.
/// (Round toward +inf.)
#[lisp_fn]
pub fn fceiling(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.ceil())
}
/// Return the largest integer no greater than ARG, as a float.
/// (Round toward -inf.)
#[lisp_fn]
pub fn ffloor(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.floor())
}
/// Truncate a floating point number to an integral float value.
/// (Round toward zero.)
#[lisp_fn]
pub fn ftruncate(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
if d > 0.0 {
LispObject::from_float(d.floor())
} else {
LispObject::from_float(d.ceil())
}
}
/// Return the floating point number equal to ARG.
#[lisp_fn]
pub fn float(arg: LispObject) -> LispObject {
if arg.is_float() {
arg
} else if let Some(n) = arg.as_fixnum() {
LispObject::from_float(n as EmacsDouble)
} else {
wrong_type!(Qnumberp, arg);
}
}
/// Copy sign of X2 to value of X1, and return the result.
/// Cause an error if X1 or X2 is not a float.
#[lisp_fn]
pub fn copysign(x1: LispObject, x2: LispObject) -> LispObject {
let f1 = x1.as_float_or_error();
let f2 = x2.as_float_or_error();
if libm::signbit(f1)!= libm::signbit(f2) {
LispObject::from_float(-f1)
} else {
x1
}
}
/// Get significand and exponent of a floating point number.
/// Breaks the floating point number X into its binary significand SGNFCAND
/// (a floating point value between 0.5 (included) and 1.0 (excluded))
/// and an integral exponent EXP for 2, such that:
///
/// X = SGNFCAND * 2^EXP
///
/// The function returns the cons cell (SGNFCAND. EXP).
/// If X is zero, both parts (SGNFCAND and EXP) are zero.
#[lisp_fn]
pub fn frexp(x: LispObject) -> LispObject {
let f = x.any_to_float_or_error();
let (significand, exponent) = libm::frexp(f);
LispObject::cons(
LispObject::from_float(significand),
LispObject::from_fixnum(exponent as EmacsInt),
)
}
/// Return SGNFCAND * 2**EXPONENT, as a floating point number.
/// EXPONENT must be an integer.
#[lisp_fn]
pub fn ldexp(sgnfcand: LispObject, exponent: LispObject) -> LispObject
|
/// Return the exponential ARG1 ** ARG2.
#[lisp_fn]
pub fn expt(arg1: LispObject, arg2: LispObject) -> LispObject {
if let (Some(x), Some(y)) = (arg1.as_fixnum(), arg2.as_fixnum()) {
if y >= 0 && y <= u32::max_value() as EmacsInt {
return LispObject::from_fixnum(x.pow(y as u32));
}
}
let b = arg1.any_to_float_or_error();
let e = arg2.any_to_float_or_error();
LispObject::from_float(b.powf(e))
}
/// Returns largest integer <= the base 2 log of the magnitude of ARG.
/// This is the same as the exponent of a float.
#[lisp_fn]
pub fn logb(arg: LispObject) -> LispObject {
let res = if let Some(n) = arg.as_fixnum() {
let i = n.abs();
if i == 0 {
MOST_NEGATIVE_FIXNUM
} else {
(mem::size_of::<EmacsUint>() * 8) as EmacsInt - 1 - i.leading_zeros() as EmacsInt
}
} else if let Some(f) = arg.as_float() {
if f == 0.0 {
MOST_NEGATIVE_FIXNUM
} else if f.is_finite() {
let (_, exp) = libm::frexp(f);
exp as EmacsInt - 1
} else {
MOST_POSITIVE_FIXNUM
}
} else {
wrong_type!(Qnumberp, arg)
};
LispObject::from_fixnum(res)
}
/// Return the nearest integer to ARG, as a float.
#[lisp_fn]
pub fn fround(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(libm::rint(d))
}
/// Return the smallest integer no less than ARG.
/// This rounds the value towards +inf.
/// With optional DIVISOR, return the smallest integer no less than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn ceiling(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.ceil(), ceiling2, "ceiling")
}
/// Return the largest integer no greater than ARG.
/// This rounds the value towards -inf.
/// With optional DIVISOR, return the largest integer no greater than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn floor(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.floor(), floor2, "floor")
}
/// Return the nearest integer to ARG.
/// With optional DIVISOR, return the nearest integer to ARG/DIVISOR.
///
/// Rounding a value equidistant between two integers may choose the
/// integer closer to zero, or it may prefer an even integer, depending on
/// your machine. For example, (round 2.5) can return 3 on some
/// systems, but 2 on others.
#[lisp_fn(min = "1")]
pub fn round(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, libm::rint, round2, "round")
}
/// Truncate a floating point number to an int.
/// Rounds ARG toward zero.
/// With optional DIVISOR, truncate ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn truncate(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.trunc(), truncate2, "truncate")
}
fn rounding_driver<F>(
arg: LispObject,
divisor: LispObject,
double_round: F,
int_round2: fn(EmacsInt, EmacsInt) -> EmacsInt,
name: &str,
) -> LispObject
where
F: Fn(f64) -> f64,
{
let d;
if divisor.is_nil() {
if arg.is_fixnum() {
return arg;
} else if let Some(f) = arg.as_float() {
d = f;
} else {
wrong_type!(Qnumberp, arg)
}
} else {
if let (Some(arg), Some(div)) = (arg.as_fixnum(), divisor.as_fixnum()) {
if div == 0 {
xsignal!(Qarith_error);
}
return LispObject::from_fixnum(int_round2(arg, div));
}
let arg = arg.any_to_float_or_error();
let div = divisor.any_to_float_or_error();
d = arg / div;
}
// Round, coarsely test for fixnum overflow before converting to
// EmacsInt (to avoid undefined behavior), and then exactly test
// for overflow after converting (as FIXNUM_OVERFLOW_P is inaccurate
// on floats).
let dr = double_round(d);
if dr.abs() < (2 * (MOST_POSITIVE_FIXNUM + 1)) as f64 {
let ir = dr as EmacsInt;
if!LispObject::fixnum_overflow(ir) {
return LispObject::from_fixnum(ir);
}
}
let errstr = LispObject::from(unsafe {
build_string(name.as_ptr() as *const libc::c_char)
});
xsignal!(Qrange_error, errstr, arg)
}
fn ceiling2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 + ((i1 % i2!= 0) & ((i1 < 0) == (i2 < 0))) as EmacsInt
}
fn floor2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 - ((i1 % i2!= 0) & ((i1 < 0)!= (i2 < 0))) as EmacsInt
}
fn truncate2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2
}
fn round2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
// The division operator gives us one remainder R, but we want the
// remainder R1 on the other side of 0 if R1 is closer to 0 than R
// is; because we want to round to even, we also want R1 if R and R1
// are the same distance from 0 and if C's quotient is odd.
let q = i1 / i2;
let r = i1 % i2;
let abs_r = r.abs();
let abs_r1 = i2.abs() - abs_r;
q + if abs_r + (q & 1) <= abs_r1 {
0
} else if (i2 ^ r) < 0 {
-1
} else {
1
}
}
// Since these are generated via a macro the build cannot hook them into the
// system automatically. Do not add more items here unless they are also generated
// with something like simple_float_op.
pub fn rust_init_extra_syms() {
unsafe {
defsubr(Sacos.as_ptr());
defsubr(Sasin.as_ptr());
defsubr(Scos.as_ptr());
defsubr(Ssin.as_ptr());
defsubr(Stan.as_ptr());
defsubr(Sexp.as_ptr());
defsubr(Ssqrt.as_ptr());
}
}
include!(concat!(env!("OUT_DIR"), "/floatfns_exports.rs"));
|
{
let exponent = exponent.as_fixnum_or_error();
let significand = sgnfcand.any_to_float_or_error();
let result = libm::ldexp(significand, exponent as libc::c_int);
LispObject::from_float(result)
}
|
identifier_body
|
floatfns.rs
|
//! Functions operating on float numbers.
use libc;
use std::mem;
use remacs_macros::lisp_fn;
use remacs_sys::{EmacsDouble, EmacsInt, EmacsUint, Lisp_Object, MOST_NEGATIVE_FIXNUM,
MOST_POSITIVE_FIXNUM};
use remacs_sys::{Qarith_error, Qinteger_or_marker_p, Qnumberp, Qrange_error};
use remacs_sys::build_string;
use remacs_sys::libm;
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use math::ArithOp;
/// Either extracts a floating point number from a lisp number (of any kind) or throws an error
/// TODO this is used from C in a few places; remove afterwards.
#[no_mangle]
pub extern "C" fn extract_float(f: Lisp_Object) -> EmacsDouble {
let f = LispObject::from(f);
f.any_to_float_or_error()
}
/// Calculate the modulus of two elisp floats.
pub fn fmod_float(mut f1: f64, f2: f64) -> LispObject {
f1 %= f2;
// Ensure that the remainder has the correct sign.
if f2 < 0.0 && f1 > 0.0 || f2 > 0.0 && f1 < 0.0 {
f1 += f2;
}
LispObject::from_float(f1)
}
macro_rules! simple_float_op {
($lisp_name:expr, $float_func:ident, $lisp_docs:expr) => {
#[doc = $lisp_docs]
#[lisp_fn(name = $lisp_name, c_name = $lisp_name)]
fn $float_func(arg: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let val = d.$float_func();
LispObject::from_float(val)
}
}
}
simple_float_op!("acos", acos, "Return the inverse cosine of ARG.");
simple_float_op!("asin", asin, "Return the inverse sine of ARG.");
// atan is special, defined later
simple_float_op!("cos", cos, "Return the cosine of ARG.");
simple_float_op!("sin", sin, "Return the sine of ARG.");
simple_float_op!("tan", tan, "Return the tangent of ARG.");
simple_float_op!("exp", exp, "Return the exponential base e of ARG.");
simple_float_op!("sqrt", sqrt, "Return the square root of ARG.");
/// Driver for standard arithmetic operations on floats.
pub fn float_arith_driver(
mut accum: f64,
argstart: usize,
code: ArithOp,
args: &[LispObject],
) -> LispObject {
for (i, &val) in args[argstart..].iter().enumerate() {
let argnum = argstart + i;
let next = match val.as_number_coerce_marker_or_error() {
LispNumber::Float(f) => f,
LispNumber::Fixnum(d) => d as f64,
};
match code {
ArithOp::Add => accum += next,
ArithOp::Sub => {
accum = {
if argnum > 0 {
accum - next
} else if args.len() == 1 {
-next
} else {
next
}
}
}
ArithOp::Mult => accum *= next,
ArithOp::Div => if args.len() > 1 && argnum == 0 {
accum = next;
} else {
if next == 0. {
xsignal!(Qarith_error);
}
accum /= next;
},
ArithOp::Logand | ArithOp::Logior | ArithOp::Logxor => {
wrong_type!(Qinteger_or_marker_p, val)
}
}
}
LispObject::from_float(accum)
}
/// Return non nil if argument X is a NaN.
#[lisp_fn]
pub fn isnan(x: LispObject) -> LispObject {
let f = x.as_float_or_error();
LispObject::from_bool(f.is_nan())
}
/// Return the inverse tangent of the arguments.
/// If only one argument Y is given, return the inverse tangent of Y.
/// If two arguments Y and X are given, return the inverse tangent of Y
/// divided by X, i.e. the angle in radians between the vector (X, Y)
/// and the x-axis
#[lisp_fn(min = "1")]
pub fn atan(y: LispObject, x: LispObject) -> LispObject {
let y = y.any_to_float_or_error();
if x.is_nil() {
LispObject::from_float(y.atan())
} else {
let x = x.any_to_float_or_error();
LispObject::from_float(y.atan2(x))
}
}
/// Return the natural logarithm of ARG.
/// If the optional argument BASE is given, return log ARG using that base.
#[lisp_fn(min = "1")]
pub fn log(arg: LispObject, base: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let res = if base.is_nil() {
d.ln()
} else {
let base = base.any_to_float_or_error();
if base == 10.0 {
d.log10()
} else if base == 2.0 {
d.log2()
} else {
d.log(base)
}
};
LispObject::from_float(res)
}
/* These functions take only floats now. */
|
pub fn fceiling(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.ceil())
}
/// Return the largest integer no greater than ARG, as a float.
/// (Round toward -inf.)
#[lisp_fn]
pub fn ffloor(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.floor())
}
/// Truncate a floating point number to an integral float value.
/// (Round toward zero.)
#[lisp_fn]
pub fn ftruncate(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
if d > 0.0 {
LispObject::from_float(d.floor())
} else {
LispObject::from_float(d.ceil())
}
}
/// Return the floating point number equal to ARG.
#[lisp_fn]
pub fn float(arg: LispObject) -> LispObject {
if arg.is_float() {
arg
} else if let Some(n) = arg.as_fixnum() {
LispObject::from_float(n as EmacsDouble)
} else {
wrong_type!(Qnumberp, arg);
}
}
/// Copy sign of X2 to value of X1, and return the result.
/// Cause an error if X1 or X2 is not a float.
#[lisp_fn]
pub fn copysign(x1: LispObject, x2: LispObject) -> LispObject {
let f1 = x1.as_float_or_error();
let f2 = x2.as_float_or_error();
if libm::signbit(f1)!= libm::signbit(f2) {
LispObject::from_float(-f1)
} else {
x1
}
}
/// Get significand and exponent of a floating point number.
/// Breaks the floating point number X into its binary significand SGNFCAND
/// (a floating point value between 0.5 (included) and 1.0 (excluded))
/// and an integral exponent EXP for 2, such that:
///
/// X = SGNFCAND * 2^EXP
///
/// The function returns the cons cell (SGNFCAND. EXP).
/// If X is zero, both parts (SGNFCAND and EXP) are zero.
#[lisp_fn]
pub fn frexp(x: LispObject) -> LispObject {
let f = x.any_to_float_or_error();
let (significand, exponent) = libm::frexp(f);
LispObject::cons(
LispObject::from_float(significand),
LispObject::from_fixnum(exponent as EmacsInt),
)
}
/// Return SGNFCAND * 2**EXPONENT, as a floating point number.
/// EXPONENT must be an integer.
#[lisp_fn]
pub fn ldexp(sgnfcand: LispObject, exponent: LispObject) -> LispObject {
let exponent = exponent.as_fixnum_or_error();
let significand = sgnfcand.any_to_float_or_error();
let result = libm::ldexp(significand, exponent as libc::c_int);
LispObject::from_float(result)
}
/// Return the exponential ARG1 ** ARG2.
#[lisp_fn]
pub fn expt(arg1: LispObject, arg2: LispObject) -> LispObject {
if let (Some(x), Some(y)) = (arg1.as_fixnum(), arg2.as_fixnum()) {
if y >= 0 && y <= u32::max_value() as EmacsInt {
return LispObject::from_fixnum(x.pow(y as u32));
}
}
let b = arg1.any_to_float_or_error();
let e = arg2.any_to_float_or_error();
LispObject::from_float(b.powf(e))
}
/// Returns largest integer <= the base 2 log of the magnitude of ARG.
/// This is the same as the exponent of a float.
#[lisp_fn]
pub fn logb(arg: LispObject) -> LispObject {
let res = if let Some(n) = arg.as_fixnum() {
let i = n.abs();
if i == 0 {
MOST_NEGATIVE_FIXNUM
} else {
(mem::size_of::<EmacsUint>() * 8) as EmacsInt - 1 - i.leading_zeros() as EmacsInt
}
} else if let Some(f) = arg.as_float() {
if f == 0.0 {
MOST_NEGATIVE_FIXNUM
} else if f.is_finite() {
let (_, exp) = libm::frexp(f);
exp as EmacsInt - 1
} else {
MOST_POSITIVE_FIXNUM
}
} else {
wrong_type!(Qnumberp, arg)
};
LispObject::from_fixnum(res)
}
/// Return the nearest integer to ARG, as a float.
#[lisp_fn]
pub fn fround(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(libm::rint(d))
}
/// Return the smallest integer no less than ARG.
/// This rounds the value towards +inf.
/// With optional DIVISOR, return the smallest integer no less than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn ceiling(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.ceil(), ceiling2, "ceiling")
}
/// Return the largest integer no greater than ARG.
/// This rounds the value towards -inf.
/// With optional DIVISOR, return the largest integer no greater than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn floor(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.floor(), floor2, "floor")
}
/// Return the nearest integer to ARG.
/// With optional DIVISOR, return the nearest integer to ARG/DIVISOR.
///
/// Rounding a value equidistant between two integers may choose the
/// integer closer to zero, or it may prefer an even integer, depending on
/// your machine. For example, (round 2.5) can return 3 on some
/// systems, but 2 on others.
#[lisp_fn(min = "1")]
pub fn round(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, libm::rint, round2, "round")
}
/// Truncate a floating point number to an int.
/// Rounds ARG toward zero.
/// With optional DIVISOR, truncate ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn truncate(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.trunc(), truncate2, "truncate")
}
fn rounding_driver<F>(
arg: LispObject,
divisor: LispObject,
double_round: F,
int_round2: fn(EmacsInt, EmacsInt) -> EmacsInt,
name: &str,
) -> LispObject
where
F: Fn(f64) -> f64,
{
let d;
if divisor.is_nil() {
if arg.is_fixnum() {
return arg;
} else if let Some(f) = arg.as_float() {
d = f;
} else {
wrong_type!(Qnumberp, arg)
}
} else {
if let (Some(arg), Some(div)) = (arg.as_fixnum(), divisor.as_fixnum()) {
if div == 0 {
xsignal!(Qarith_error);
}
return LispObject::from_fixnum(int_round2(arg, div));
}
let arg = arg.any_to_float_or_error();
let div = divisor.any_to_float_or_error();
d = arg / div;
}
// Round, coarsely test for fixnum overflow before converting to
// EmacsInt (to avoid undefined behavior), and then exactly test
// for overflow after converting (as FIXNUM_OVERFLOW_P is inaccurate
// on floats).
let dr = double_round(d);
if dr.abs() < (2 * (MOST_POSITIVE_FIXNUM + 1)) as f64 {
let ir = dr as EmacsInt;
if!LispObject::fixnum_overflow(ir) {
return LispObject::from_fixnum(ir);
}
}
let errstr = LispObject::from(unsafe {
build_string(name.as_ptr() as *const libc::c_char)
});
xsignal!(Qrange_error, errstr, arg)
}
fn ceiling2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 + ((i1 % i2!= 0) & ((i1 < 0) == (i2 < 0))) as EmacsInt
}
fn floor2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 - ((i1 % i2!= 0) & ((i1 < 0)!= (i2 < 0))) as EmacsInt
}
fn truncate2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2
}
fn round2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
// The division operator gives us one remainder R, but we want the
// remainder R1 on the other side of 0 if R1 is closer to 0 than R
// is; because we want to round to even, we also want R1 if R and R1
// are the same distance from 0 and if C's quotient is odd.
let q = i1 / i2;
let r = i1 % i2;
let abs_r = r.abs();
let abs_r1 = i2.abs() - abs_r;
q + if abs_r + (q & 1) <= abs_r1 {
0
} else if (i2 ^ r) < 0 {
-1
} else {
1
}
}
// Since these are generated via a macro the build cannot hook them into the
// system automatically. Do not add more items here unless they are also generated
// with something like simple_float_op.
pub fn rust_init_extra_syms() {
unsafe {
defsubr(Sacos.as_ptr());
defsubr(Sasin.as_ptr());
defsubr(Scos.as_ptr());
defsubr(Ssin.as_ptr());
defsubr(Stan.as_ptr());
defsubr(Sexp.as_ptr());
defsubr(Ssqrt.as_ptr());
}
}
include!(concat!(env!("OUT_DIR"), "/floatfns_exports.rs"));
|
/// Return the smallest integer no less than ARG, as a float.
/// (Round toward +inf.)
#[lisp_fn]
|
random_line_split
|
floatfns.rs
|
//! Functions operating on float numbers.
use libc;
use std::mem;
use remacs_macros::lisp_fn;
use remacs_sys::{EmacsDouble, EmacsInt, EmacsUint, Lisp_Object, MOST_NEGATIVE_FIXNUM,
MOST_POSITIVE_FIXNUM};
use remacs_sys::{Qarith_error, Qinteger_or_marker_p, Qnumberp, Qrange_error};
use remacs_sys::build_string;
use remacs_sys::libm;
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use math::ArithOp;
/// Either extracts a floating point number from a lisp number (of any kind) or throws an error
/// TODO this is used from C in a few places; remove afterwards.
#[no_mangle]
pub extern "C" fn extract_float(f: Lisp_Object) -> EmacsDouble {
let f = LispObject::from(f);
f.any_to_float_or_error()
}
/// Calculate the modulus of two elisp floats.
pub fn fmod_float(mut f1: f64, f2: f64) -> LispObject {
f1 %= f2;
// Ensure that the remainder has the correct sign.
if f2 < 0.0 && f1 > 0.0 || f2 > 0.0 && f1 < 0.0 {
f1 += f2;
}
LispObject::from_float(f1)
}
macro_rules! simple_float_op {
($lisp_name:expr, $float_func:ident, $lisp_docs:expr) => {
#[doc = $lisp_docs]
#[lisp_fn(name = $lisp_name, c_name = $lisp_name)]
fn $float_func(arg: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let val = d.$float_func();
LispObject::from_float(val)
}
}
}
simple_float_op!("acos", acos, "Return the inverse cosine of ARG.");
simple_float_op!("asin", asin, "Return the inverse sine of ARG.");
// atan is special, defined later
simple_float_op!("cos", cos, "Return the cosine of ARG.");
simple_float_op!("sin", sin, "Return the sine of ARG.");
simple_float_op!("tan", tan, "Return the tangent of ARG.");
simple_float_op!("exp", exp, "Return the exponential base e of ARG.");
simple_float_op!("sqrt", sqrt, "Return the square root of ARG.");
/// Driver for standard arithmetic operations on floats.
pub fn float_arith_driver(
mut accum: f64,
argstart: usize,
code: ArithOp,
args: &[LispObject],
) -> LispObject {
for (i, &val) in args[argstart..].iter().enumerate() {
let argnum = argstart + i;
let next = match val.as_number_coerce_marker_or_error() {
LispNumber::Float(f) => f,
LispNumber::Fixnum(d) => d as f64,
};
match code {
ArithOp::Add => accum += next,
ArithOp::Sub => {
accum = {
if argnum > 0
|
else if args.len() == 1 {
-next
} else {
next
}
}
}
ArithOp::Mult => accum *= next,
ArithOp::Div => if args.len() > 1 && argnum == 0 {
accum = next;
} else {
if next == 0. {
xsignal!(Qarith_error);
}
accum /= next;
},
ArithOp::Logand | ArithOp::Logior | ArithOp::Logxor => {
wrong_type!(Qinteger_or_marker_p, val)
}
}
}
LispObject::from_float(accum)
}
/// Return non nil if argument X is a NaN.
#[lisp_fn]
pub fn isnan(x: LispObject) -> LispObject {
let f = x.as_float_or_error();
LispObject::from_bool(f.is_nan())
}
/// Return the inverse tangent of the arguments.
/// If only one argument Y is given, return the inverse tangent of Y.
/// If two arguments Y and X are given, return the inverse tangent of Y
/// divided by X, i.e. the angle in radians between the vector (X, Y)
/// and the x-axis
#[lisp_fn(min = "1")]
pub fn atan(y: LispObject, x: LispObject) -> LispObject {
let y = y.any_to_float_or_error();
if x.is_nil() {
LispObject::from_float(y.atan())
} else {
let x = x.any_to_float_or_error();
LispObject::from_float(y.atan2(x))
}
}
/// Return the natural logarithm of ARG.
/// If the optional argument BASE is given, return log ARG using that base.
#[lisp_fn(min = "1")]
pub fn log(arg: LispObject, base: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let res = if base.is_nil() {
d.ln()
} else {
let base = base.any_to_float_or_error();
if base == 10.0 {
d.log10()
} else if base == 2.0 {
d.log2()
} else {
d.log(base)
}
};
LispObject::from_float(res)
}
/* These functions take only floats now. */
/// Return the smallest integer no less than ARG, as a float.
/// (Round toward +inf.)
#[lisp_fn]
pub fn fceiling(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.ceil())
}
/// Return the largest integer no greater than ARG, as a float.
/// (Round toward -inf.)
#[lisp_fn]
pub fn ffloor(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.floor())
}
/// Truncate a floating point number to an integral float value.
/// (Round toward zero.)
#[lisp_fn]
pub fn ftruncate(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
if d > 0.0 {
LispObject::from_float(d.floor())
} else {
LispObject::from_float(d.ceil())
}
}
/// Return the floating point number equal to ARG.
#[lisp_fn]
pub fn float(arg: LispObject) -> LispObject {
if arg.is_float() {
arg
} else if let Some(n) = arg.as_fixnum() {
LispObject::from_float(n as EmacsDouble)
} else {
wrong_type!(Qnumberp, arg);
}
}
/// Copy sign of X2 to value of X1, and return the result.
/// Cause an error if X1 or X2 is not a float.
#[lisp_fn]
pub fn copysign(x1: LispObject, x2: LispObject) -> LispObject {
let f1 = x1.as_float_or_error();
let f2 = x2.as_float_or_error();
if libm::signbit(f1)!= libm::signbit(f2) {
LispObject::from_float(-f1)
} else {
x1
}
}
/// Get significand and exponent of a floating point number.
/// Breaks the floating point number X into its binary significand SGNFCAND
/// (a floating point value between 0.5 (included) and 1.0 (excluded))
/// and an integral exponent EXP for 2, such that:
///
/// X = SGNFCAND * 2^EXP
///
/// The function returns the cons cell (SGNFCAND. EXP).
/// If X is zero, both parts (SGNFCAND and EXP) are zero.
#[lisp_fn]
pub fn frexp(x: LispObject) -> LispObject {
let f = x.any_to_float_or_error();
let (significand, exponent) = libm::frexp(f);
LispObject::cons(
LispObject::from_float(significand),
LispObject::from_fixnum(exponent as EmacsInt),
)
}
/// Return SGNFCAND * 2**EXPONENT, as a floating point number.
/// EXPONENT must be an integer.
#[lisp_fn]
pub fn ldexp(sgnfcand: LispObject, exponent: LispObject) -> LispObject {
let exponent = exponent.as_fixnum_or_error();
let significand = sgnfcand.any_to_float_or_error();
let result = libm::ldexp(significand, exponent as libc::c_int);
LispObject::from_float(result)
}
/// Return the exponential ARG1 ** ARG2.
#[lisp_fn]
pub fn expt(arg1: LispObject, arg2: LispObject) -> LispObject {
if let (Some(x), Some(y)) = (arg1.as_fixnum(), arg2.as_fixnum()) {
if y >= 0 && y <= u32::max_value() as EmacsInt {
return LispObject::from_fixnum(x.pow(y as u32));
}
}
let b = arg1.any_to_float_or_error();
let e = arg2.any_to_float_or_error();
LispObject::from_float(b.powf(e))
}
/// Returns largest integer <= the base 2 log of the magnitude of ARG.
/// This is the same as the exponent of a float.
#[lisp_fn]
pub fn logb(arg: LispObject) -> LispObject {
let res = if let Some(n) = arg.as_fixnum() {
let i = n.abs();
if i == 0 {
MOST_NEGATIVE_FIXNUM
} else {
(mem::size_of::<EmacsUint>() * 8) as EmacsInt - 1 - i.leading_zeros() as EmacsInt
}
} else if let Some(f) = arg.as_float() {
if f == 0.0 {
MOST_NEGATIVE_FIXNUM
} else if f.is_finite() {
let (_, exp) = libm::frexp(f);
exp as EmacsInt - 1
} else {
MOST_POSITIVE_FIXNUM
}
} else {
wrong_type!(Qnumberp, arg)
};
LispObject::from_fixnum(res)
}
/// Return the nearest integer to ARG, as a float.
#[lisp_fn]
pub fn fround(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(libm::rint(d))
}
/// Return the smallest integer no less than ARG.
/// This rounds the value towards +inf.
/// With optional DIVISOR, return the smallest integer no less than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn ceiling(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.ceil(), ceiling2, "ceiling")
}
/// Return the largest integer no greater than ARG.
/// This rounds the value towards -inf.
/// With optional DIVISOR, return the largest integer no greater than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn floor(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.floor(), floor2, "floor")
}
/// Return the nearest integer to ARG.
/// With optional DIVISOR, return the nearest integer to ARG/DIVISOR.
///
/// Rounding a value equidistant between two integers may choose the
/// integer closer to zero, or it may prefer an even integer, depending on
/// your machine. For example, (round 2.5) can return 3 on some
/// systems, but 2 on others.
#[lisp_fn(min = "1")]
pub fn round(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, libm::rint, round2, "round")
}
/// Truncate a floating point number to an int.
/// Rounds ARG toward zero.
/// With optional DIVISOR, truncate ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn truncate(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.trunc(), truncate2, "truncate")
}
fn rounding_driver<F>(
arg: LispObject,
divisor: LispObject,
double_round: F,
int_round2: fn(EmacsInt, EmacsInt) -> EmacsInt,
name: &str,
) -> LispObject
where
F: Fn(f64) -> f64,
{
let d;
if divisor.is_nil() {
if arg.is_fixnum() {
return arg;
} else if let Some(f) = arg.as_float() {
d = f;
} else {
wrong_type!(Qnumberp, arg)
}
} else {
if let (Some(arg), Some(div)) = (arg.as_fixnum(), divisor.as_fixnum()) {
if div == 0 {
xsignal!(Qarith_error);
}
return LispObject::from_fixnum(int_round2(arg, div));
}
let arg = arg.any_to_float_or_error();
let div = divisor.any_to_float_or_error();
d = arg / div;
}
// Round, coarsely test for fixnum overflow before converting to
// EmacsInt (to avoid undefined behavior), and then exactly test
// for overflow after converting (as FIXNUM_OVERFLOW_P is inaccurate
// on floats).
let dr = double_round(d);
if dr.abs() < (2 * (MOST_POSITIVE_FIXNUM + 1)) as f64 {
let ir = dr as EmacsInt;
if!LispObject::fixnum_overflow(ir) {
return LispObject::from_fixnum(ir);
}
}
let errstr = LispObject::from(unsafe {
build_string(name.as_ptr() as *const libc::c_char)
});
xsignal!(Qrange_error, errstr, arg)
}
fn ceiling2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 + ((i1 % i2!= 0) & ((i1 < 0) == (i2 < 0))) as EmacsInt
}
fn floor2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 - ((i1 % i2!= 0) & ((i1 < 0)!= (i2 < 0))) as EmacsInt
}
fn truncate2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2
}
fn round2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
// The division operator gives us one remainder R, but we want the
// remainder R1 on the other side of 0 if R1 is closer to 0 than R
// is; because we want to round to even, we also want R1 if R and R1
// are the same distance from 0 and if C's quotient is odd.
let q = i1 / i2;
let r = i1 % i2;
let abs_r = r.abs();
let abs_r1 = i2.abs() - abs_r;
q + if abs_r + (q & 1) <= abs_r1 {
0
} else if (i2 ^ r) < 0 {
-1
} else {
1
}
}
// Since these are generated via a macro the build cannot hook them into the
// system automatically. Do not add more items here unless they are also generated
// with something like simple_float_op.
pub fn rust_init_extra_syms() {
unsafe {
defsubr(Sacos.as_ptr());
defsubr(Sasin.as_ptr());
defsubr(Scos.as_ptr());
defsubr(Ssin.as_ptr());
defsubr(Stan.as_ptr());
defsubr(Sexp.as_ptr());
defsubr(Ssqrt.as_ptr());
}
}
include!(concat!(env!("OUT_DIR"), "/floatfns_exports.rs"));
|
{
accum - next
}
|
conditional_block
|
floatfns.rs
|
//! Functions operating on float numbers.
use libc;
use std::mem;
use remacs_macros::lisp_fn;
use remacs_sys::{EmacsDouble, EmacsInt, EmacsUint, Lisp_Object, MOST_NEGATIVE_FIXNUM,
MOST_POSITIVE_FIXNUM};
use remacs_sys::{Qarith_error, Qinteger_or_marker_p, Qnumberp, Qrange_error};
use remacs_sys::build_string;
use remacs_sys::libm;
use lisp::{LispNumber, LispObject};
use lisp::defsubr;
use math::ArithOp;
/// Either extracts a floating point number from a lisp number (of any kind) or throws an error
/// TODO this is used from C in a few places; remove afterwards.
#[no_mangle]
pub extern "C" fn extract_float(f: Lisp_Object) -> EmacsDouble {
let f = LispObject::from(f);
f.any_to_float_or_error()
}
/// Calculate the modulus of two elisp floats.
pub fn fmod_float(mut f1: f64, f2: f64) -> LispObject {
f1 %= f2;
// Ensure that the remainder has the correct sign.
if f2 < 0.0 && f1 > 0.0 || f2 > 0.0 && f1 < 0.0 {
f1 += f2;
}
LispObject::from_float(f1)
}
macro_rules! simple_float_op {
($lisp_name:expr, $float_func:ident, $lisp_docs:expr) => {
#[doc = $lisp_docs]
#[lisp_fn(name = $lisp_name, c_name = $lisp_name)]
fn $float_func(arg: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let val = d.$float_func();
LispObject::from_float(val)
}
}
}
simple_float_op!("acos", acos, "Return the inverse cosine of ARG.");
simple_float_op!("asin", asin, "Return the inverse sine of ARG.");
// atan is special, defined later
simple_float_op!("cos", cos, "Return the cosine of ARG.");
simple_float_op!("sin", sin, "Return the sine of ARG.");
simple_float_op!("tan", tan, "Return the tangent of ARG.");
simple_float_op!("exp", exp, "Return the exponential base e of ARG.");
simple_float_op!("sqrt", sqrt, "Return the square root of ARG.");
/// Driver for standard arithmetic operations on floats.
pub fn float_arith_driver(
mut accum: f64,
argstart: usize,
code: ArithOp,
args: &[LispObject],
) -> LispObject {
for (i, &val) in args[argstart..].iter().enumerate() {
let argnum = argstart + i;
let next = match val.as_number_coerce_marker_or_error() {
LispNumber::Float(f) => f,
LispNumber::Fixnum(d) => d as f64,
};
match code {
ArithOp::Add => accum += next,
ArithOp::Sub => {
accum = {
if argnum > 0 {
accum - next
} else if args.len() == 1 {
-next
} else {
next
}
}
}
ArithOp::Mult => accum *= next,
ArithOp::Div => if args.len() > 1 && argnum == 0 {
accum = next;
} else {
if next == 0. {
xsignal!(Qarith_error);
}
accum /= next;
},
ArithOp::Logand | ArithOp::Logior | ArithOp::Logxor => {
wrong_type!(Qinteger_or_marker_p, val)
}
}
}
LispObject::from_float(accum)
}
/// Return non nil if argument X is a NaN.
#[lisp_fn]
pub fn isnan(x: LispObject) -> LispObject {
let f = x.as_float_or_error();
LispObject::from_bool(f.is_nan())
}
/// Return the inverse tangent of the arguments.
/// If only one argument Y is given, return the inverse tangent of Y.
/// If two arguments Y and X are given, return the inverse tangent of Y
/// divided by X, i.e. the angle in radians between the vector (X, Y)
/// and the x-axis
#[lisp_fn(min = "1")]
pub fn atan(y: LispObject, x: LispObject) -> LispObject {
let y = y.any_to_float_or_error();
if x.is_nil() {
LispObject::from_float(y.atan())
} else {
let x = x.any_to_float_or_error();
LispObject::from_float(y.atan2(x))
}
}
/// Return the natural logarithm of ARG.
/// If the optional argument BASE is given, return log ARG using that base.
#[lisp_fn(min = "1")]
pub fn log(arg: LispObject, base: LispObject) -> LispObject {
let d = arg.any_to_float_or_error();
let res = if base.is_nil() {
d.ln()
} else {
let base = base.any_to_float_or_error();
if base == 10.0 {
d.log10()
} else if base == 2.0 {
d.log2()
} else {
d.log(base)
}
};
LispObject::from_float(res)
}
/* These functions take only floats now. */
/// Return the smallest integer no less than ARG, as a float.
/// (Round toward +inf.)
#[lisp_fn]
pub fn fceiling(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.ceil())
}
/// Return the largest integer no greater than ARG, as a float.
/// (Round toward -inf.)
#[lisp_fn]
pub fn ffloor(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(d.floor())
}
/// Truncate a floating point number to an integral float value.
/// (Round toward zero.)
#[lisp_fn]
pub fn ftruncate(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
if d > 0.0 {
LispObject::from_float(d.floor())
} else {
LispObject::from_float(d.ceil())
}
}
/// Return the floating point number equal to ARG.
#[lisp_fn]
pub fn float(arg: LispObject) -> LispObject {
if arg.is_float() {
arg
} else if let Some(n) = arg.as_fixnum() {
LispObject::from_float(n as EmacsDouble)
} else {
wrong_type!(Qnumberp, arg);
}
}
/// Copy sign of X2 to value of X1, and return the result.
/// Cause an error if X1 or X2 is not a float.
#[lisp_fn]
pub fn copysign(x1: LispObject, x2: LispObject) -> LispObject {
let f1 = x1.as_float_or_error();
let f2 = x2.as_float_or_error();
if libm::signbit(f1)!= libm::signbit(f2) {
LispObject::from_float(-f1)
} else {
x1
}
}
/// Get significand and exponent of a floating point number.
/// Breaks the floating point number X into its binary significand SGNFCAND
/// (a floating point value between 0.5 (included) and 1.0 (excluded))
/// and an integral exponent EXP for 2, such that:
///
/// X = SGNFCAND * 2^EXP
///
/// The function returns the cons cell (SGNFCAND. EXP).
/// If X is zero, both parts (SGNFCAND and EXP) are zero.
#[lisp_fn]
pub fn frexp(x: LispObject) -> LispObject {
let f = x.any_to_float_or_error();
let (significand, exponent) = libm::frexp(f);
LispObject::cons(
LispObject::from_float(significand),
LispObject::from_fixnum(exponent as EmacsInt),
)
}
/// Return SGNFCAND * 2**EXPONENT, as a floating point number.
/// EXPONENT must be an integer.
#[lisp_fn]
pub fn ldexp(sgnfcand: LispObject, exponent: LispObject) -> LispObject {
let exponent = exponent.as_fixnum_or_error();
let significand = sgnfcand.any_to_float_or_error();
let result = libm::ldexp(significand, exponent as libc::c_int);
LispObject::from_float(result)
}
/// Return the exponential ARG1 ** ARG2.
#[lisp_fn]
pub fn expt(arg1: LispObject, arg2: LispObject) -> LispObject {
if let (Some(x), Some(y)) = (arg1.as_fixnum(), arg2.as_fixnum()) {
if y >= 0 && y <= u32::max_value() as EmacsInt {
return LispObject::from_fixnum(x.pow(y as u32));
}
}
let b = arg1.any_to_float_or_error();
let e = arg2.any_to_float_or_error();
LispObject::from_float(b.powf(e))
}
/// Returns largest integer <= the base 2 log of the magnitude of ARG.
/// This is the same as the exponent of a float.
#[lisp_fn]
pub fn logb(arg: LispObject) -> LispObject {
let res = if let Some(n) = arg.as_fixnum() {
let i = n.abs();
if i == 0 {
MOST_NEGATIVE_FIXNUM
} else {
(mem::size_of::<EmacsUint>() * 8) as EmacsInt - 1 - i.leading_zeros() as EmacsInt
}
} else if let Some(f) = arg.as_float() {
if f == 0.0 {
MOST_NEGATIVE_FIXNUM
} else if f.is_finite() {
let (_, exp) = libm::frexp(f);
exp as EmacsInt - 1
} else {
MOST_POSITIVE_FIXNUM
}
} else {
wrong_type!(Qnumberp, arg)
};
LispObject::from_fixnum(res)
}
/// Return the nearest integer to ARG, as a float.
#[lisp_fn]
pub fn fround(arg: LispObject) -> LispObject {
let d = arg.as_float_or_error();
LispObject::from_float(libm::rint(d))
}
/// Return the smallest integer no less than ARG.
/// This rounds the value towards +inf.
/// With optional DIVISOR, return the smallest integer no less than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn ceiling(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.ceil(), ceiling2, "ceiling")
}
/// Return the largest integer no greater than ARG.
/// This rounds the value towards -inf.
/// With optional DIVISOR, return the largest integer no greater than ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn floor(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.floor(), floor2, "floor")
}
/// Return the nearest integer to ARG.
/// With optional DIVISOR, return the nearest integer to ARG/DIVISOR.
///
/// Rounding a value equidistant between two integers may choose the
/// integer closer to zero, or it may prefer an even integer, depending on
/// your machine. For example, (round 2.5) can return 3 on some
/// systems, but 2 on others.
#[lisp_fn(min = "1")]
pub fn round(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, libm::rint, round2, "round")
}
/// Truncate a floating point number to an int.
/// Rounds ARG toward zero.
/// With optional DIVISOR, truncate ARG/DIVISOR.
#[lisp_fn(min = "1")]
pub fn truncate(arg: LispObject, divisor: LispObject) -> LispObject {
rounding_driver(arg, divisor, |x| x.trunc(), truncate2, "truncate")
}
fn rounding_driver<F>(
arg: LispObject,
divisor: LispObject,
double_round: F,
int_round2: fn(EmacsInt, EmacsInt) -> EmacsInt,
name: &str,
) -> LispObject
where
F: Fn(f64) -> f64,
{
let d;
if divisor.is_nil() {
if arg.is_fixnum() {
return arg;
} else if let Some(f) = arg.as_float() {
d = f;
} else {
wrong_type!(Qnumberp, arg)
}
} else {
if let (Some(arg), Some(div)) = (arg.as_fixnum(), divisor.as_fixnum()) {
if div == 0 {
xsignal!(Qarith_error);
}
return LispObject::from_fixnum(int_round2(arg, div));
}
let arg = arg.any_to_float_or_error();
let div = divisor.any_to_float_or_error();
d = arg / div;
}
// Round, coarsely test for fixnum overflow before converting to
// EmacsInt (to avoid undefined behavior), and then exactly test
// for overflow after converting (as FIXNUM_OVERFLOW_P is inaccurate
// on floats).
let dr = double_round(d);
if dr.abs() < (2 * (MOST_POSITIVE_FIXNUM + 1)) as f64 {
let ir = dr as EmacsInt;
if!LispObject::fixnum_overflow(ir) {
return LispObject::from_fixnum(ir);
}
}
let errstr = LispObject::from(unsafe {
build_string(name.as_ptr() as *const libc::c_char)
});
xsignal!(Qrange_error, errstr, arg)
}
fn ceiling2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 + ((i1 % i2!= 0) & ((i1 < 0) == (i2 < 0))) as EmacsInt
}
fn floor2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2 - ((i1 % i2!= 0) & ((i1 < 0)!= (i2 < 0))) as EmacsInt
}
fn
|
(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
i1 / i2
}
fn round2(i1: EmacsInt, i2: EmacsInt) -> EmacsInt {
// The division operator gives us one remainder R, but we want the
// remainder R1 on the other side of 0 if R1 is closer to 0 than R
// is; because we want to round to even, we also want R1 if R and R1
// are the same distance from 0 and if C's quotient is odd.
let q = i1 / i2;
let r = i1 % i2;
let abs_r = r.abs();
let abs_r1 = i2.abs() - abs_r;
q + if abs_r + (q & 1) <= abs_r1 {
0
} else if (i2 ^ r) < 0 {
-1
} else {
1
}
}
// Since these are generated via a macro the build cannot hook them into the
// system automatically. Do not add more items here unless they are also generated
// with something like simple_float_op.
pub fn rust_init_extra_syms() {
unsafe {
defsubr(Sacos.as_ptr());
defsubr(Sasin.as_ptr());
defsubr(Scos.as_ptr());
defsubr(Ssin.as_ptr());
defsubr(Stan.as_ptr());
defsubr(Sexp.as_ptr());
defsubr(Ssqrt.as_ptr());
}
}
include!(concat!(env!("OUT_DIR"), "/floatfns_exports.rs"));
|
truncate2
|
identifier_name
|
main.rs
|
extern crate crypto;
use crypto::ripemd160;
use crypto::digest::Digest;
fn ripemd_str(string: &str) -> String {
let mut ripemd = ripemd160::Ripemd160::new();
ripemd.input_str(string);
ripemd.result_str()
}
fn main() {
println!("{}", ripemd_str("Rosetta Code"));
}
#[cfg(test)]
mod tests {
use super::ripemd_str;
#[test]
fn ripemd160() {
assert_eq!(ripemd_str("Rosetta Code"),
"b3be159860842cebaa7174c8fff0aa9e50a5199f");
assert_eq!(ripemd_str(""), "9c1185a5c5e9fc54612808977ee8f548b2258d31");
assert_eq!(ripemd_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"),
|
}
}
|
"b0e20b6e3116640286ed3a87a5713079b21f5189");
|
random_line_split
|
main.rs
|
extern crate crypto;
use crypto::ripemd160;
use crypto::digest::Digest;
fn ripemd_str(string: &str) -> String {
let mut ripemd = ripemd160::Ripemd160::new();
ripemd.input_str(string);
ripemd.result_str()
}
fn main()
|
#[cfg(test)]
mod tests {
use super::ripemd_str;
#[test]
fn ripemd160() {
assert_eq!(ripemd_str("Rosetta Code"),
"b3be159860842cebaa7174c8fff0aa9e50a5199f");
assert_eq!(ripemd_str(""), "9c1185a5c5e9fc54612808977ee8f548b2258d31");
assert_eq!(ripemd_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"),
"b0e20b6e3116640286ed3a87a5713079b21f5189");
}
}
|
{
println!("{}", ripemd_str("Rosetta Code"));
}
|
identifier_body
|
main.rs
|
extern crate crypto;
use crypto::ripemd160;
use crypto::digest::Digest;
fn ripemd_str(string: &str) -> String {
let mut ripemd = ripemd160::Ripemd160::new();
ripemd.input_str(string);
ripemd.result_str()
}
fn main() {
println!("{}", ripemd_str("Rosetta Code"));
}
#[cfg(test)]
mod tests {
use super::ripemd_str;
#[test]
fn
|
() {
assert_eq!(ripemd_str("Rosetta Code"),
"b3be159860842cebaa7174c8fff0aa9e50a5199f");
assert_eq!(ripemd_str(""), "9c1185a5c5e9fc54612808977ee8f548b2258d31");
assert_eq!(ripemd_str("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"),
"b0e20b6e3116640286ed3a87a5713079b21f5189");
}
}
|
ripemd160
|
identifier_name
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::{HandleObject, JSContext};
use util::prefs::get_pref;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
|
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => get_pref(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
}
|
random_line_split
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::{HandleObject, JSContext};
use util::prefs::get_pref;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn
|
(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool {
match *self {
Condition::Pref(name) => get_pref(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
}
|
expose
|
identifier_name
|
guard.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Machinery to conditionally expose things.
use js::jsapi::{HandleObject, JSContext};
use util::prefs::get_pref;
/// A container with a condition.
pub struct Guard<T: Clone + Copy> {
condition: Condition,
value: T,
}
impl<T: Clone + Copy> Guard<T> {
/// Construct a new guarded value.
pub const fn new(condition: Condition, value: T) -> Self {
Guard {
condition: condition,
value: value,
}
}
/// Expose the value if the condition is satisfied.
///
/// The passed handle is the object on which the value may be exposed.
pub unsafe fn expose(&self, cx: *mut JSContext, obj: HandleObject) -> Option<T> {
if self.condition.is_satisfied(cx, obj) {
Some(self.value)
} else {
None
}
}
}
/// A condition to expose things.
pub enum Condition {
/// The condition is satisfied if the function returns true.
Func(unsafe fn(*mut JSContext, HandleObject) -> bool),
/// The condition is satisfied if the preference is set.
Pref(&'static str),
/// The condition is always satisfied.
Satisfied,
}
impl Condition {
unsafe fn is_satisfied(&self, cx: *mut JSContext, obj: HandleObject) -> bool
|
}
|
{
match *self {
Condition::Pref(name) => get_pref(name).as_boolean().unwrap_or(false),
Condition::Func(f) => f(cx, obj),
Condition::Satisfied => true,
}
}
|
identifier_body
|
contact.rs
|
use crate::math::{Point, Vector};
use crate::query::ContactKinematic;
use na::{self, RealField, Unit};
use slotmap::Key;
use std::mem;
slotmap::new_key_type! {
/// A contact identifier which is unique within a contact manifold.
pub struct ContactId;
}
/// Geometric description of a contact.
#[derive(Debug, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Contact<N: RealField> {
/// Position of the contact on the first object. The position is expressed in world space.
pub world1: Point<N>,
/// Position of the contact on the second object. The position is expressed in world space.
pub world2: Point<N>,
/// Contact normal
pub normal: Unit<Vector<N>>,
/// Penetration depth
pub depth: N,
}
impl<N: RealField> Contact<N> {
/// Creates a new contact.
#[inline]
pub fn new(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>, depth: N) -> Self {
Contact {
world1,
world2,
normal,
depth,
}
}
/// Creates a new contact, computing automatically the penetration depth.
#[inline]
pub fn new_wo_depth(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>) -> Contact<N> {
let depth = -normal.dot(&(world2 - world1));
Self::new(world1, world2, normal, depth)
}
}
impl<N: RealField> Contact<N> {
/// Reverts the contact normal and swaps `world1` and `world2`.
#[inline]
pub fn flip(&mut self) {
mem::swap(&mut self.world1, &mut self.world2);
self.normal = -self.normal;
}
}
/// A contact combined with contact kinematic information as well as a persistent identifier.
///
/// When ncollide is used to compute contact points between moving solids, it will attempt to
/// match contact points found at successive frames. Two contact points are said to "match" if
/// they can be seen as the same contact point that moved in-between frames. Two matching
/// contact points are given the same `id` here.
#[derive(Copy, Clone, Debug)]
pub struct TrackedContact<N: RealField> {
/// The geometric contact information.
pub contact: Contact<N>,
/// The local contact kinematic.
pub kinematic: ContactKinematic<N>,
/// The identifier of this contact.
pub id: ContactId,
}
impl<N: RealField> TrackedContact<N> {
/// Creates a new tracked contact.
pub fn new(contact: Contact<N>, kinematic: ContactKinematic<N>) -> Self {
TrackedContact {
contact,
kinematic,
id: ContactId::null(),
}
}
}
/// The prediction parameters for contact determination.
#[derive(Clone, Debug, PartialEq)]
pub struct ContactPrediction<N: RealField> {
linear: N,
angular1: N,
angular2: N,
cos_angular1: N,
cos_angular2: N,
sin_angular1: N,
sin_angular2: N,
}
impl<N: RealField> ContactPrediction<N> {
/// Initialize prediction parameters.
pub fn new(linear: N, angular1: N, angular2: N) -> Self {
ContactPrediction {
linear,
angular1,
angular2,
cos_angular1: angular1.cos(),
cos_angular2: angular2.cos(),
sin_angular1: angular1.sin(),
sin_angular2: angular2.sin(),
}
}
/// The linear prediction.
#[inline]
pub fn linear(&self) -> N {
self.linear
}
/// Sets linear prediction.
#[inline]
pub fn set_linear(&mut self, val: N) {
self.linear = val
}
/// The angular regularization for the first solid.
#[inline]
pub fn angular1(&self) -> N {
self.angular1
}
/// The angular regularization for the second solid.
#[inline]
pub fn angular2(&self) -> N {
self.angular2
}
/// The cosine of angular regularization for the first solid.
#[inline]
pub fn cos_angular1(&self) -> N {
self.cos_angular1
}
/// The cosine angular regularization for the second solid.
#[inline]
pub fn cos_angular2(&self) -> N
|
/// The sine of angular regularization for the first solid.
#[inline]
pub fn sin_angular1(&self) -> N {
self.sin_angular1
}
/// The sine angular regularization for the second solid.
#[inline]
pub fn sin_angular2(&self) -> N {
self.sin_angular2
}
}
|
{
self.cos_angular2
}
|
identifier_body
|
contact.rs
|
use crate::math::{Point, Vector};
use crate::query::ContactKinematic;
use na::{self, RealField, Unit};
use slotmap::Key;
use std::mem;
slotmap::new_key_type! {
/// A contact identifier which is unique within a contact manifold.
pub struct ContactId;
}
/// Geometric description of a contact.
#[derive(Debug, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Contact<N: RealField> {
/// Position of the contact on the first object. The position is expressed in world space.
pub world1: Point<N>,
/// Position of the contact on the second object. The position is expressed in world space.
pub world2: Point<N>,
/// Contact normal
pub normal: Unit<Vector<N>>,
/// Penetration depth
pub depth: N,
}
impl<N: RealField> Contact<N> {
/// Creates a new contact.
#[inline]
pub fn new(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>, depth: N) -> Self {
Contact {
world1,
world2,
normal,
depth,
}
}
/// Creates a new contact, computing automatically the penetration depth.
#[inline]
pub fn
|
(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>) -> Contact<N> {
let depth = -normal.dot(&(world2 - world1));
Self::new(world1, world2, normal, depth)
}
}
impl<N: RealField> Contact<N> {
/// Reverts the contact normal and swaps `world1` and `world2`.
#[inline]
pub fn flip(&mut self) {
mem::swap(&mut self.world1, &mut self.world2);
self.normal = -self.normal;
}
}
/// A contact combined with contact kinematic information as well as a persistent identifier.
///
/// When ncollide is used to compute contact points between moving solids, it will attempt to
/// match contact points found at successive frames. Two contact points are said to "match" if
/// they can be seen as the same contact point that moved in-between frames. Two matching
/// contact points are given the same `id` here.
#[derive(Copy, Clone, Debug)]
pub struct TrackedContact<N: RealField> {
/// The geometric contact information.
pub contact: Contact<N>,
/// The local contact kinematic.
pub kinematic: ContactKinematic<N>,
/// The identifier of this contact.
pub id: ContactId,
}
impl<N: RealField> TrackedContact<N> {
/// Creates a new tracked contact.
pub fn new(contact: Contact<N>, kinematic: ContactKinematic<N>) -> Self {
TrackedContact {
contact,
kinematic,
id: ContactId::null(),
}
}
}
/// The prediction parameters for contact determination.
#[derive(Clone, Debug, PartialEq)]
pub struct ContactPrediction<N: RealField> {
linear: N,
angular1: N,
angular2: N,
cos_angular1: N,
cos_angular2: N,
sin_angular1: N,
sin_angular2: N,
}
impl<N: RealField> ContactPrediction<N> {
/// Initialize prediction parameters.
pub fn new(linear: N, angular1: N, angular2: N) -> Self {
ContactPrediction {
linear,
angular1,
angular2,
cos_angular1: angular1.cos(),
cos_angular2: angular2.cos(),
sin_angular1: angular1.sin(),
sin_angular2: angular2.sin(),
}
}
/// The linear prediction.
#[inline]
pub fn linear(&self) -> N {
self.linear
}
/// Sets linear prediction.
#[inline]
pub fn set_linear(&mut self, val: N) {
self.linear = val
}
/// The angular regularization for the first solid.
#[inline]
pub fn angular1(&self) -> N {
self.angular1
}
/// The angular regularization for the second solid.
#[inline]
pub fn angular2(&self) -> N {
self.angular2
}
/// The cosine of angular regularization for the first solid.
#[inline]
pub fn cos_angular1(&self) -> N {
self.cos_angular1
}
/// The cosine angular regularization for the second solid.
#[inline]
pub fn cos_angular2(&self) -> N {
self.cos_angular2
}
/// The sine of angular regularization for the first solid.
#[inline]
pub fn sin_angular1(&self) -> N {
self.sin_angular1
}
/// The sine angular regularization for the second solid.
#[inline]
pub fn sin_angular2(&self) -> N {
self.sin_angular2
}
}
|
new_wo_depth
|
identifier_name
|
contact.rs
|
use crate::math::{Point, Vector};
use crate::query::ContactKinematic;
use na::{self, RealField, Unit};
use slotmap::Key;
use std::mem;
slotmap::new_key_type! {
/// A contact identifier which is unique within a contact manifold.
pub struct ContactId;
}
/// Geometric description of a contact.
#[derive(Debug, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct Contact<N: RealField> {
/// Position of the contact on the first object. The position is expressed in world space.
pub world1: Point<N>,
/// Position of the contact on the second object. The position is expressed in world space.
pub world2: Point<N>,
/// Contact normal
pub normal: Unit<Vector<N>>,
/// Penetration depth
pub depth: N,
}
impl<N: RealField> Contact<N> {
/// Creates a new contact.
#[inline]
pub fn new(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>, depth: N) -> Self {
Contact {
world1,
world2,
normal,
depth,
}
}
/// Creates a new contact, computing automatically the penetration depth.
#[inline]
pub fn new_wo_depth(world1: Point<N>, world2: Point<N>, normal: Unit<Vector<N>>) -> Contact<N> {
let depth = -normal.dot(&(world2 - world1));
Self::new(world1, world2, normal, depth)
}
}
impl<N: RealField> Contact<N> {
/// Reverts the contact normal and swaps `world1` and `world2`.
#[inline]
pub fn flip(&mut self) {
mem::swap(&mut self.world1, &mut self.world2);
self.normal = -self.normal;
}
}
/// A contact combined with contact kinematic information as well as a persistent identifier.
///
/// When ncollide is used to compute contact points between moving solids, it will attempt to
/// match contact points found at successive frames. Two contact points are said to "match" if
/// they can be seen as the same contact point that moved in-between frames. Two matching
/// contact points are given the same `id` here.
#[derive(Copy, Clone, Debug)]
pub struct TrackedContact<N: RealField> {
/// The geometric contact information.
pub contact: Contact<N>,
/// The local contact kinematic.
pub kinematic: ContactKinematic<N>,
/// The identifier of this contact.
pub id: ContactId,
}
impl<N: RealField> TrackedContact<N> {
/// Creates a new tracked contact.
pub fn new(contact: Contact<N>, kinematic: ContactKinematic<N>) -> Self {
TrackedContact {
contact,
kinematic,
id: ContactId::null(),
}
}
}
/// The prediction parameters for contact determination.
#[derive(Clone, Debug, PartialEq)]
pub struct ContactPrediction<N: RealField> {
linear: N,
angular1: N,
angular2: N,
cos_angular1: N,
cos_angular2: N,
sin_angular1: N,
sin_angular2: N,
}
impl<N: RealField> ContactPrediction<N> {
/// Initialize prediction parameters.
pub fn new(linear: N, angular1: N, angular2: N) -> Self {
ContactPrediction {
linear,
angular1,
angular2,
cos_angular1: angular1.cos(),
cos_angular2: angular2.cos(),
sin_angular1: angular1.sin(),
|
sin_angular2: angular2.sin(),
}
}
/// The linear prediction.
#[inline]
pub fn linear(&self) -> N {
self.linear
}
/// Sets linear prediction.
#[inline]
pub fn set_linear(&mut self, val: N) {
self.linear = val
}
/// The angular regularization for the first solid.
#[inline]
pub fn angular1(&self) -> N {
self.angular1
}
/// The angular regularization for the second solid.
#[inline]
pub fn angular2(&self) -> N {
self.angular2
}
/// The cosine of angular regularization for the first solid.
#[inline]
pub fn cos_angular1(&self) -> N {
self.cos_angular1
}
/// The cosine angular regularization for the second solid.
#[inline]
pub fn cos_angular2(&self) -> N {
self.cos_angular2
}
/// The sine of angular regularization for the first solid.
#[inline]
pub fn sin_angular1(&self) -> N {
self.sin_angular1
}
/// The sine angular regularization for the second solid.
#[inline]
pub fn sin_angular2(&self) -> N {
self.sin_angular2
}
}
|
random_line_split
|
|
entities.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//! Data structures containing extracted URL, mention, tag, and media information.
//!
//! These structures are meant to be received in an API call to describe the data they accompany.
//! For example, a `UrlEntity` describes a hyperlink in a tweet or user description text, and a
//! `HashtagEntity` describes a hashtag or stock symbol extracted from a tweet.
//!
//! For more information on the data in these structures, see Twitter's documentation for
//! [Entities][] and [Entities in Objects][obj].
//!
//! [Entities]: https://dev.twitter.com/overview/api/entities
//! [obj]: https://dev.twitter.com/overview/api/entities-in-twitter-objects
//!
//! ## Entity Ranges
//!
//! Entities that refer to elements within a text have a `range` field that contains the text span
//! that is being referenced. The numbers in question are byte offsets, so if you have an entity
//! that you'd like to slice out of the source text, you can use the indices directly in slicing
//! operations:
//!
//! ```rust
//! # use egg_mode::entities::HashtagEntity;
//! # let entity = HashtagEntity { range: (0, 0), text: "".to_string() };
//! # let text = "asdf";
//! let slice = &text[entity.range.0..entity.range.1];
//! ```
//!
//! ### Shortened, Display, and Expanded URLs
//!
//! URL and Media entities contain references to a URL within their parent text. However, due to
//! the nature of how Twitter handles URLs in tweets and user bios, each entity struct has three
//! URLs within it:
//!
//! - `url`: This is the `t.co` shortened URL as returned directly from twitter. This is what
//! contributes to character count in tweets and user bios.
//! - `expanded_url`: This is the original URL the user entered in their tweet. While it is given
//! to API client, Twitter recommends still sending users to the shortened link, for analytics
//! purposes. Twitter Web uses this field to supply hover-text for where the URL resolves to.
//! - `display_url`: This is a truncated version of `expanded_url`, meant to be displayed inline
//! with the parent text. This is useful to show users where the link resolves to, without
//! potentially filling up a lot of space with the fullly expanded URL.
use mime;
use serde::{Deserialize, Deserializer};
use crate::common::deserialize_mime;
///Represents a hashtag or symbol extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct HashtagEntity {
///The byte offsets where the hashtag is located. The first index is the location of the # or $
///character; the second is the location of the first character following the hashtag.
#[serde(rename = "indices")]
pub range: (usize, usize),
///The text of the hashtag, without the leading # or $ character.
pub text: String,
}
///Represents a piece of media attached to a tweet.
///
///The information in this struct is subtly different depending on what media is being referenced,
///and which entity container is holding this instance. For videos and GIFs, the `media_url` and
///`media_url_https` fields each link to a thumbnail image of the media, typically of the first
///frame. The real video information can be found on the `video_info` field, including various
///encodings if available.
///
///Image links available in `media_url` and `media_url_https` can be obtained in different sizes by
///appending a colon and one of the available sizes in the `MediaSizes` struct. For example, the
///cropped thumbnail can be viewed by appending `:thumb` to the end of the URL, and the full-size
///image can be viewed by appending `:large`.
#[derive(Debug, Clone, Deserialize)]
pub struct MediaEntity {
///A shortened URL to display to clients.
pub display_url: String,
///An expanded version of `display_url`; links to the media display page.
pub expanded_url: String,
///A numeric ID for the media.
pub id: u64,
///The byte offsets where the media URL is located. The first index is the location of the
///first character of the URL; the second is the location of the first character following the
///URL.
#[serde(rename = "indices")]
pub range: (usize, usize),
///A URL pointing directly to the media file. Uses HTTP as the protocol.
///
///For videos and GIFs, this link will be to a thumbnail of the media, and the real video link
///will be contained in `video_info`.
pub media_url: String,
///A URL pointing directly to the media file. Uses HTTPS as the protocol.
///
///For videos and GIFs, this link will be to a thumbnail of the media, and the real video link
///will be contained in `video_info`.
pub media_url_https: String,
///Various sizes available for the media file.
pub sizes: MediaSizes,
///For tweets containing media that was originally associated with a different tweet, this
///contains the ID of the original tweet.
pub source_status_id: Option<u64>,
///The type of media being represented.
#[serde(rename = "type")]
pub media_type: MediaType,
///The t.co link from the original text.
pub url: String,
///For media entities corresponding to videos, this contains extra information about the linked
///video.
pub video_info: Option<VideoInfo>,
///Media alt text, if present.
pub ext_alt_text: Option<String>,
}
///Represents the types of media that can be attached to a tweet.
#[derive(Debug, Copy, Clone, Deserialize)]
pub enum MediaType {
///A static image.
#[serde(rename = "photo")]
Photo,
///A video.
#[serde(rename = "video")]
Video,
///An animated GIF, delivered as a video without audio.
#[serde(rename = "animated_gif")]
Gif,
}
///Represents the available sizes for a media file.
#[derive(Debug, Copy, Clone, Deserialize)]
pub struct MediaSizes {
///Information for a thumbnail-sized version of the media.
pub thumb: MediaSize,
///Information for a small-sized version of the media.
pub small: MediaSize,
///Information for a medium-sized version of the media.
pub medium: MediaSize,
///Information for a large-sized version of the media.
pub large: MediaSize,
}
///Represents how an image has been resized for a given size variant.
#[derive(Debug, Copy, Clone, Deserialize)]
pub enum ResizeMode {
///The media was resized to fit one dimension, keeping its aspect ratio.
#[serde(rename = "fit")]
Fit,
///The media was cropped to fit a specific resolution.
#[serde(rename = "crop")]
Crop,
}
///Represents the dimensions of a media file.
#[derive(Debug, Copy, Clone, Deserialize)]
pub struct MediaSize {
///The size variant's width in pixels.
pub w: i32,
///The size variant's height in pixels.
pub h: i32,
///The method used to obtain the given dimensions.
pub resize: ResizeMode,
}
///Represents metadata specific to videos.
#[derive(Debug, Clone, Deserialize)]
pub struct VideoInfo {
///The aspect ratio of the video.
pub aspect_ratio: (i32, i32),
///The duration of the video, in milliseconds.
///
///This field is not given for animated GIFs.
pub duration_millis: Option<i32>,
///Information about various encodings available for the video.
pub variants: Vec<VideoVariant>,
}
///Represents information about a specific encoding of a video.
#[derive(Debug, Clone, Deserialize)]
pub struct VideoVariant {
///The bitrate of the video. This value is present for GIFs, but it will be zero.
pub bitrate: Option<i32>,
///The file format of the video variant.
#[serde(deserialize_with = "deserialize_mime")]
pub content_type: mime::Mime,
///The URL for the video variant.
pub url: String,
}
///Represents a link extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct UrlEntity {
///A truncated URL meant to be displayed inline with the text.
#[serde(default)]
pub display_url: String,
///The URL that the t.co URL resolves to.
///
///Meant to be used as hover-text when a user mouses over a link.
#[serde(default)]
pub expanded_url: Option<String>,
///The byte offsets in the companion text where the URL was extracted from.
#[serde(rename = "indices")]
pub range: (usize, usize),
///The t.co URL extracted from the companion text.
pub url: String,
}
///Represnts a user mention extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct MentionEntity {
///Numeric ID of the mentioned user.
#[serde(deserialize_with = "nullable_id")] // Very rarely this field is null
pub id: u64,
///The byte offsets where the user mention is located in the original text. The first index is
///the location of the @ symbol; the second is the location of the first character following
///the user screen name.
#[serde(rename = "indices")]
pub range: (usize, usize),
///Display name of the mentioned user.
#[serde(deserialize_with = "nullable_str")] // Very rarely, this field is null
pub name: String,
///Screen name of the mentioned user, without the leading @ symbol.
pub screen_name: String,
}
fn nullable_id<'de, D>(deserializer: D) -> Result<u64, D::Error>
where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
fn
|
<'de, D>(deserializer: D) -> Result<String, D::Error>
where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
|
nullable_str
|
identifier_name
|
entities.rs
|
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//! Data structures containing extracted URL, mention, tag, and media information.
//!
//! These structures are meant to be received in an API call to describe the data they accompany.
//! For example, a `UrlEntity` describes a hyperlink in a tweet or user description text, and a
//! `HashtagEntity` describes a hashtag or stock symbol extracted from a tweet.
//!
//! For more information on the data in these structures, see Twitter's documentation for
//! [Entities][] and [Entities in Objects][obj].
//!
//! [Entities]: https://dev.twitter.com/overview/api/entities
//! [obj]: https://dev.twitter.com/overview/api/entities-in-twitter-objects
//!
//! ## Entity Ranges
//!
//! Entities that refer to elements within a text have a `range` field that contains the text span
//! that is being referenced. The numbers in question are byte offsets, so if you have an entity
//! that you'd like to slice out of the source text, you can use the indices directly in slicing
//! operations:
//!
//! ```rust
//! # use egg_mode::entities::HashtagEntity;
//! # let entity = HashtagEntity { range: (0, 0), text: "".to_string() };
//! # let text = "asdf";
//! let slice = &text[entity.range.0..entity.range.1];
//! ```
//!
//! ### Shortened, Display, and Expanded URLs
//!
//! URL and Media entities contain references to a URL within their parent text. However, due to
//! the nature of how Twitter handles URLs in tweets and user bios, each entity struct has three
//! URLs within it:
//!
//! - `url`: This is the `t.co` shortened URL as returned directly from twitter. This is what
//! contributes to character count in tweets and user bios.
//! - `expanded_url`: This is the original URL the user entered in their tweet. While it is given
//! to API client, Twitter recommends still sending users to the shortened link, for analytics
//! purposes. Twitter Web uses this field to supply hover-text for where the URL resolves to.
//! - `display_url`: This is a truncated version of `expanded_url`, meant to be displayed inline
//! with the parent text. This is useful to show users where the link resolves to, without
//! potentially filling up a lot of space with the fullly expanded URL.
use mime;
use serde::{Deserialize, Deserializer};
use crate::common::deserialize_mime;
///Represents a hashtag or symbol extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct HashtagEntity {
///The byte offsets where the hashtag is located. The first index is the location of the # or $
///character; the second is the location of the first character following the hashtag.
#[serde(rename = "indices")]
pub range: (usize, usize),
///The text of the hashtag, without the leading # or $ character.
pub text: String,
}
///Represents a piece of media attached to a tweet.
///
///The information in this struct is subtly different depending on what media is being referenced,
///and which entity container is holding this instance. For videos and GIFs, the `media_url` and
///`media_url_https` fields each link to a thumbnail image of the media, typically of the first
///frame. The real video information can be found on the `video_info` field, including various
///encodings if available.
///
///Image links available in `media_url` and `media_url_https` can be obtained in different sizes by
///appending a colon and one of the available sizes in the `MediaSizes` struct. For example, the
///cropped thumbnail can be viewed by appending `:thumb` to the end of the URL, and the full-size
///image can be viewed by appending `:large`.
#[derive(Debug, Clone, Deserialize)]
pub struct MediaEntity {
///A shortened URL to display to clients.
pub display_url: String,
///An expanded version of `display_url`; links to the media display page.
pub expanded_url: String,
///A numeric ID for the media.
pub id: u64,
///The byte offsets where the media URL is located. The first index is the location of the
///first character of the URL; the second is the location of the first character following the
///URL.
#[serde(rename = "indices")]
pub range: (usize, usize),
///A URL pointing directly to the media file. Uses HTTP as the protocol.
///
///For videos and GIFs, this link will be to a thumbnail of the media, and the real video link
///will be contained in `video_info`.
pub media_url: String,
///A URL pointing directly to the media file. Uses HTTPS as the protocol.
///
///For videos and GIFs, this link will be to a thumbnail of the media, and the real video link
///will be contained in `video_info`.
pub media_url_https: String,
///Various sizes available for the media file.
pub sizes: MediaSizes,
///For tweets containing media that was originally associated with a different tweet, this
///contains the ID of the original tweet.
|
pub media_type: MediaType,
///The t.co link from the original text.
pub url: String,
///For media entities corresponding to videos, this contains extra information about the linked
///video.
pub video_info: Option<VideoInfo>,
///Media alt text, if present.
pub ext_alt_text: Option<String>,
}
///Represents the types of media that can be attached to a tweet.
#[derive(Debug, Copy, Clone, Deserialize)]
pub enum MediaType {
///A static image.
#[serde(rename = "photo")]
Photo,
///A video.
#[serde(rename = "video")]
Video,
///An animated GIF, delivered as a video without audio.
#[serde(rename = "animated_gif")]
Gif,
}
///Represents the available sizes for a media file.
#[derive(Debug, Copy, Clone, Deserialize)]
pub struct MediaSizes {
///Information for a thumbnail-sized version of the media.
pub thumb: MediaSize,
///Information for a small-sized version of the media.
pub small: MediaSize,
///Information for a medium-sized version of the media.
pub medium: MediaSize,
///Information for a large-sized version of the media.
pub large: MediaSize,
}
///Represents how an image has been resized for a given size variant.
#[derive(Debug, Copy, Clone, Deserialize)]
pub enum ResizeMode {
///The media was resized to fit one dimension, keeping its aspect ratio.
#[serde(rename = "fit")]
Fit,
///The media was cropped to fit a specific resolution.
#[serde(rename = "crop")]
Crop,
}
///Represents the dimensions of a media file.
#[derive(Debug, Copy, Clone, Deserialize)]
pub struct MediaSize {
///The size variant's width in pixels.
pub w: i32,
///The size variant's height in pixels.
pub h: i32,
///The method used to obtain the given dimensions.
pub resize: ResizeMode,
}
///Represents metadata specific to videos.
#[derive(Debug, Clone, Deserialize)]
pub struct VideoInfo {
///The aspect ratio of the video.
pub aspect_ratio: (i32, i32),
///The duration of the video, in milliseconds.
///
///This field is not given for animated GIFs.
pub duration_millis: Option<i32>,
///Information about various encodings available for the video.
pub variants: Vec<VideoVariant>,
}
///Represents information about a specific encoding of a video.
#[derive(Debug, Clone, Deserialize)]
pub struct VideoVariant {
///The bitrate of the video. This value is present for GIFs, but it will be zero.
pub bitrate: Option<i32>,
///The file format of the video variant.
#[serde(deserialize_with = "deserialize_mime")]
pub content_type: mime::Mime,
///The URL for the video variant.
pub url: String,
}
///Represents a link extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct UrlEntity {
///A truncated URL meant to be displayed inline with the text.
#[serde(default)]
pub display_url: String,
///The URL that the t.co URL resolves to.
///
///Meant to be used as hover-text when a user mouses over a link.
#[serde(default)]
pub expanded_url: Option<String>,
///The byte offsets in the companion text where the URL was extracted from.
#[serde(rename = "indices")]
pub range: (usize, usize),
///The t.co URL extracted from the companion text.
pub url: String,
}
///Represnts a user mention extracted from another piece of text.
#[derive(Debug, Clone, Deserialize)]
pub struct MentionEntity {
///Numeric ID of the mentioned user.
#[serde(deserialize_with = "nullable_id")] // Very rarely this field is null
pub id: u64,
///The byte offsets where the user mention is located in the original text. The first index is
///the location of the @ symbol; the second is the location of the first character following
///the user screen name.
#[serde(rename = "indices")]
pub range: (usize, usize),
///Display name of the mentioned user.
#[serde(deserialize_with = "nullable_str")] // Very rarely, this field is null
pub name: String,
///Screen name of the mentioned user, without the leading @ symbol.
pub screen_name: String,
}
fn nullable_id<'de, D>(deserializer: D) -> Result<u64, D::Error>
where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
fn nullable_str<'de, D>(deserializer: D) -> Result<String, D::Error>
where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_default())
}
|
pub source_status_id: Option<u64>,
///The type of media being represented.
#[serde(rename = "type")]
|
random_line_split
|
dispatcher.rs
|
use frame::Frame;
use genet_abi::{
context::Context,
decoder::{DecoderBox, ExecType, Metadata, WorkerBox},
fixed::MutFixed,
layer::{Layer, Parent},
};
use profile::Profile;
pub struct Dispatcher {
runners: Vec<Runner>,
}
impl Dispatcher {
pub fn new(typ: &ExecType, profile: &Profile) -> Dispatcher {
let runners = profile
.decoders()
.map(|d| Runner::new(typ, profile.context(), *d))
.collect();
Dispatcher { runners }
}
|
}
pub fn process_frame(&mut self, frame: &mut Frame) {
let mut indices = frame.fetch_tree_indices();
let mut layers = frame.fetch_layers();
let mut offset = 0;
let mut runners = self.runners();
loop {
let len = layers.len() - offset;
for index in offset..layers.len() {
if let Some(n) = indices.get(index) {
if *n > 0 {
continue;
}
}
let mut children = 0;
loop {
let mut executed = 0;
for mut r in &mut runners.iter_mut() {
let mut layer =
Parent::from_mut_ref(unsafe { &mut *layers[index].as_mut_ptr() });
let done = r.execute(&layers, &mut layer);
if done {
executed += 1;
}
let mut results: Vec<MutFixed<Layer>> = layer
.children()
.iter()
.map(|v| unsafe { MutFixed::from_ptr(*v) })
.collect();
children += results.len();
layers.append(&mut results);
}
if executed == 0 {
break;
}
}
indices.push(children as u8);
}
offset += len;
if offset >= layers.len() {
break;
}
}
frame.set_layers(layers);
frame.set_tree_indices(indices);
}
}
struct Runner {
ctx: Context,
typ: ExecType,
decoder: DecoderBox,
metadata: Metadata,
worker: Option<WorkerBox>,
}
impl Runner {
fn new(typ: &ExecType, ctx: Context, decoder: DecoderBox) -> Runner {
let mut runner = Runner {
ctx,
typ: typ.clone(),
decoder,
metadata: decoder.metadata(),
worker: None,
};
runner.reset();
runner
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if let Some(worker) = &mut self.worker {
match worker.decode(&mut self.ctx, layers, layer) {
Ok(done) => done,
Err(_) => true,
}
} else {
true
}
}
fn reset(&mut self) {
self.worker = if self.metadata.exec_type == self.typ {
Some(self.decoder.new_worker(&self.ctx))
} else {
None
}
}
}
pub struct OnceRunner<'a> {
runner: &'a mut Runner,
used: bool,
}
impl<'a> OnceRunner<'a> {
fn new(runner: &'a mut Runner) -> OnceRunner {
OnceRunner {
runner,
used: false,
}
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if!self.used {
let done = self.runner.execute(layers, layer);
if done {
self.used = true;
}
done
} else {
false
}
}
}
|
pub fn runners(&mut self) -> Vec<OnceRunner> {
self.runners
.iter_mut()
.map(|r| OnceRunner::new(r))
.collect()
|
random_line_split
|
dispatcher.rs
|
use frame::Frame;
use genet_abi::{
context::Context,
decoder::{DecoderBox, ExecType, Metadata, WorkerBox},
fixed::MutFixed,
layer::{Layer, Parent},
};
use profile::Profile;
pub struct Dispatcher {
runners: Vec<Runner>,
}
impl Dispatcher {
pub fn new(typ: &ExecType, profile: &Profile) -> Dispatcher {
let runners = profile
.decoders()
.map(|d| Runner::new(typ, profile.context(), *d))
.collect();
Dispatcher { runners }
}
pub fn runners(&mut self) -> Vec<OnceRunner> {
self.runners
.iter_mut()
.map(|r| OnceRunner::new(r))
.collect()
}
pub fn process_frame(&mut self, frame: &mut Frame) {
let mut indices = frame.fetch_tree_indices();
let mut layers = frame.fetch_layers();
let mut offset = 0;
let mut runners = self.runners();
loop {
let len = layers.len() - offset;
for index in offset..layers.len() {
if let Some(n) = indices.get(index) {
if *n > 0 {
continue;
}
}
let mut children = 0;
loop {
let mut executed = 0;
for mut r in &mut runners.iter_mut() {
let mut layer =
Parent::from_mut_ref(unsafe { &mut *layers[index].as_mut_ptr() });
let done = r.execute(&layers, &mut layer);
if done {
executed += 1;
}
let mut results: Vec<MutFixed<Layer>> = layer
.children()
.iter()
.map(|v| unsafe { MutFixed::from_ptr(*v) })
.collect();
children += results.len();
layers.append(&mut results);
}
if executed == 0 {
break;
}
}
indices.push(children as u8);
}
offset += len;
if offset >= layers.len() {
break;
}
}
frame.set_layers(layers);
frame.set_tree_indices(indices);
}
}
struct Runner {
ctx: Context,
typ: ExecType,
decoder: DecoderBox,
metadata: Metadata,
worker: Option<WorkerBox>,
}
impl Runner {
fn new(typ: &ExecType, ctx: Context, decoder: DecoderBox) -> Runner {
let mut runner = Runner {
ctx,
typ: typ.clone(),
decoder,
metadata: decoder.metadata(),
worker: None,
};
runner.reset();
runner
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if let Some(worker) = &mut self.worker {
match worker.decode(&mut self.ctx, layers, layer) {
Ok(done) => done,
Err(_) => true,
}
} else {
true
}
}
fn reset(&mut self) {
self.worker = if self.metadata.exec_type == self.typ
|
else {
None
}
}
}
pub struct OnceRunner<'a> {
runner: &'a mut Runner,
used: bool,
}
impl<'a> OnceRunner<'a> {
fn new(runner: &'a mut Runner) -> OnceRunner {
OnceRunner {
runner,
used: false,
}
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if!self.used {
let done = self.runner.execute(layers, layer);
if done {
self.used = true;
}
done
} else {
false
}
}
}
|
{
Some(self.decoder.new_worker(&self.ctx))
}
|
conditional_block
|
dispatcher.rs
|
use frame::Frame;
use genet_abi::{
context::Context,
decoder::{DecoderBox, ExecType, Metadata, WorkerBox},
fixed::MutFixed,
layer::{Layer, Parent},
};
use profile::Profile;
pub struct Dispatcher {
runners: Vec<Runner>,
}
impl Dispatcher {
pub fn new(typ: &ExecType, profile: &Profile) -> Dispatcher {
let runners = profile
.decoders()
.map(|d| Runner::new(typ, profile.context(), *d))
.collect();
Dispatcher { runners }
}
pub fn runners(&mut self) -> Vec<OnceRunner> {
self.runners
.iter_mut()
.map(|r| OnceRunner::new(r))
.collect()
}
pub fn process_frame(&mut self, frame: &mut Frame) {
let mut indices = frame.fetch_tree_indices();
let mut layers = frame.fetch_layers();
let mut offset = 0;
let mut runners = self.runners();
loop {
let len = layers.len() - offset;
for index in offset..layers.len() {
if let Some(n) = indices.get(index) {
if *n > 0 {
continue;
}
}
let mut children = 0;
loop {
let mut executed = 0;
for mut r in &mut runners.iter_mut() {
let mut layer =
Parent::from_mut_ref(unsafe { &mut *layers[index].as_mut_ptr() });
let done = r.execute(&layers, &mut layer);
if done {
executed += 1;
}
let mut results: Vec<MutFixed<Layer>> = layer
.children()
.iter()
.map(|v| unsafe { MutFixed::from_ptr(*v) })
.collect();
children += results.len();
layers.append(&mut results);
}
if executed == 0 {
break;
}
}
indices.push(children as u8);
}
offset += len;
if offset >= layers.len() {
break;
}
}
frame.set_layers(layers);
frame.set_tree_indices(indices);
}
}
struct Runner {
ctx: Context,
typ: ExecType,
decoder: DecoderBox,
metadata: Metadata,
worker: Option<WorkerBox>,
}
impl Runner {
fn new(typ: &ExecType, ctx: Context, decoder: DecoderBox) -> Runner {
let mut runner = Runner {
ctx,
typ: typ.clone(),
decoder,
metadata: decoder.metadata(),
worker: None,
};
runner.reset();
runner
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if let Some(worker) = &mut self.worker {
match worker.decode(&mut self.ctx, layers, layer) {
Ok(done) => done,
Err(_) => true,
}
} else {
true
}
}
fn
|
(&mut self) {
self.worker = if self.metadata.exec_type == self.typ {
Some(self.decoder.new_worker(&self.ctx))
} else {
None
}
}
}
pub struct OnceRunner<'a> {
runner: &'a mut Runner,
used: bool,
}
impl<'a> OnceRunner<'a> {
fn new(runner: &'a mut Runner) -> OnceRunner {
OnceRunner {
runner,
used: false,
}
}
fn execute(&mut self, layers: &[MutFixed<Layer>], layer: &mut Parent) -> bool {
if!self.used {
let done = self.runner.execute(layers, layer);
if done {
self.used = true;
}
done
} else {
false
}
}
}
|
reset
|
identifier_name
|
write_test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use super::*;
use crate::test_helpers::{
arb_hash_batch, arb_list_of_hash_batches, test_append_empty_impl, test_append_many_impl,
MockHashStore, TestAccumulator,
};
use diem_crypto::hash::ACCUMULATOR_PLACEHOLDER_HASH;
use diem_types::proof::definition::LeafCount;
#[test]
fn test_append_empty_on_empty() {
let store = MockHashStore::new();
assert_eq!(
TestAccumulator::append(&store, 0, &[]).unwrap(),
(*ACCUMULATOR_PLACEHOLDER_HASH, Vec::new())
);
}
#[test]
fn test_append_one() {
let mut store = MockHashStore::new();
store.verify(&[]).unwrap();
let mut leaves = Vec::new();
for v in 0..100 {
let hash = HashValue::random();
let (root_hash, writes) =
TestAccumulator::append(&store, leaves.len() as LeafCount, &[hash]).unwrap();
store.put_many(&writes);
leaves.push(hash);
let expected_root_hash = store.verify(&leaves).unwrap();
assert_eq!(root_hash, expected_root_hash);
assert_eq!(
TestAccumulator::get_root_hash(&store, v + 1).unwrap(),
expected_root_hash
);
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
|
fn test_append_many(batches in arb_list_of_hash_batches(10, 10)) {
test_append_many_impl(batches);
}
#[test]
fn test_append_empty(leaves in arb_hash_batch(100)) {
test_append_empty_impl(leaves)
}
}
|
#[test]
|
random_line_split
|
write_test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use super::*;
use crate::test_helpers::{
arb_hash_batch, arb_list_of_hash_batches, test_append_empty_impl, test_append_many_impl,
MockHashStore, TestAccumulator,
};
use diem_crypto::hash::ACCUMULATOR_PLACEHOLDER_HASH;
use diem_types::proof::definition::LeafCount;
#[test]
fn test_append_empty_on_empty()
|
#[test]
fn test_append_one() {
let mut store = MockHashStore::new();
store.verify(&[]).unwrap();
let mut leaves = Vec::new();
for v in 0..100 {
let hash = HashValue::random();
let (root_hash, writes) =
TestAccumulator::append(&store, leaves.len() as LeafCount, &[hash]).unwrap();
store.put_many(&writes);
leaves.push(hash);
let expected_root_hash = store.verify(&leaves).unwrap();
assert_eq!(root_hash, expected_root_hash);
assert_eq!(
TestAccumulator::get_root_hash(&store, v + 1).unwrap(),
expected_root_hash
);
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
#[test]
fn test_append_many(batches in arb_list_of_hash_batches(10, 10)) {
test_append_many_impl(batches);
}
#[test]
fn test_append_empty(leaves in arb_hash_batch(100)) {
test_append_empty_impl(leaves)
}
}
|
{
let store = MockHashStore::new();
assert_eq!(
TestAccumulator::append(&store, 0, &[]).unwrap(),
(*ACCUMULATOR_PLACEHOLDER_HASH, Vec::new())
);
}
|
identifier_body
|
write_test.rs
|
// Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use super::*;
use crate::test_helpers::{
arb_hash_batch, arb_list_of_hash_batches, test_append_empty_impl, test_append_many_impl,
MockHashStore, TestAccumulator,
};
use diem_crypto::hash::ACCUMULATOR_PLACEHOLDER_HASH;
use diem_types::proof::definition::LeafCount;
#[test]
fn test_append_empty_on_empty() {
let store = MockHashStore::new();
assert_eq!(
TestAccumulator::append(&store, 0, &[]).unwrap(),
(*ACCUMULATOR_PLACEHOLDER_HASH, Vec::new())
);
}
#[test]
fn
|
() {
let mut store = MockHashStore::new();
store.verify(&[]).unwrap();
let mut leaves = Vec::new();
for v in 0..100 {
let hash = HashValue::random();
let (root_hash, writes) =
TestAccumulator::append(&store, leaves.len() as LeafCount, &[hash]).unwrap();
store.put_many(&writes);
leaves.push(hash);
let expected_root_hash = store.verify(&leaves).unwrap();
assert_eq!(root_hash, expected_root_hash);
assert_eq!(
TestAccumulator::get_root_hash(&store, v + 1).unwrap(),
expected_root_hash
);
}
}
proptest! {
#![proptest_config(ProptestConfig::with_cases(10))]
#[test]
fn test_append_many(batches in arb_list_of_hash_batches(10, 10)) {
test_append_many_impl(batches);
}
#[test]
fn test_append_empty(leaves in arb_hash_batch(100)) {
test_append_empty_impl(leaves)
}
}
|
test_append_one
|
identifier_name
|
env.rs
|
use std::ops::{Deref, DerefMut};
use std::collections::HashMap;
use std::iter::FromIterator;
use std::cell::RefCell;
use rand::IsaacRng;
use rand::distributions::{Range, Sample};
use common_util::IntType;
use syntax_tree::*;
use eval::types::*;
use eval::eval_functions::eval_expr;
use error::*;
/// The Roller runtime environment. Stores the variable and function namespaces, the function call_stack, and the random number generator.
pub struct RollerEnv {
/// The global namespace for functions
fun_ns: HashMap<Ident, RollerFun>,
/// The global namespace for variables
var_ns: HashMap<Ident, Value>,
/// The callstack for the functions.
/// Stores the temporary variables of the functions.
call_stack: RefCell<Vec<HashMap<Ident, Value>>>,
/// How many function calls can be in the callstack
max_call_depth: usize,
/// The random number generator
rng: RefCell<IsaacRng>,
}
pub enum NameInfo {
Var,
Fun,
Empty,
}
impl RollerEnv {
/// Creates a new empty runtime environment
pub fn new(max_call_depth: usize) -> RollerEnv {
RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: max_call_depth,
}
}
/// Clears the function and variable namespaces.
pub fn clear(&mut self) {
*self = RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: self.max_call_depth,
}
}
#[allow(dead_code)] // TODO: remove when used
pub fn set_max_call_depth(&mut self, new_depth: usize) {
self.max_call_depth = new_depth;
}
/// Sets a variable with name id to value.
/// If there were a variable or function with same name, it will be replaced.
pub fn assign_var(&mut self, id: &Ident, value: Value) {
self.fun_ns.remove(id);
self.var_ns.insert(id.to_owned(), value);
}
/// Declares a function with the name id.
/// If there were a variable or function with same name, it will be replaced.
pub fn declare_function(&mut self, id: &Ident, body: &RollerFun) {
self.var_ns.remove(id);
self.fun_ns.insert(id.to_owned(), body.clone());
}
/// Deletes a function or variable with the given name.
/// Returns the type of the deleted identifier
pub fn delete_id(&mut self, id: &Ident) -> ParseResult<NameInfo> {
match self.var_ns.remove(id) {
Some(_) => Ok(NameInfo::Var),
// no variable found, try to delete a function
None => match self.fun_ns.remove(id) {
Some(_) => Ok(NameInfo::Fun),
None => Err(RollerErr::EvalError(EvalErr::NoIdFound(id.to_owned() ))),
},
}
}
/// Tells if there is a variable, function or nothing with that name.
#[allow(dead_code)] // TODO: remove when used
pub fn
|
(&self, id: Ident) -> NameInfo {
if let Some(_) = self.var_ns.get(&id) {
NameInfo::Var
}
else if let Some(_) = self.fun_ns.get(&id) {
NameInfo::Fun
}
else {
NameInfo::Empty
}
}
/// Returns the value of the variable with the given identifier.
pub fn get_var<'a>(&'a self, id: &Ident) -> ParseResult<Value> {
// check the last element of the call stack
if let Some(ref hm) = self.call_stack.borrow().deref().last() {
if let Some(ref val) = hm.get(id) {
// check if we found the variable in the stack as a function argument
return Ok((*val).clone());
}
}
// if we didn't find the variable from the stack, check the global space
if let Some(ref val) = self.var_ns.get(id) {
return Ok((*val).clone());
}
// didn't find the variable from either of the namespaces
Err(RollerErr::EvalError(EvalErr::NoVarFound(id.clone() )))
}
/// Calls the function with the given identifier with the given arguments.
/// Returns an error if no such function was found, if the number of parameters was wrong, if the maximum function call depth was reached or if the evaluation of the function's body failed.
/// Calls the eval_expr function to evaluate the function.
pub fn call_fun(&self, id: &Ident, args: Vec<Value>) -> ParseResult<Value> {
match self.fun_ns.get(id) {
Some(ref fun) => {
if self.call_stack.borrow().deref().len() > self.max_call_depth {
return Err(RollerErr::EvalError(EvalErr::ReachedMaxCallDepth));
}
// the fumction's local namespace
let local_ns = try!(Self::ns_from_args(&fun.params, args));
// add the function's local namespace
self.call_stack.borrow_mut().deref_mut().push(local_ns);
// evaluate the function body
let to_return = eval_expr(&fun.body, self);
// remove the call stack namespace. IMPORTANT
self.call_stack.borrow_mut().deref_mut().pop();
// return the output value
to_return
},
None => Err(RollerErr::EvalError(EvalErr::NoFunFound(id.to_owned() ))),
}
}
fn ns_from_args(names: &Vec<Ident>, args: Vec<Value>) -> ParseResult<HashMap<Ident, Value>>
{
// chech whether the lengths match
if names.len()!= args.len() {
return Err(RollerErr::EvalError(
EvalErr::WrongNumParams{expected: names.len(), found: args.len()}
));
}
// ok they do, use iterator magic to add them
Ok(
HashMap::from_iter(
names.iter()
.cloned()
.zip(args.into_iter())
)
)
}
pub fn get_roll(&self, amount: IntType, sides: IntType) -> Vec<IntType> {
let mut distr = Range::new(1, sides+1);
let mut to_return = Vec::with_capacity(amount as usize);
for _ in 1..amount+1 {
to_return.push(distr.sample(&mut *self.rng.borrow_mut()) )
}
to_return
}
}
|
get_name_info
|
identifier_name
|
env.rs
|
use std::ops::{Deref, DerefMut};
use std::collections::HashMap;
use std::iter::FromIterator;
use std::cell::RefCell;
use rand::IsaacRng;
use rand::distributions::{Range, Sample};
use common_util::IntType;
use syntax_tree::*;
use eval::types::*;
use eval::eval_functions::eval_expr;
use error::*;
/// The Roller runtime environment. Stores the variable and function namespaces, the function call_stack, and the random number generator.
pub struct RollerEnv {
/// The global namespace for functions
fun_ns: HashMap<Ident, RollerFun>,
/// The global namespace for variables
var_ns: HashMap<Ident, Value>,
/// The callstack for the functions.
/// Stores the temporary variables of the functions.
call_stack: RefCell<Vec<HashMap<Ident, Value>>>,
/// How many function calls can be in the callstack
max_call_depth: usize,
/// The random number generator
rng: RefCell<IsaacRng>,
}
pub enum NameInfo {
Var,
Fun,
Empty,
}
impl RollerEnv {
/// Creates a new empty runtime environment
pub fn new(max_call_depth: usize) -> RollerEnv {
RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: max_call_depth,
}
}
/// Clears the function and variable namespaces.
pub fn clear(&mut self) {
*self = RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: self.max_call_depth,
}
}
#[allow(dead_code)] // TODO: remove when used
pub fn set_max_call_depth(&mut self, new_depth: usize) {
self.max_call_depth = new_depth;
}
/// Sets a variable with name id to value.
/// If there were a variable or function with same name, it will be replaced.
pub fn assign_var(&mut self, id: &Ident, value: Value) {
self.fun_ns.remove(id);
self.var_ns.insert(id.to_owned(), value);
}
/// Declares a function with the name id.
/// If there were a variable or function with same name, it will be replaced.
pub fn declare_function(&mut self, id: &Ident, body: &RollerFun) {
self.var_ns.remove(id);
self.fun_ns.insert(id.to_owned(), body.clone());
}
/// Deletes a function or variable with the given name.
/// Returns the type of the deleted identifier
pub fn delete_id(&mut self, id: &Ident) -> ParseResult<NameInfo> {
match self.var_ns.remove(id) {
Some(_) => Ok(NameInfo::Var),
// no variable found, try to delete a function
None => match self.fun_ns.remove(id) {
Some(_) => Ok(NameInfo::Fun),
None => Err(RollerErr::EvalError(EvalErr::NoIdFound(id.to_owned() ))),
},
}
}
/// Tells if there is a variable, function or nothing with that name.
#[allow(dead_code)] // TODO: remove when used
pub fn get_name_info(&self, id: Ident) -> NameInfo {
if let Some(_) = self.var_ns.get(&id) {
NameInfo::Var
}
else if let Some(_) = self.fun_ns.get(&id) {
NameInfo::Fun
}
else {
NameInfo::Empty
}
}
/// Returns the value of the variable with the given identifier.
pub fn get_var<'a>(&'a self, id: &Ident) -> ParseResult<Value> {
// check the last element of the call stack
if let Some(ref hm) = self.call_stack.borrow().deref().last() {
if let Some(ref val) = hm.get(id) {
// check if we found the variable in the stack as a function argument
return Ok((*val).clone());
}
}
// if we didn't find the variable from the stack, check the global space
if let Some(ref val) = self.var_ns.get(id) {
return Ok((*val).clone());
}
// didn't find the variable from either of the namespaces
Err(RollerErr::EvalError(EvalErr::NoVarFound(id.clone() )))
}
/// Calls the function with the given identifier with the given arguments.
/// Returns an error if no such function was found, if the number of parameters was wrong, if the maximum function call depth was reached or if the evaluation of the function's body failed.
/// Calls the eval_expr function to evaluate the function.
pub fn call_fun(&self, id: &Ident, args: Vec<Value>) -> ParseResult<Value> {
match self.fun_ns.get(id) {
Some(ref fun) =>
|
,
None => Err(RollerErr::EvalError(EvalErr::NoFunFound(id.to_owned() ))),
}
}
fn ns_from_args(names: &Vec<Ident>, args: Vec<Value>) -> ParseResult<HashMap<Ident, Value>>
{
// chech whether the lengths match
if names.len()!= args.len() {
return Err(RollerErr::EvalError(
EvalErr::WrongNumParams{expected: names.len(), found: args.len()}
));
}
// ok they do, use iterator magic to add them
Ok(
HashMap::from_iter(
names.iter()
.cloned()
.zip(args.into_iter())
)
)
}
pub fn get_roll(&self, amount: IntType, sides: IntType) -> Vec<IntType> {
let mut distr = Range::new(1, sides+1);
let mut to_return = Vec::with_capacity(amount as usize);
for _ in 1..amount+1 {
to_return.push(distr.sample(&mut *self.rng.borrow_mut()) )
}
to_return
}
}
|
{
if self.call_stack.borrow().deref().len() > self.max_call_depth {
return Err(RollerErr::EvalError(EvalErr::ReachedMaxCallDepth));
}
// the fumction's local namespace
let local_ns = try!(Self::ns_from_args(&fun.params, args));
// add the function's local namespace
self.call_stack.borrow_mut().deref_mut().push(local_ns);
// evaluate the function body
let to_return = eval_expr(&fun.body, self);
// remove the call stack namespace. IMPORTANT
self.call_stack.borrow_mut().deref_mut().pop();
// return the output value
to_return
}
|
conditional_block
|
env.rs
|
use std::ops::{Deref, DerefMut};
use std::collections::HashMap;
use std::iter::FromIterator;
use std::cell::RefCell;
use rand::IsaacRng;
use rand::distributions::{Range, Sample};
use common_util::IntType;
use syntax_tree::*;
use eval::types::*;
use eval::eval_functions::eval_expr;
use error::*;
/// The Roller runtime environment. Stores the variable and function namespaces, the function call_stack, and the random number generator.
pub struct RollerEnv {
/// The global namespace for functions
fun_ns: HashMap<Ident, RollerFun>,
/// The global namespace for variables
var_ns: HashMap<Ident, Value>,
/// The callstack for the functions.
/// Stores the temporary variables of the functions.
call_stack: RefCell<Vec<HashMap<Ident, Value>>>,
/// How many function calls can be in the callstack
max_call_depth: usize,
/// The random number generator
rng: RefCell<IsaacRng>,
}
pub enum NameInfo {
Var,
Fun,
Empty,
}
impl RollerEnv {
/// Creates a new empty runtime environment
pub fn new(max_call_depth: usize) -> RollerEnv {
RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: max_call_depth,
}
}
/// Clears the function and variable namespaces.
pub fn clear(&mut self) {
*self = RollerEnv {
fun_ns: HashMap::new(),
var_ns: HashMap::new(),
call_stack: RefCell::new(Vec::new()),
rng: RefCell::new(IsaacRng::new_unseeded()),
max_call_depth: self.max_call_depth,
}
}
#[allow(dead_code)] // TODO: remove when used
pub fn set_max_call_depth(&mut self, new_depth: usize) {
self.max_call_depth = new_depth;
}
/// Sets a variable with name id to value.
/// If there were a variable or function with same name, it will be replaced.
pub fn assign_var(&mut self, id: &Ident, value: Value) {
self.fun_ns.remove(id);
self.var_ns.insert(id.to_owned(), value);
}
/// Declares a function with the name id.
/// If there were a variable or function with same name, it will be replaced.
pub fn declare_function(&mut self, id: &Ident, body: &RollerFun) {
self.var_ns.remove(id);
self.fun_ns.insert(id.to_owned(), body.clone());
}
/// Deletes a function or variable with the given name.
/// Returns the type of the deleted identifier
pub fn delete_id(&mut self, id: &Ident) -> ParseResult<NameInfo> {
match self.var_ns.remove(id) {
Some(_) => Ok(NameInfo::Var),
// no variable found, try to delete a function
None => match self.fun_ns.remove(id) {
Some(_) => Ok(NameInfo::Fun),
None => Err(RollerErr::EvalError(EvalErr::NoIdFound(id.to_owned() ))),
},
}
}
/// Tells if there is a variable, function or nothing with that name.
#[allow(dead_code)] // TODO: remove when used
pub fn get_name_info(&self, id: Ident) -> NameInfo {
if let Some(_) = self.var_ns.get(&id) {
NameInfo::Var
}
else if let Some(_) = self.fun_ns.get(&id) {
NameInfo::Fun
}
else {
NameInfo::Empty
}
}
|
/// Returns the value of the variable with the given identifier.
pub fn get_var<'a>(&'a self, id: &Ident) -> ParseResult<Value> {
// check the last element of the call stack
if let Some(ref hm) = self.call_stack.borrow().deref().last() {
if let Some(ref val) = hm.get(id) {
// check if we found the variable in the stack as a function argument
return Ok((*val).clone());
}
}
// if we didn't find the variable from the stack, check the global space
if let Some(ref val) = self.var_ns.get(id) {
return Ok((*val).clone());
}
// didn't find the variable from either of the namespaces
Err(RollerErr::EvalError(EvalErr::NoVarFound(id.clone() )))
}
/// Calls the function with the given identifier with the given arguments.
/// Returns an error if no such function was found, if the number of parameters was wrong, if the maximum function call depth was reached or if the evaluation of the function's body failed.
/// Calls the eval_expr function to evaluate the function.
pub fn call_fun(&self, id: &Ident, args: Vec<Value>) -> ParseResult<Value> {
match self.fun_ns.get(id) {
Some(ref fun) => {
if self.call_stack.borrow().deref().len() > self.max_call_depth {
return Err(RollerErr::EvalError(EvalErr::ReachedMaxCallDepth));
}
// the fumction's local namespace
let local_ns = try!(Self::ns_from_args(&fun.params, args));
// add the function's local namespace
self.call_stack.borrow_mut().deref_mut().push(local_ns);
// evaluate the function body
let to_return = eval_expr(&fun.body, self);
// remove the call stack namespace. IMPORTANT
self.call_stack.borrow_mut().deref_mut().pop();
// return the output value
to_return
},
None => Err(RollerErr::EvalError(EvalErr::NoFunFound(id.to_owned() ))),
}
}
fn ns_from_args(names: &Vec<Ident>, args: Vec<Value>) -> ParseResult<HashMap<Ident, Value>>
{
// chech whether the lengths match
if names.len()!= args.len() {
return Err(RollerErr::EvalError(
EvalErr::WrongNumParams{expected: names.len(), found: args.len()}
));
}
// ok they do, use iterator magic to add them
Ok(
HashMap::from_iter(
names.iter()
.cloned()
.zip(args.into_iter())
)
)
}
pub fn get_roll(&self, amount: IntType, sides: IntType) -> Vec<IntType> {
let mut distr = Range::new(1, sides+1);
let mut to_return = Vec::with_capacity(amount as usize);
for _ in 1..amount+1 {
to_return.push(distr.sample(&mut *self.rng.borrow_mut()) )
}
to_return
}
}
|
random_line_split
|
|
surface.rs
|
// This is a part of Sonorous.
// Copyright (c) 2005, 2007, 2009, 2012, 2013, 2014, Kang Seonghoon.
// See README.md for details.
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Utilities for SDL surfaces.
use gfx::color::{Color, Gradient, RGB, RGBA, Blend};
pub use sdl::Rect;
pub use sdl::video::Surface;
use sdl::video::ll::SDL_PixelFormat;
/// A trait that can be translated to point coordinates (`x` and `y` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait XyOpt {
/// Returns point coordinates if any.
fn xy_opt(&self) -> Option<(i16,i16)>;
}
/// Same as `XyOpt` but does not contain `()`.
pub trait Xy: XyOpt {
/// Returns point coordinates.
fn xy(&self) -> (i16,i16);
}
/// A trait that can be translated to a rectangular area (`w` and `h` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait WhOpt {
|
/// Same as `WhOpt` but does not contain `()`.
pub trait Wh {
/// Returns a rectangular area.
fn wh(&self) -> (u16,u16);
}
impl XyOpt for () {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { None }
}
// Rust: we can't define these with `impl<T:Xy> XyOpt for T` due to the ambiguity.
impl XyOpt for Rect {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { Some((self.x, self.y)) }
}
impl<'r,T:XyOpt> XyOpt for &'r T {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { (*self).xy_opt() }
}
impl Xy for Rect {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (self.x, self.y) }
}
impl<'r,T:Xy> Xy for &'r T {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (*self).xy() }
}
impl WhOpt for () {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { None }
}
impl WhOpt for Rect {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some((self.w, self.h)) }
}
impl WhOpt for Surface {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some(self.get_size()) }
}
impl<'r,T:WhOpt> WhOpt for &'r T {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { (*self).wh_opt() }
}
impl Wh for Rect {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (self.w, self.h) }
}
impl Wh for Surface {
#[inline(always)]
fn wh(&self) -> (u16,u16) { self.get_size() }
}
impl<'r,T:Wh> Wh for &'r T {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (*self).wh() }
}
/// A helper trait for defining every implementations for types `(T1,T2)` where `T1` and `T2` is
/// convertible to an integer.
trait ToInt16 {
/// Converts to `i16`.
fn to_i16(&self) -> i16;
/// Converts to `u16`.
fn to_u16(&self) -> u16;
}
macro_rules! define_ToInt16(
($t:ty) => (impl ToInt16 for $t {
#[inline(always)]
fn to_i16(&self) -> i16 { *self as i16 }
#[inline(always)]
fn to_u16(&self) -> u16 { *self as u16 }
})
)
define_ToInt16!(int)
define_ToInt16!(uint)
define_ToInt16!(i8)
define_ToInt16!(i16)
define_ToInt16!(i32)
define_ToInt16!(i64)
define_ToInt16!(u8)
define_ToInt16!(u16)
define_ToInt16!(u32)
define_ToInt16!(u64)
impl<X:ToInt16+Clone,Y:ToInt16+Clone> XyOpt for (X,Y) {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> {
let (x, y) = self.clone();
Some((x.to_i16(), y.to_i16()))
}
}
impl<X:ToInt16+Clone,Y:ToInt16+Clone> Xy for (X,Y) {
#[inline(always)]
fn xy(&self) -> (i16,i16) {
let (x, y) = self.clone();
(x.to_i16(), y.to_i16())
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> WhOpt for (W,H) {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> {
let (w, h) = self.clone();
Some((w.to_u16(), h.to_u16()))
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> Wh for (W,H) {
#[inline(always)]
fn wh(&self) -> (u16,u16) {
let (w, h) = self.clone();
(w.to_u16(), h.to_u16())
}
}
/// Constructs an `sdl::Rect` from given point coordinates. Fills `w` and `h` fields to 0
/// as expected by the second `sdl::Rect` argument from `SDL_BlitSurface`.
#[inline(always)]
pub fn rect_from_xy<XY:Xy>(xy: XY) -> Rect {
let (x, y) = xy.xy();
Rect { x: x, y: y, w: 0, h: 0 }
}
/// Constructs an `sdl::Rect` from given point coordinates and optional rectangular area.
/// `rect_from_xywh(xy, ())` equals to `rect_from_xy(xy)`.
#[inline(always)]
pub fn rect_from_xywh<XY:Xy,WH:WhOpt>(xy: XY, wh: WH) -> Rect {
let (x, y) = xy.xy();
let (w, h) = wh.wh_opt().unwrap_or((0, 0));
Rect { x: x, y: y, w: w, h: h }
}
/// Additions to `sdl::video::Surface`. They replace their `_rect` suffixed counterparts,
/// which are generally annoying to work with.
pub trait SurfaceAreaUtil {
/// An alternative interface to `set_clip_rect`.
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH);
/// An alternative interface to `blit_rect`.
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool;
/// An alternative interface to `fill_rect`.
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool;
}
impl SurfaceAreaUtil for Surface {
#[inline(always)]
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH) {
let rect = rect_from_xywh(xy, wh);
self.set_clip_rect(&rect)
}
#[inline(always)]
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool {
let srcrect = rect_from_xywh(srcxy, wh);
let dstrect = dstxy.xy_opt().map(|xy| rect_from_xywh(xy, &srcrect));
self.blit_rect(src, Some(srcrect), dstrect)
}
#[inline(always)]
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool {
let rect = rect_from_xywh(xy, wh);
self.fill_rect(Some(rect), color)
}
}
/// A proxy to `sdl::video::Surface` for the direct access to pixels. For now, it is for 32 bits
/// per pixel only.
pub struct SurfacePixels<'r> {
fmt: *mut SDL_PixelFormat,
width: uint,
height: uint,
pitch: uint,
pixels: &'r mut [u32]
}
/// A trait for the direct access to pixels.
pub trait SurfacePixelsUtil {
/// Grants the direct access to pixels. Also locks the surface as needed, so you can't blit
/// during working with pixels.
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R;
}
impl SurfacePixelsUtil for Surface {
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R {
self.with_lock(|pixels| {
let fmt = unsafe {(*self.raw).format};
let pitch = unsafe {((*self.raw).pitch / 4) as uint};
let pixels = unsafe {::std::mem::transmute(pixels)};
let mut proxy = SurfacePixels { fmt: fmt, width: self.get_width() as uint,
height: self.get_height() as uint,
pitch: pitch, pixels: pixels };
f(&mut proxy)
})
}
}
impl<'r> SurfacePixels<'r> {
/// Returns a pixel at given position. (C: `getpixel`)
pub fn get_pixel(&self, x: uint, y: uint) -> Color {
Color::from_mapped(self.pixels[x + y * self.pitch], self.fmt as *const _)
}
/// Returns a pixel at given position, only when the position is valid.
pub fn get_pixel_checked(&self, x: uint, y: uint) -> Option<Color> {
if x < self.width && y < self.height {
Some(self.get_pixel(x, y))
} else {
None
}
}
/// Sets a pixel to given position. (C: `putpixel`)
pub fn put_pixel(&mut self, x: uint, y: uint, c: Color) {
self.pixels[x + y * self.pitch] = c.to_mapped(self.fmt as *const _);
}
/// Sets a pixel to given position, only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_pixel(x, y, c);
true
} else {
false
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position. (C: `putblendedpixel`)
pub fn put_blended_pixel(&mut self, x: uint, y: uint, c: Color) {
match c {
RGB(..) => self.put_pixel(x, y, c),
RGBA(r,g,b,a) => match self.get_pixel(x, y) {
RGB(r2,g2,b2) | RGBA(r2,g2,b2,_) => {
let grad = Gradient { zero: RGB(r,g,b), one: RGB(r2,g2,b2) };
self.put_pixel(x, y, grad.blend(a as int, 255));
}
}
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position,
/// only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_blended_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_blended_pixel(x, y, c);
true
} else {
false
}
}
}
|
/// Returns a rectangular area if any.
fn wh_opt(&self) -> Option<(u16,u16)>;
}
|
random_line_split
|
surface.rs
|
// This is a part of Sonorous.
// Copyright (c) 2005, 2007, 2009, 2012, 2013, 2014, Kang Seonghoon.
// See README.md for details.
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Utilities for SDL surfaces.
use gfx::color::{Color, Gradient, RGB, RGBA, Blend};
pub use sdl::Rect;
pub use sdl::video::Surface;
use sdl::video::ll::SDL_PixelFormat;
/// A trait that can be translated to point coordinates (`x` and `y` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait XyOpt {
/// Returns point coordinates if any.
fn xy_opt(&self) -> Option<(i16,i16)>;
}
/// Same as `XyOpt` but does not contain `()`.
pub trait Xy: XyOpt {
/// Returns point coordinates.
fn xy(&self) -> (i16,i16);
}
/// A trait that can be translated to a rectangular area (`w` and `h` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait WhOpt {
/// Returns a rectangular area if any.
fn wh_opt(&self) -> Option<(u16,u16)>;
}
/// Same as `WhOpt` but does not contain `()`.
pub trait Wh {
/// Returns a rectangular area.
fn wh(&self) -> (u16,u16);
}
impl XyOpt for () {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { None }
}
// Rust: we can't define these with `impl<T:Xy> XyOpt for T` due to the ambiguity.
impl XyOpt for Rect {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { Some((self.x, self.y)) }
}
impl<'r,T:XyOpt> XyOpt for &'r T {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { (*self).xy_opt() }
}
impl Xy for Rect {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (self.x, self.y) }
}
impl<'r,T:Xy> Xy for &'r T {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (*self).xy() }
}
impl WhOpt for () {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { None }
}
impl WhOpt for Rect {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some((self.w, self.h)) }
}
impl WhOpt for Surface {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some(self.get_size()) }
}
impl<'r,T:WhOpt> WhOpt for &'r T {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { (*self).wh_opt() }
}
impl Wh for Rect {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (self.w, self.h) }
}
impl Wh for Surface {
#[inline(always)]
fn wh(&self) -> (u16,u16)
|
}
impl<'r,T:Wh> Wh for &'r T {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (*self).wh() }
}
/// A helper trait for defining every implementations for types `(T1,T2)` where `T1` and `T2` is
/// convertible to an integer.
trait ToInt16 {
/// Converts to `i16`.
fn to_i16(&self) -> i16;
/// Converts to `u16`.
fn to_u16(&self) -> u16;
}
macro_rules! define_ToInt16(
($t:ty) => (impl ToInt16 for $t {
#[inline(always)]
fn to_i16(&self) -> i16 { *self as i16 }
#[inline(always)]
fn to_u16(&self) -> u16 { *self as u16 }
})
)
define_ToInt16!(int)
define_ToInt16!(uint)
define_ToInt16!(i8)
define_ToInt16!(i16)
define_ToInt16!(i32)
define_ToInt16!(i64)
define_ToInt16!(u8)
define_ToInt16!(u16)
define_ToInt16!(u32)
define_ToInt16!(u64)
impl<X:ToInt16+Clone,Y:ToInt16+Clone> XyOpt for (X,Y) {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> {
let (x, y) = self.clone();
Some((x.to_i16(), y.to_i16()))
}
}
impl<X:ToInt16+Clone,Y:ToInt16+Clone> Xy for (X,Y) {
#[inline(always)]
fn xy(&self) -> (i16,i16) {
let (x, y) = self.clone();
(x.to_i16(), y.to_i16())
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> WhOpt for (W,H) {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> {
let (w, h) = self.clone();
Some((w.to_u16(), h.to_u16()))
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> Wh for (W,H) {
#[inline(always)]
fn wh(&self) -> (u16,u16) {
let (w, h) = self.clone();
(w.to_u16(), h.to_u16())
}
}
/// Constructs an `sdl::Rect` from given point coordinates. Fills `w` and `h` fields to 0
/// as expected by the second `sdl::Rect` argument from `SDL_BlitSurface`.
#[inline(always)]
pub fn rect_from_xy<XY:Xy>(xy: XY) -> Rect {
let (x, y) = xy.xy();
Rect { x: x, y: y, w: 0, h: 0 }
}
/// Constructs an `sdl::Rect` from given point coordinates and optional rectangular area.
/// `rect_from_xywh(xy, ())` equals to `rect_from_xy(xy)`.
#[inline(always)]
pub fn rect_from_xywh<XY:Xy,WH:WhOpt>(xy: XY, wh: WH) -> Rect {
let (x, y) = xy.xy();
let (w, h) = wh.wh_opt().unwrap_or((0, 0));
Rect { x: x, y: y, w: w, h: h }
}
/// Additions to `sdl::video::Surface`. They replace their `_rect` suffixed counterparts,
/// which are generally annoying to work with.
pub trait SurfaceAreaUtil {
/// An alternative interface to `set_clip_rect`.
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH);
/// An alternative interface to `blit_rect`.
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool;
/// An alternative interface to `fill_rect`.
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool;
}
impl SurfaceAreaUtil for Surface {
#[inline(always)]
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH) {
let rect = rect_from_xywh(xy, wh);
self.set_clip_rect(&rect)
}
#[inline(always)]
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool {
let srcrect = rect_from_xywh(srcxy, wh);
let dstrect = dstxy.xy_opt().map(|xy| rect_from_xywh(xy, &srcrect));
self.blit_rect(src, Some(srcrect), dstrect)
}
#[inline(always)]
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool {
let rect = rect_from_xywh(xy, wh);
self.fill_rect(Some(rect), color)
}
}
/// A proxy to `sdl::video::Surface` for the direct access to pixels. For now, it is for 32 bits
/// per pixel only.
pub struct SurfacePixels<'r> {
fmt: *mut SDL_PixelFormat,
width: uint,
height: uint,
pitch: uint,
pixels: &'r mut [u32]
}
/// A trait for the direct access to pixels.
pub trait SurfacePixelsUtil {
/// Grants the direct access to pixels. Also locks the surface as needed, so you can't blit
/// during working with pixels.
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R;
}
impl SurfacePixelsUtil for Surface {
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R {
self.with_lock(|pixels| {
let fmt = unsafe {(*self.raw).format};
let pitch = unsafe {((*self.raw).pitch / 4) as uint};
let pixels = unsafe {::std::mem::transmute(pixels)};
let mut proxy = SurfacePixels { fmt: fmt, width: self.get_width() as uint,
height: self.get_height() as uint,
pitch: pitch, pixels: pixels };
f(&mut proxy)
})
}
}
impl<'r> SurfacePixels<'r> {
/// Returns a pixel at given position. (C: `getpixel`)
pub fn get_pixel(&self, x: uint, y: uint) -> Color {
Color::from_mapped(self.pixels[x + y * self.pitch], self.fmt as *const _)
}
/// Returns a pixel at given position, only when the position is valid.
pub fn get_pixel_checked(&self, x: uint, y: uint) -> Option<Color> {
if x < self.width && y < self.height {
Some(self.get_pixel(x, y))
} else {
None
}
}
/// Sets a pixel to given position. (C: `putpixel`)
pub fn put_pixel(&mut self, x: uint, y: uint, c: Color) {
self.pixels[x + y * self.pitch] = c.to_mapped(self.fmt as *const _);
}
/// Sets a pixel to given position, only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_pixel(x, y, c);
true
} else {
false
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position. (C: `putblendedpixel`)
pub fn put_blended_pixel(&mut self, x: uint, y: uint, c: Color) {
match c {
RGB(..) => self.put_pixel(x, y, c),
RGBA(r,g,b,a) => match self.get_pixel(x, y) {
RGB(r2,g2,b2) | RGBA(r2,g2,b2,_) => {
let grad = Gradient { zero: RGB(r,g,b), one: RGB(r2,g2,b2) };
self.put_pixel(x, y, grad.blend(a as int, 255));
}
}
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position,
/// only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_blended_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_blended_pixel(x, y, c);
true
} else {
false
}
}
}
|
{ self.get_size() }
|
identifier_body
|
surface.rs
|
// This is a part of Sonorous.
// Copyright (c) 2005, 2007, 2009, 2012, 2013, 2014, Kang Seonghoon.
// See README.md for details.
//
// Licensed under the Apache License, Version 2.0 <http://www.apache.org/licenses/LICENSE-2.0> or
// the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! Utilities for SDL surfaces.
use gfx::color::{Color, Gradient, RGB, RGBA, Blend};
pub use sdl::Rect;
pub use sdl::video::Surface;
use sdl::video::ll::SDL_PixelFormat;
/// A trait that can be translated to point coordinates (`x` and `y` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait XyOpt {
/// Returns point coordinates if any.
fn xy_opt(&self) -> Option<(i16,i16)>;
}
/// Same as `XyOpt` but does not contain `()`.
pub trait Xy: XyOpt {
/// Returns point coordinates.
fn xy(&self) -> (i16,i16);
}
/// A trait that can be translated to a rectangular area (`w` and `h` fields in `sdl::Rect`,
/// hence the name). Also contains `()`.
pub trait WhOpt {
/// Returns a rectangular area if any.
fn wh_opt(&self) -> Option<(u16,u16)>;
}
/// Same as `WhOpt` but does not contain `()`.
pub trait Wh {
/// Returns a rectangular area.
fn wh(&self) -> (u16,u16);
}
impl XyOpt for () {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { None }
}
// Rust: we can't define these with `impl<T:Xy> XyOpt for T` due to the ambiguity.
impl XyOpt for Rect {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { Some((self.x, self.y)) }
}
impl<'r,T:XyOpt> XyOpt for &'r T {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> { (*self).xy_opt() }
}
impl Xy for Rect {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (self.x, self.y) }
}
impl<'r,T:Xy> Xy for &'r T {
#[inline(always)]
fn xy(&self) -> (i16,i16) { (*self).xy() }
}
impl WhOpt for () {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { None }
}
impl WhOpt for Rect {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some((self.w, self.h)) }
}
impl WhOpt for Surface {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { Some(self.get_size()) }
}
impl<'r,T:WhOpt> WhOpt for &'r T {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> { (*self).wh_opt() }
}
impl Wh for Rect {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (self.w, self.h) }
}
impl Wh for Surface {
#[inline(always)]
fn wh(&self) -> (u16,u16) { self.get_size() }
}
impl<'r,T:Wh> Wh for &'r T {
#[inline(always)]
fn wh(&self) -> (u16,u16) { (*self).wh() }
}
/// A helper trait for defining every implementations for types `(T1,T2)` where `T1` and `T2` is
/// convertible to an integer.
trait ToInt16 {
/// Converts to `i16`.
fn to_i16(&self) -> i16;
/// Converts to `u16`.
fn to_u16(&self) -> u16;
}
macro_rules! define_ToInt16(
($t:ty) => (impl ToInt16 for $t {
#[inline(always)]
fn to_i16(&self) -> i16 { *self as i16 }
#[inline(always)]
fn to_u16(&self) -> u16 { *self as u16 }
})
)
define_ToInt16!(int)
define_ToInt16!(uint)
define_ToInt16!(i8)
define_ToInt16!(i16)
define_ToInt16!(i32)
define_ToInt16!(i64)
define_ToInt16!(u8)
define_ToInt16!(u16)
define_ToInt16!(u32)
define_ToInt16!(u64)
impl<X:ToInt16+Clone,Y:ToInt16+Clone> XyOpt for (X,Y) {
#[inline(always)]
fn xy_opt(&self) -> Option<(i16,i16)> {
let (x, y) = self.clone();
Some((x.to_i16(), y.to_i16()))
}
}
impl<X:ToInt16+Clone,Y:ToInt16+Clone> Xy for (X,Y) {
#[inline(always)]
fn xy(&self) -> (i16,i16) {
let (x, y) = self.clone();
(x.to_i16(), y.to_i16())
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> WhOpt for (W,H) {
#[inline(always)]
fn wh_opt(&self) -> Option<(u16,u16)> {
let (w, h) = self.clone();
Some((w.to_u16(), h.to_u16()))
}
}
impl<W:ToInt16+Clone,H:ToInt16+Clone> Wh for (W,H) {
#[inline(always)]
fn wh(&self) -> (u16,u16) {
let (w, h) = self.clone();
(w.to_u16(), h.to_u16())
}
}
/// Constructs an `sdl::Rect` from given point coordinates. Fills `w` and `h` fields to 0
/// as expected by the second `sdl::Rect` argument from `SDL_BlitSurface`.
#[inline(always)]
pub fn rect_from_xy<XY:Xy>(xy: XY) -> Rect {
let (x, y) = xy.xy();
Rect { x: x, y: y, w: 0, h: 0 }
}
/// Constructs an `sdl::Rect` from given point coordinates and optional rectangular area.
/// `rect_from_xywh(xy, ())` equals to `rect_from_xy(xy)`.
#[inline(always)]
pub fn rect_from_xywh<XY:Xy,WH:WhOpt>(xy: XY, wh: WH) -> Rect {
let (x, y) = xy.xy();
let (w, h) = wh.wh_opt().unwrap_or((0, 0));
Rect { x: x, y: y, w: w, h: h }
}
/// Additions to `sdl::video::Surface`. They replace their `_rect` suffixed counterparts,
/// which are generally annoying to work with.
pub trait SurfaceAreaUtil {
/// An alternative interface to `set_clip_rect`.
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH);
/// An alternative interface to `blit_rect`.
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool;
/// An alternative interface to `fill_rect`.
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool;
}
impl SurfaceAreaUtil for Surface {
#[inline(always)]
fn set_clip_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH) {
let rect = rect_from_xywh(xy, wh);
self.set_clip_rect(&rect)
}
#[inline(always)]
fn blit_area<SrcXY:Xy,DstXY:XyOpt,WH:WhOpt>(&self, src: &Surface,
srcxy: SrcXY, dstxy: DstXY, wh: WH) -> bool {
let srcrect = rect_from_xywh(srcxy, wh);
let dstrect = dstxy.xy_opt().map(|xy| rect_from_xywh(xy, &srcrect));
self.blit_rect(src, Some(srcrect), dstrect)
}
#[inline(always)]
fn fill_area<XY:Xy,WH:WhOpt>(&self, xy: XY, wh: WH, color: Color) -> bool {
let rect = rect_from_xywh(xy, wh);
self.fill_rect(Some(rect), color)
}
}
/// A proxy to `sdl::video::Surface` for the direct access to pixels. For now, it is for 32 bits
/// per pixel only.
pub struct SurfacePixels<'r> {
fmt: *mut SDL_PixelFormat,
width: uint,
height: uint,
pitch: uint,
pixels: &'r mut [u32]
}
/// A trait for the direct access to pixels.
pub trait SurfacePixelsUtil {
/// Grants the direct access to pixels. Also locks the surface as needed, so you can't blit
/// during working with pixels.
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R;
}
impl SurfacePixelsUtil for Surface {
fn with_pixels<R>(&self, f: |pixels: &mut SurfacePixels| -> R) -> R {
self.with_lock(|pixels| {
let fmt = unsafe {(*self.raw).format};
let pitch = unsafe {((*self.raw).pitch / 4) as uint};
let pixels = unsafe {::std::mem::transmute(pixels)};
let mut proxy = SurfacePixels { fmt: fmt, width: self.get_width() as uint,
height: self.get_height() as uint,
pitch: pitch, pixels: pixels };
f(&mut proxy)
})
}
}
impl<'r> SurfacePixels<'r> {
/// Returns a pixel at given position. (C: `getpixel`)
pub fn get_pixel(&self, x: uint, y: uint) -> Color {
Color::from_mapped(self.pixels[x + y * self.pitch], self.fmt as *const _)
}
/// Returns a pixel at given position, only when the position is valid.
pub fn
|
(&self, x: uint, y: uint) -> Option<Color> {
if x < self.width && y < self.height {
Some(self.get_pixel(x, y))
} else {
None
}
}
/// Sets a pixel to given position. (C: `putpixel`)
pub fn put_pixel(&mut self, x: uint, y: uint, c: Color) {
self.pixels[x + y * self.pitch] = c.to_mapped(self.fmt as *const _);
}
/// Sets a pixel to given position, only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_pixel(x, y, c);
true
} else {
false
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position. (C: `putblendedpixel`)
pub fn put_blended_pixel(&mut self, x: uint, y: uint, c: Color) {
match c {
RGB(..) => self.put_pixel(x, y, c),
RGBA(r,g,b,a) => match self.get_pixel(x, y) {
RGB(r2,g2,b2) | RGBA(r2,g2,b2,_) => {
let grad = Gradient { zero: RGB(r,g,b), one: RGB(r2,g2,b2) };
self.put_pixel(x, y, grad.blend(a as int, 255));
}
}
}
}
/// Sets or blends (if `c` is `RGBA`) a pixel to given position,
/// only when the position is valid.
/// Returns true when the pixel has really been set.
pub fn put_blended_pixel_checked(&mut self, x: uint, y: uint, c: Color) -> bool {
if x < self.width && y < self.height {
self.put_blended_pixel(x, y, c);
true
} else {
false
}
}
}
|
get_pixel_checked
|
identifier_name
|
tendermint.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use super::ValidatorSet;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintParams {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid validators.
pub validators: ValidatorSet,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
/// Block reward.
#[serde(rename="blockReward")]
pub block_reward: Option<Uint>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Ethash params.
pub params: TendermintParams,
}
#[cfg(test)]
mod tests {
use serde_json;
use spec::tendermint::Tendermint;
#[test]
fn
|
() {
let s = r#"{
"params": {
"gasLimitBoundDivisor": "0x0400",
"validators": {
"list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
},
"blockReward": "0x50"
}
}"#;
let _deserialized: Tendermint = serde_json::from_str(s).unwrap();
}
}
|
tendermint_deserialization
|
identifier_name
|
tendermint.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use super::ValidatorSet;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintParams {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid validators.
pub validators: ValidatorSet,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
/// Block reward.
#[serde(rename="blockReward")]
pub block_reward: Option<Uint>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Ethash params.
pub params: TendermintParams,
}
#[cfg(test)]
mod tests {
use serde_json;
use spec::tendermint::Tendermint;
#[test]
fn tendermint_deserialization() {
let s = r#"{
"params": {
|
"gasLimitBoundDivisor": "0x0400",
"validators": {
"list": ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
},
"blockReward": "0x50"
}
}"#;
let _deserialized: Tendermint = serde_json::from_str(s).unwrap();
}
}
|
random_line_split
|
|
slice.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use anyhow::{anyhow, Error, Result};
use blobrepo::BlobRepo;
use context::CoreContext;
use derived_data_utils::DerivedUtils;
use futures::stream::{self, FuturesUnordered, StreamExt, TryStreamExt};
use mononoke_types::{ChangesetId, Generation};
use skiplist::SkiplistIndex;
use slog::info;
/// Determine which heads are underived in any of the derivers.
async fn underived_heads(
ctx: &CoreContext,
repo: &BlobRepo,
derivers: &[Arc<dyn DerivedUtils>],
heads: &[ChangesetId],
) -> Result<HashSet<ChangesetId>> {
derivers
.iter()
.map(|deriver| async move {
Ok::<_, Error>(stream::iter(
deriver
.pending(ctx.clone(), repo.clone(), heads.to_vec())
.await?
.into_iter()
.map(Ok::<_, Error>),
))
})
.collect::<FuturesUnordered<_>>()
.try_flatten()
.try_collect::<HashSet<_>>()
.await
}
/// If skiplist parents are not available, fetch the parents and their
/// generation from the repo.
async fn parents_with_generations(
ctx: &CoreContext,
repo: &BlobRepo,
csid: ChangesetId,
) -> Result<Vec<(ChangesetId, Generation)>> {
let parents = repo
.get_changeset_parents_by_bonsai(ctx.clone(), csid)
.await?;
let parents_with_generations =
stream::iter(parents.into_iter().map(|parent_csid| async move {
match repo.get_generation_number(ctx.clone(), parent_csid).await? {
Some(gen) => Ok(Some((parent_csid, gen))),
None => Err(anyhow!(
"Could not find generation number for commit {} parent {}",
csid,
parent_csid
)),
}
}))
.buffered(100)
.try_filter_map(|maybe_csid_gen| async move { Ok::<_, Error>(maybe_csid_gen) })
.try_collect::<Vec<_>>()
.await?;
Ok(parents_with_generations)
}
/// Slice a respository into a sequence of slices for derivation.
///
/// For large repositories with a long history, computing the full set of
/// commits before beginning backfilling is slow, and cannot be resumed
/// if interrupted.
///
/// This function makes large repositories more tractible by using the
/// skiplist index to divide the repository history into "slices", where
/// each slice consists of the commits known to the skiplist index that
/// are within a range of generations.
///
/// Each slice's heads should be derived together and will be ancestors of
/// subsequent slices. The returned slices consist only of heads which
/// haven't been derived by the provided derivers. Slicing stops once
/// all derived commits are reached.
///
/// For example, given a repository where the skiplists have the structure:
///
/// E (gen 450)
/// :
/// D (gen 350)
/// :
/// : C (gen 275)
/// :/
/// B (gen 180)
/// :
/// A (gen 1)
///
/// And a slice size of 200, this function will generate slices:
///
/// (0, [A, B])
/// (200, [C, D])
/// (400, [E])
///
/// If any of these heads are already derived then they are omitted. Empty
/// slices are also omitted.
///
/// This allows derivation of the first slice with underived commits to begin
/// more quickly, as the rest of the repository history doesn't need to be
/// traversed (just the ancestors of B and A).
///
/// Returns the number of slices, and an iterator where each item is
/// (slice_id, heads).
pub(crate) async fn slice_repository(
ctx: &CoreContext,
repo: &BlobRepo,
skiplist_index: &SkiplistIndex,
derivers: &[Arc<dyn DerivedUtils>],
heads: Vec<ChangesetId>,
slice_size: u64,
) -> Result<(usize, impl Iterator<Item = (u64, Vec<ChangesetId>)>)>
|
}
let mut head_generation_groups: BTreeMap<u64, Vec<ChangesetId>> = BTreeMap::new();
stream::iter(heads.into_iter().map(|csid| async move {
match repo.get_generation_number(ctx.clone(), csid).await? {
Some(gen) => Ok(Some((csid, gen))),
None => Err(anyhow!(
"Could not find generation number for head {}",
csid
)),
}
}))
.buffered(100)
.try_for_each(|maybe_csid_gen| {
if let Some((csid, gen)) = maybe_csid_gen {
let gen_group = (gen.value() / slice_size) * slice_size;
head_generation_groups
.entry(gen_group)
.or_default()
.push(csid);
}
async { Ok::<_, Error>(()) }
})
.await?;
let mut slices = Vec::new();
while let Some((cur_gen, mut heads)) = head_generation_groups.pop_last() {
info!(
ctx.logger(),
"Adding slice starting at generation {} with {} heads ({} slices queued)",
cur_gen,
heads.len(),
head_generation_groups.len()
);
let mut new_heads_groups = HashMap::new();
let mut seen: HashSet<_> = heads.iter().cloned().collect();
while let Some(csid) = heads.pop() {
let skip_parents = match skiplist_index.get_furthest_edges(csid) {
Some(skip_parents) => skip_parents,
None => {
// Ordinarily this shouldn't happen, as the skiplist ought
// to refer to commits that are also in the skiplist.
// However, if the commit is missing from the skiplist, we
// can look up the parents and their generations directly.
parents_with_generations(ctx, repo, csid).await?
}
};
for (parent, gen) in skip_parents {
if gen.value() >= cur_gen {
// This commit is in the same generation group.
if seen.insert(parent) {
heads.push(parent);
}
} else {
// This commit is in a new generation group.
let gen_group = (gen.value() / slice_size) * slice_size;
new_heads_groups.insert(parent, gen_group);
}
}
}
// Add all commits we've seen to the slice. The heads from the start
// of this iteration would be sufficient, however providing additional
// changesets will allow traversal of the graph to find all commits to
// run faster as it can fetch the parents of multiple commits at once.
slices.push((cur_gen, seen.into_iter().collect()));
// For each new head, check if it needs derivation, and if so, add it
// to its generation group.
let new_heads: Vec<_> = new_heads_groups.keys().cloned().collect();
let underived_new_heads =
underived_heads(ctx, repo, derivers, new_heads.as_slice()).await?;
for head in underived_new_heads {
if let Some(gen_group) = new_heads_groups.get(&head) {
head_generation_groups
.entry(*gen_group)
.or_default()
.push(head);
}
}
}
if!slices.is_empty() {
info!(
ctx.logger(),
"Repository sliced into {} slices requiring derivation",
slices.len()
);
}
Ok((slices.len(), slices.into_iter().rev()))
}
|
{
let heads = underived_heads(ctx, repo, derivers, heads.as_slice()).await?;
if skiplist_index.indexed_node_count() == 0 {
// This skiplist index is not populated. Generate a single
// slice with all heads.
info!(
ctx.logger(),
"Repository not sliced as skiplist index is not populated",
);
let heads = heads.into_iter().collect();
return Ok((1, vec![(0, heads)].into_iter().rev()));
}
// Add any unindexed heads to the skiplist index.
let changeset_fetcher = repo.get_changeset_fetcher();
for head in heads.iter() {
skiplist_index
.add_node(ctx, &changeset_fetcher, *head, std::u64::MAX)
.await?;
|
identifier_body
|
slice.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use anyhow::{anyhow, Error, Result};
use blobrepo::BlobRepo;
use context::CoreContext;
use derived_data_utils::DerivedUtils;
use futures::stream::{self, FuturesUnordered, StreamExt, TryStreamExt};
use mononoke_types::{ChangesetId, Generation};
use skiplist::SkiplistIndex;
use slog::info;
/// Determine which heads are underived in any of the derivers.
async fn underived_heads(
ctx: &CoreContext,
repo: &BlobRepo,
derivers: &[Arc<dyn DerivedUtils>],
heads: &[ChangesetId],
) -> Result<HashSet<ChangesetId>> {
derivers
.iter()
.map(|deriver| async move {
Ok::<_, Error>(stream::iter(
deriver
.pending(ctx.clone(), repo.clone(), heads.to_vec())
.await?
.into_iter()
.map(Ok::<_, Error>),
))
})
.collect::<FuturesUnordered<_>>()
.try_flatten()
.try_collect::<HashSet<_>>()
.await
}
/// If skiplist parents are not available, fetch the parents and their
/// generation from the repo.
async fn parents_with_generations(
ctx: &CoreContext,
repo: &BlobRepo,
csid: ChangesetId,
) -> Result<Vec<(ChangesetId, Generation)>> {
let parents = repo
.get_changeset_parents_by_bonsai(ctx.clone(), csid)
.await?;
let parents_with_generations =
stream::iter(parents.into_iter().map(|parent_csid| async move {
match repo.get_generation_number(ctx.clone(), parent_csid).await? {
Some(gen) => Ok(Some((parent_csid, gen))),
None => Err(anyhow!(
"Could not find generation number for commit {} parent {}",
csid,
parent_csid
)),
}
}))
.buffered(100)
.try_filter_map(|maybe_csid_gen| async move { Ok::<_, Error>(maybe_csid_gen) })
.try_collect::<Vec<_>>()
.await?;
Ok(parents_with_generations)
}
/// Slice a respository into a sequence of slices for derivation.
///
/// For large repositories with a long history, computing the full set of
/// commits before beginning backfilling is slow, and cannot be resumed
/// if interrupted.
///
/// This function makes large repositories more tractible by using the
/// skiplist index to divide the repository history into "slices", where
/// each slice consists of the commits known to the skiplist index that
/// are within a range of generations.
///
/// Each slice's heads should be derived together and will be ancestors of
/// subsequent slices. The returned slices consist only of heads which
/// haven't been derived by the provided derivers. Slicing stops once
/// all derived commits are reached.
///
/// For example, given a repository where the skiplists have the structure:
///
/// E (gen 450)
/// :
/// D (gen 350)
/// :
/// : C (gen 275)
/// :/
/// B (gen 180)
/// :
/// A (gen 1)
///
/// And a slice size of 200, this function will generate slices:
///
/// (0, [A, B])
/// (200, [C, D])
/// (400, [E])
///
/// If any of these heads are already derived then they are omitted. Empty
/// slices are also omitted.
///
/// This allows derivation of the first slice with underived commits to begin
/// more quickly, as the rest of the repository history doesn't need to be
/// traversed (just the ancestors of B and A).
///
/// Returns the number of slices, and an iterator where each item is
/// (slice_id, heads).
pub(crate) async fn slice_repository(
ctx: &CoreContext,
repo: &BlobRepo,
skiplist_index: &SkiplistIndex,
derivers: &[Arc<dyn DerivedUtils>],
heads: Vec<ChangesetId>,
slice_size: u64,
) -> Result<(usize, impl Iterator<Item = (u64, Vec<ChangesetId>)>)> {
let heads = underived_heads(ctx, repo, derivers, heads.as_slice()).await?;
if skiplist_index.indexed_node_count() == 0 {
// This skiplist index is not populated. Generate a single
// slice with all heads.
info!(
ctx.logger(),
"Repository not sliced as skiplist index is not populated",
);
let heads = heads.into_iter().collect();
return Ok((1, vec![(0, heads)].into_iter().rev()));
}
// Add any unindexed heads to the skiplist index.
let changeset_fetcher = repo.get_changeset_fetcher();
for head in heads.iter() {
skiplist_index
.add_node(ctx, &changeset_fetcher, *head, std::u64::MAX)
.await?;
}
let mut head_generation_groups: BTreeMap<u64, Vec<ChangesetId>> = BTreeMap::new();
stream::iter(heads.into_iter().map(|csid| async move {
match repo.get_generation_number(ctx.clone(), csid).await? {
Some(gen) => Ok(Some((csid, gen))),
None => Err(anyhow!(
"Could not find generation number for head {}",
csid
)),
}
}))
.buffered(100)
.try_for_each(|maybe_csid_gen| {
if let Some((csid, gen)) = maybe_csid_gen {
let gen_group = (gen.value() / slice_size) * slice_size;
head_generation_groups
.entry(gen_group)
.or_default()
.push(csid);
}
async { Ok::<_, Error>(()) }
})
.await?;
let mut slices = Vec::new();
while let Some((cur_gen, mut heads)) = head_generation_groups.pop_last() {
info!(
ctx.logger(),
"Adding slice starting at generation {} with {} heads ({} slices queued)",
cur_gen,
heads.len(),
head_generation_groups.len()
);
let mut new_heads_groups = HashMap::new();
let mut seen: HashSet<_> = heads.iter().cloned().collect();
while let Some(csid) = heads.pop() {
let skip_parents = match skiplist_index.get_furthest_edges(csid) {
Some(skip_parents) => skip_parents,
None => {
// Ordinarily this shouldn't happen, as the skiplist ought
// to refer to commits that are also in the skiplist.
// However, if the commit is missing from the skiplist, we
// can look up the parents and their generations directly.
parents_with_generations(ctx, repo, csid).await?
}
};
for (parent, gen) in skip_parents {
if gen.value() >= cur_gen {
// This commit is in the same generation group.
if seen.insert(parent)
|
} else {
// This commit is in a new generation group.
let gen_group = (gen.value() / slice_size) * slice_size;
new_heads_groups.insert(parent, gen_group);
}
}
}
// Add all commits we've seen to the slice. The heads from the start
// of this iteration would be sufficient, however providing additional
// changesets will allow traversal of the graph to find all commits to
// run faster as it can fetch the parents of multiple commits at once.
slices.push((cur_gen, seen.into_iter().collect()));
// For each new head, check if it needs derivation, and if so, add it
// to its generation group.
let new_heads: Vec<_> = new_heads_groups.keys().cloned().collect();
let underived_new_heads =
underived_heads(ctx, repo, derivers, new_heads.as_slice()).await?;
for head in underived_new_heads {
if let Some(gen_group) = new_heads_groups.get(&head) {
head_generation_groups
.entry(*gen_group)
.or_default()
.push(head);
}
}
}
if!slices.is_empty() {
info!(
ctx.logger(),
"Repository sliced into {} slices requiring derivation",
slices.len()
);
}
Ok((slices.len(), slices.into_iter().rev()))
}
|
{
heads.push(parent);
}
|
conditional_block
|
slice.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use anyhow::{anyhow, Error, Result};
use blobrepo::BlobRepo;
use context::CoreContext;
use derived_data_utils::DerivedUtils;
use futures::stream::{self, FuturesUnordered, StreamExt, TryStreamExt};
use mononoke_types::{ChangesetId, Generation};
use skiplist::SkiplistIndex;
use slog::info;
/// Determine which heads are underived in any of the derivers.
async fn underived_heads(
ctx: &CoreContext,
repo: &BlobRepo,
derivers: &[Arc<dyn DerivedUtils>],
heads: &[ChangesetId],
) -> Result<HashSet<ChangesetId>> {
derivers
.iter()
.map(|deriver| async move {
Ok::<_, Error>(stream::iter(
deriver
.pending(ctx.clone(), repo.clone(), heads.to_vec())
.await?
.into_iter()
.map(Ok::<_, Error>),
))
})
.collect::<FuturesUnordered<_>>()
.try_flatten()
.try_collect::<HashSet<_>>()
.await
}
/// If skiplist parents are not available, fetch the parents and their
/// generation from the repo.
async fn parents_with_generations(
ctx: &CoreContext,
repo: &BlobRepo,
csid: ChangesetId,
) -> Result<Vec<(ChangesetId, Generation)>> {
let parents = repo
.get_changeset_parents_by_bonsai(ctx.clone(), csid)
.await?;
let parents_with_generations =
stream::iter(parents.into_iter().map(|parent_csid| async move {
match repo.get_generation_number(ctx.clone(), parent_csid).await? {
Some(gen) => Ok(Some((parent_csid, gen))),
None => Err(anyhow!(
"Could not find generation number for commit {} parent {}",
csid,
parent_csid
)),
}
}))
.buffered(100)
.try_filter_map(|maybe_csid_gen| async move { Ok::<_, Error>(maybe_csid_gen) })
|
}
/// Slice a respository into a sequence of slices for derivation.
///
/// For large repositories with a long history, computing the full set of
/// commits before beginning backfilling is slow, and cannot be resumed
/// if interrupted.
///
/// This function makes large repositories more tractible by using the
/// skiplist index to divide the repository history into "slices", where
/// each slice consists of the commits known to the skiplist index that
/// are within a range of generations.
///
/// Each slice's heads should be derived together and will be ancestors of
/// subsequent slices. The returned slices consist only of heads which
/// haven't been derived by the provided derivers. Slicing stops once
/// all derived commits are reached.
///
/// For example, given a repository where the skiplists have the structure:
///
/// E (gen 450)
/// :
/// D (gen 350)
/// :
/// : C (gen 275)
/// :/
/// B (gen 180)
/// :
/// A (gen 1)
///
/// And a slice size of 200, this function will generate slices:
///
/// (0, [A, B])
/// (200, [C, D])
/// (400, [E])
///
/// If any of these heads are already derived then they are omitted. Empty
/// slices are also omitted.
///
/// This allows derivation of the first slice with underived commits to begin
/// more quickly, as the rest of the repository history doesn't need to be
/// traversed (just the ancestors of B and A).
///
/// Returns the number of slices, and an iterator where each item is
/// (slice_id, heads).
pub(crate) async fn slice_repository(
ctx: &CoreContext,
repo: &BlobRepo,
skiplist_index: &SkiplistIndex,
derivers: &[Arc<dyn DerivedUtils>],
heads: Vec<ChangesetId>,
slice_size: u64,
) -> Result<(usize, impl Iterator<Item = (u64, Vec<ChangesetId>)>)> {
let heads = underived_heads(ctx, repo, derivers, heads.as_slice()).await?;
if skiplist_index.indexed_node_count() == 0 {
// This skiplist index is not populated. Generate a single
// slice with all heads.
info!(
ctx.logger(),
"Repository not sliced as skiplist index is not populated",
);
let heads = heads.into_iter().collect();
return Ok((1, vec![(0, heads)].into_iter().rev()));
}
// Add any unindexed heads to the skiplist index.
let changeset_fetcher = repo.get_changeset_fetcher();
for head in heads.iter() {
skiplist_index
.add_node(ctx, &changeset_fetcher, *head, std::u64::MAX)
.await?;
}
let mut head_generation_groups: BTreeMap<u64, Vec<ChangesetId>> = BTreeMap::new();
stream::iter(heads.into_iter().map(|csid| async move {
match repo.get_generation_number(ctx.clone(), csid).await? {
Some(gen) => Ok(Some((csid, gen))),
None => Err(anyhow!(
"Could not find generation number for head {}",
csid
)),
}
}))
.buffered(100)
.try_for_each(|maybe_csid_gen| {
if let Some((csid, gen)) = maybe_csid_gen {
let gen_group = (gen.value() / slice_size) * slice_size;
head_generation_groups
.entry(gen_group)
.or_default()
.push(csid);
}
async { Ok::<_, Error>(()) }
})
.await?;
let mut slices = Vec::new();
while let Some((cur_gen, mut heads)) = head_generation_groups.pop_last() {
info!(
ctx.logger(),
"Adding slice starting at generation {} with {} heads ({} slices queued)",
cur_gen,
heads.len(),
head_generation_groups.len()
);
let mut new_heads_groups = HashMap::new();
let mut seen: HashSet<_> = heads.iter().cloned().collect();
while let Some(csid) = heads.pop() {
let skip_parents = match skiplist_index.get_furthest_edges(csid) {
Some(skip_parents) => skip_parents,
None => {
// Ordinarily this shouldn't happen, as the skiplist ought
// to refer to commits that are also in the skiplist.
// However, if the commit is missing from the skiplist, we
// can look up the parents and their generations directly.
parents_with_generations(ctx, repo, csid).await?
}
};
for (parent, gen) in skip_parents {
if gen.value() >= cur_gen {
// This commit is in the same generation group.
if seen.insert(parent) {
heads.push(parent);
}
} else {
// This commit is in a new generation group.
let gen_group = (gen.value() / slice_size) * slice_size;
new_heads_groups.insert(parent, gen_group);
}
}
}
// Add all commits we've seen to the slice. The heads from the start
// of this iteration would be sufficient, however providing additional
// changesets will allow traversal of the graph to find all commits to
// run faster as it can fetch the parents of multiple commits at once.
slices.push((cur_gen, seen.into_iter().collect()));
// For each new head, check if it needs derivation, and if so, add it
// to its generation group.
let new_heads: Vec<_> = new_heads_groups.keys().cloned().collect();
let underived_new_heads =
underived_heads(ctx, repo, derivers, new_heads.as_slice()).await?;
for head in underived_new_heads {
if let Some(gen_group) = new_heads_groups.get(&head) {
head_generation_groups
.entry(*gen_group)
.or_default()
.push(head);
}
}
}
if!slices.is_empty() {
info!(
ctx.logger(),
"Repository sliced into {} slices requiring derivation",
slices.len()
);
}
Ok((slices.len(), slices.into_iter().rev()))
}
|
.try_collect::<Vec<_>>()
.await?;
Ok(parents_with_generations)
|
random_line_split
|
slice.rs
|
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::collections::{BTreeMap, HashMap, HashSet};
use std::sync::Arc;
use anyhow::{anyhow, Error, Result};
use blobrepo::BlobRepo;
use context::CoreContext;
use derived_data_utils::DerivedUtils;
use futures::stream::{self, FuturesUnordered, StreamExt, TryStreamExt};
use mononoke_types::{ChangesetId, Generation};
use skiplist::SkiplistIndex;
use slog::info;
/// Determine which heads are underived in any of the derivers.
async fn
|
(
ctx: &CoreContext,
repo: &BlobRepo,
derivers: &[Arc<dyn DerivedUtils>],
heads: &[ChangesetId],
) -> Result<HashSet<ChangesetId>> {
derivers
.iter()
.map(|deriver| async move {
Ok::<_, Error>(stream::iter(
deriver
.pending(ctx.clone(), repo.clone(), heads.to_vec())
.await?
.into_iter()
.map(Ok::<_, Error>),
))
})
.collect::<FuturesUnordered<_>>()
.try_flatten()
.try_collect::<HashSet<_>>()
.await
}
/// If skiplist parents are not available, fetch the parents and their
/// generation from the repo.
async fn parents_with_generations(
ctx: &CoreContext,
repo: &BlobRepo,
csid: ChangesetId,
) -> Result<Vec<(ChangesetId, Generation)>> {
let parents = repo
.get_changeset_parents_by_bonsai(ctx.clone(), csid)
.await?;
let parents_with_generations =
stream::iter(parents.into_iter().map(|parent_csid| async move {
match repo.get_generation_number(ctx.clone(), parent_csid).await? {
Some(gen) => Ok(Some((parent_csid, gen))),
None => Err(anyhow!(
"Could not find generation number for commit {} parent {}",
csid,
parent_csid
)),
}
}))
.buffered(100)
.try_filter_map(|maybe_csid_gen| async move { Ok::<_, Error>(maybe_csid_gen) })
.try_collect::<Vec<_>>()
.await?;
Ok(parents_with_generations)
}
/// Slice a respository into a sequence of slices for derivation.
///
/// For large repositories with a long history, computing the full set of
/// commits before beginning backfilling is slow, and cannot be resumed
/// if interrupted.
///
/// This function makes large repositories more tractible by using the
/// skiplist index to divide the repository history into "slices", where
/// each slice consists of the commits known to the skiplist index that
/// are within a range of generations.
///
/// Each slice's heads should be derived together and will be ancestors of
/// subsequent slices. The returned slices consist only of heads which
/// haven't been derived by the provided derivers. Slicing stops once
/// all derived commits are reached.
///
/// For example, given a repository where the skiplists have the structure:
///
/// E (gen 450)
/// :
/// D (gen 350)
/// :
/// : C (gen 275)
/// :/
/// B (gen 180)
/// :
/// A (gen 1)
///
/// And a slice size of 200, this function will generate slices:
///
/// (0, [A, B])
/// (200, [C, D])
/// (400, [E])
///
/// If any of these heads are already derived then they are omitted. Empty
/// slices are also omitted.
///
/// This allows derivation of the first slice with underived commits to begin
/// more quickly, as the rest of the repository history doesn't need to be
/// traversed (just the ancestors of B and A).
///
/// Returns the number of slices, and an iterator where each item is
/// (slice_id, heads).
pub(crate) async fn slice_repository(
ctx: &CoreContext,
repo: &BlobRepo,
skiplist_index: &SkiplistIndex,
derivers: &[Arc<dyn DerivedUtils>],
heads: Vec<ChangesetId>,
slice_size: u64,
) -> Result<(usize, impl Iterator<Item = (u64, Vec<ChangesetId>)>)> {
let heads = underived_heads(ctx, repo, derivers, heads.as_slice()).await?;
if skiplist_index.indexed_node_count() == 0 {
// This skiplist index is not populated. Generate a single
// slice with all heads.
info!(
ctx.logger(),
"Repository not sliced as skiplist index is not populated",
);
let heads = heads.into_iter().collect();
return Ok((1, vec![(0, heads)].into_iter().rev()));
}
// Add any unindexed heads to the skiplist index.
let changeset_fetcher = repo.get_changeset_fetcher();
for head in heads.iter() {
skiplist_index
.add_node(ctx, &changeset_fetcher, *head, std::u64::MAX)
.await?;
}
let mut head_generation_groups: BTreeMap<u64, Vec<ChangesetId>> = BTreeMap::new();
stream::iter(heads.into_iter().map(|csid| async move {
match repo.get_generation_number(ctx.clone(), csid).await? {
Some(gen) => Ok(Some((csid, gen))),
None => Err(anyhow!(
"Could not find generation number for head {}",
csid
)),
}
}))
.buffered(100)
.try_for_each(|maybe_csid_gen| {
if let Some((csid, gen)) = maybe_csid_gen {
let gen_group = (gen.value() / slice_size) * slice_size;
head_generation_groups
.entry(gen_group)
.or_default()
.push(csid);
}
async { Ok::<_, Error>(()) }
})
.await?;
let mut slices = Vec::new();
while let Some((cur_gen, mut heads)) = head_generation_groups.pop_last() {
info!(
ctx.logger(),
"Adding slice starting at generation {} with {} heads ({} slices queued)",
cur_gen,
heads.len(),
head_generation_groups.len()
);
let mut new_heads_groups = HashMap::new();
let mut seen: HashSet<_> = heads.iter().cloned().collect();
while let Some(csid) = heads.pop() {
let skip_parents = match skiplist_index.get_furthest_edges(csid) {
Some(skip_parents) => skip_parents,
None => {
// Ordinarily this shouldn't happen, as the skiplist ought
// to refer to commits that are also in the skiplist.
// However, if the commit is missing from the skiplist, we
// can look up the parents and their generations directly.
parents_with_generations(ctx, repo, csid).await?
}
};
for (parent, gen) in skip_parents {
if gen.value() >= cur_gen {
// This commit is in the same generation group.
if seen.insert(parent) {
heads.push(parent);
}
} else {
// This commit is in a new generation group.
let gen_group = (gen.value() / slice_size) * slice_size;
new_heads_groups.insert(parent, gen_group);
}
}
}
// Add all commits we've seen to the slice. The heads from the start
// of this iteration would be sufficient, however providing additional
// changesets will allow traversal of the graph to find all commits to
// run faster as it can fetch the parents of multiple commits at once.
slices.push((cur_gen, seen.into_iter().collect()));
// For each new head, check if it needs derivation, and if so, add it
// to its generation group.
let new_heads: Vec<_> = new_heads_groups.keys().cloned().collect();
let underived_new_heads =
underived_heads(ctx, repo, derivers, new_heads.as_slice()).await?;
for head in underived_new_heads {
if let Some(gen_group) = new_heads_groups.get(&head) {
head_generation_groups
.entry(*gen_group)
.or_default()
.push(head);
}
}
}
if!slices.is_empty() {
info!(
ctx.logger(),
"Repository sliced into {} slices requiring derivation",
slices.len()
);
}
Ok((slices.len(), slices.into_iter().rev()))
}
|
underived_heads
|
identifier_name
|
mod.rs
|
//! Unified interface for communicating with different trackers.
use std::old_io::{IoResult};
use std::old_io::net::ip::{SocketAddr, IpAddr};
use types::{Timepoint};
pub mod udp;
/// Information pertaining to the swarm we are in.
pub struct AnnounceInfo {
/// Number of leechers in the swarm.
pub leechers: i32,
/// Number of seeders in the swarm.
pub seeders: i32,
/// List of SocketAddrs for remote peers in the swarm.
pub peers: Vec<SocketAddr>,
/// Indicates when to send an update to the tracker.
pub interval: Timepoint
}
/// Statistics for a specific torrent.
#[derive(Copy)]
pub struct
|
{
/// Number of leechers in the swarm.
pub leechers: i32,
/// Number of seeders in the swarm.
pub seeders: i32,
/// Number of downloads for this torrent.
pub downloads: i32
}
/// Statistics for our download session.
#[derive(Copy)]
pub struct TransferStatus {
/// Number of bytes downloaded so far.
pub downloaded: i64,
/// Number of bytes left to download.
pub remaining: i64,
/// Number of bytes uploaded so far.
pub uploaded: i64
}
/// Interface for communicating with an generic tracker.
pub trait Tracker {
/// Returns the local ip address that is being used to communicate with the tracker.
fn local_ip(&mut self) -> IpAddr;
/// Returns information about the swarm for a particular torrent file without
/// joining the swarm.
///
/// This is a blocking operation.
fn send_scrape(&mut self) -> IoResult<ScrapeInfo>;
/// Sends an announce request to the tracker signalling a start event. This request
/// enters us into the swarm and we are required to send periodic updates as
/// specified by the tracker in order to be kept in the swarm. Periodic updates
/// should be sent with update_announce.
///
/// This is a blocking operation.
fn start_announce(&mut self, remaining: i64) -> IoResult<AnnounceInfo>;
/// Sends an announce request to the tracker signalling an update event. This request
/// acts as a heartbeat so that the tracker knows we are still connected and wanting
/// to be kept in the swarm.
///
/// This is a blocking operation.
fn update_announce(&mut self, status: TransferStatus) -> IoResult<AnnounceInfo>;
/// Sends an announce request to the tracker signalling a stop event. This request
/// exists to let the tracker know that we are gracefully shutting down and that
/// it should remove us from the swarm.
///
/// This is a blocking operation.
fn stop_announce(&mut self, status: TransferStatus) -> IoResult<()>;
/// Sends an announce request to the tracker signalling a completed event. This request
/// exists to let the tracker know that we have completed our download and wish to
/// remain in the swarm as a seeder.
///
/// This is a blocking operation.
fn complete_announce(&mut self, status: TransferStatus) -> IoResult<AnnounceInfo>;
}
|
ScrapeInfo
|
identifier_name
|
mod.rs
|
//! Unified interface for communicating with different trackers.
use std::old_io::{IoResult};
use std::old_io::net::ip::{SocketAddr, IpAddr};
use types::{Timepoint};
pub mod udp;
/// Information pertaining to the swarm we are in.
pub struct AnnounceInfo {
/// Number of leechers in the swarm.
pub leechers: i32,
/// Number of seeders in the swarm.
pub seeders: i32,
/// List of SocketAddrs for remote peers in the swarm.
pub peers: Vec<SocketAddr>,
/// Indicates when to send an update to the tracker.
pub interval: Timepoint
}
/// Statistics for a specific torrent.
#[derive(Copy)]
pub struct ScrapeInfo {
/// Number of leechers in the swarm.
pub leechers: i32,
/// Number of seeders in the swarm.
pub seeders: i32,
/// Number of downloads for this torrent.
pub downloads: i32
}
/// Statistics for our download session.
#[derive(Copy)]
pub struct TransferStatus {
/// Number of bytes downloaded so far.
pub downloaded: i64,
/// Number of bytes left to download.
|
}
/// Interface for communicating with an generic tracker.
pub trait Tracker {
/// Returns the local ip address that is being used to communicate with the tracker.
fn local_ip(&mut self) -> IpAddr;
/// Returns information about the swarm for a particular torrent file without
/// joining the swarm.
///
/// This is a blocking operation.
fn send_scrape(&mut self) -> IoResult<ScrapeInfo>;
/// Sends an announce request to the tracker signalling a start event. This request
/// enters us into the swarm and we are required to send periodic updates as
/// specified by the tracker in order to be kept in the swarm. Periodic updates
/// should be sent with update_announce.
///
/// This is a blocking operation.
fn start_announce(&mut self, remaining: i64) -> IoResult<AnnounceInfo>;
/// Sends an announce request to the tracker signalling an update event. This request
/// acts as a heartbeat so that the tracker knows we are still connected and wanting
/// to be kept in the swarm.
///
/// This is a blocking operation.
fn update_announce(&mut self, status: TransferStatus) -> IoResult<AnnounceInfo>;
/// Sends an announce request to the tracker signalling a stop event. This request
/// exists to let the tracker know that we are gracefully shutting down and that
/// it should remove us from the swarm.
///
/// This is a blocking operation.
fn stop_announce(&mut self, status: TransferStatus) -> IoResult<()>;
/// Sends an announce request to the tracker signalling a completed event. This request
/// exists to let the tracker know that we have completed our download and wish to
/// remain in the swarm as a seeder.
///
/// This is a blocking operation.
fn complete_announce(&mut self, status: TransferStatus) -> IoResult<AnnounceInfo>;
}
|
pub remaining: i64,
/// Number of bytes uploaded so far.
pub uploaded: i64
|
random_line_split
|
ifmt-bad-arg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn
|
() {
// bad arguments to the format! call
format!("{}"); //~ ERROR: invalid reference to argument
format!("{1}", 1); //~ ERROR: invalid reference to argument `1`
//~^ ERROR: argument never used
format!("{foo}"); //~ ERROR: no argument named `foo`
format!("{}", 1, 2); //~ ERROR: argument never used
format!("{1}", 1, 2); //~ ERROR: argument never used
format!("{}", 1, foo=2); //~ ERROR: named argument never used
format!("{foo}", 1, foo=2); //~ ERROR: argument never used
format!("", foo=2); //~ ERROR: named argument never used
format!("{0:x} {0:X}", 1); //~ ERROR: redeclared with type `X`
format!("{foo:x} {foo:X}", foo=1); //~ ERROR: redeclared with type `X`
format!("{foo}", foo=1, foo=2); //~ ERROR: duplicate argument
format!("", foo=1, 2); //~ ERROR: positional arguments cannot follow
// bad number of arguments, see #15780
format!("{0}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
format!("{0} {1}", 1);
//~^ ERROR invalid reference to argument `1` (there is 1 argument)
format!("{0} {1} {2}", 1, 2);
//~^ ERROR invalid reference to argument `2` (there are 2 arguments)
format!("{0} {1}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
//~^^ ERROR invalid reference to argument `1` (no arguments given)
// bad syntax of the format string
format!("{"); //~ ERROR: expected `}` but string was terminated
format!("foo } bar"); //~ ERROR: unmatched `}` found
format!("foo }"); //~ ERROR: unmatched `}` found
format!(); //~ ERROR: requires at least a format string argument
format!("" 1); //~ ERROR: expected token: `,`
format!("", 1 1); //~ ERROR: expected token: `,`
}
|
main
|
identifier_name
|
ifmt-bad-arg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
// bad arguments to the format! call
format!("{}"); //~ ERROR: invalid reference to argument
format!("{1}", 1); //~ ERROR: invalid reference to argument `1`
//~^ ERROR: argument never used
format!("{foo}"); //~ ERROR: no argument named `foo`
format!("{}", 1, 2); //~ ERROR: argument never used
format!("{1}", 1, 2); //~ ERROR: argument never used
format!("{}", 1, foo=2); //~ ERROR: named argument never used
format!("{foo}", 1, foo=2); //~ ERROR: argument never used
format!("", foo=2); //~ ERROR: named argument never used
format!("{0:x} {0:X}", 1); //~ ERROR: redeclared with type `X`
format!("{foo:x} {foo:X}", foo=1); //~ ERROR: redeclared with type `X`
format!("{foo}", foo=1, foo=2); //~ ERROR: duplicate argument
format!("", foo=1, 2); //~ ERROR: positional arguments cannot follow
// bad number of arguments, see #15780
|
format!("{0}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
format!("{0} {1}", 1);
//~^ ERROR invalid reference to argument `1` (there is 1 argument)
format!("{0} {1} {2}", 1, 2);
//~^ ERROR invalid reference to argument `2` (there are 2 arguments)
format!("{0} {1}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
//~^^ ERROR invalid reference to argument `1` (no arguments given)
// bad syntax of the format string
format!("{"); //~ ERROR: expected `}` but string was terminated
format!("foo } bar"); //~ ERROR: unmatched `}` found
format!("foo }"); //~ ERROR: unmatched `}` found
format!(); //~ ERROR: requires at least a format string argument
format!("" 1); //~ ERROR: expected token: `,`
format!("", 1 1); //~ ERROR: expected token: `,`
}
|
random_line_split
|
|
ifmt-bad-arg.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main()
|
// bad number of arguments, see #15780
format!("{0}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
format!("{0} {1}", 1);
//~^ ERROR invalid reference to argument `1` (there is 1 argument)
format!("{0} {1} {2}", 1, 2);
//~^ ERROR invalid reference to argument `2` (there are 2 arguments)
format!("{0} {1}");
//~^ ERROR invalid reference to argument `0` (no arguments given)
//~^^ ERROR invalid reference to argument `1` (no arguments given)
// bad syntax of the format string
format!("{"); //~ ERROR: expected `}` but string was terminated
format!("foo } bar"); //~ ERROR: unmatched `}` found
format!("foo }"); //~ ERROR: unmatched `}` found
format!(); //~ ERROR: requires at least a format string argument
format!("" 1); //~ ERROR: expected token: `,`
format!("", 1 1); //~ ERROR: expected token: `,`
}
|
{
// bad arguments to the format! call
format!("{}"); //~ ERROR: invalid reference to argument
format!("{1}", 1); //~ ERROR: invalid reference to argument `1`
//~^ ERROR: argument never used
format!("{foo}"); //~ ERROR: no argument named `foo`
format!("{}", 1, 2); //~ ERROR: argument never used
format!("{1}", 1, 2); //~ ERROR: argument never used
format!("{}", 1, foo=2); //~ ERROR: named argument never used
format!("{foo}", 1, foo=2); //~ ERROR: argument never used
format!("", foo=2); //~ ERROR: named argument never used
format!("{0:x} {0:X}", 1); //~ ERROR: redeclared with type `X`
format!("{foo:x} {foo:X}", foo=1); //~ ERROR: redeclared with type `X`
format!("{foo}", foo=1, foo=2); //~ ERROR: duplicate argument
format!("", foo=1, 2); //~ ERROR: positional arguments cannot follow
|
identifier_body
|
utils.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::char;
use std::ops::Range;
pub fn get_prefix_bounds(prefix: impl AsRef<str>) -> Range<String> {
let mut upper = prefix.as_ref().to_string();
assert!(!upper.is_empty());
let mut last_char = upper.pop().unwrap();
let mut last_char_code: u32 = last_char as u32;
while let Some(next_val) = last_char_code.checked_add(1) {
if let Some(c) = char::from_u32(next_val) {
last_char = c;
break;
}
last_char_code = next_val;
}
upper.push(last_char);
prefix.as_ref().to_string()..upper
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_prefix_bounds_one_letter() {
let prefix = "a";
let end = "b";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_with_z()
|
#[test]
fn test_get_prefix_bounds_multiple() {
let prefix = "comm"; // prefix of commit
let end = "comn";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_space() {
let prefix = "comm "; // prefix of commit with trailing space
let end = "comm!";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_unicode() {
let prefix = "\u{1F36A}"; // Cookie Emoji
let end = "\u{1F36B}";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
}
|
{
let prefix = "z";
let end = "{";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
|
identifier_body
|
utils.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::char;
use std::ops::Range;
pub fn get_prefix_bounds(prefix: impl AsRef<str>) -> Range<String> {
let mut upper = prefix.as_ref().to_string();
assert!(!upper.is_empty());
let mut last_char = upper.pop().unwrap();
let mut last_char_code: u32 = last_char as u32;
while let Some(next_val) = last_char_code.checked_add(1) {
if let Some(c) = char::from_u32(next_val) {
last_char = c;
break;
}
last_char_code = next_val;
}
upper.push(last_char);
prefix.as_ref().to_string()..upper
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_prefix_bounds_one_letter() {
let prefix = "a";
let end = "b";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_with_z() {
let prefix = "z";
let end = "{";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_multiple() {
let prefix = "comm"; // prefix of commit
let end = "comn";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_space() {
let prefix = "comm "; // prefix of commit with trailing space
let end = "comm!";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
|
assert_eq!(range.end.as_str(), end);
}
}
|
fn test_get_prefix_bounds_unicode() {
let prefix = "\u{1F36A}"; // Cookie Emoji
let end = "\u{1F36B}";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
|
random_line_split
|
utils.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::char;
use std::ops::Range;
pub fn get_prefix_bounds(prefix: impl AsRef<str>) -> Range<String> {
let mut upper = prefix.as_ref().to_string();
assert!(!upper.is_empty());
let mut last_char = upper.pop().unwrap();
let mut last_char_code: u32 = last_char as u32;
while let Some(next_val) = last_char_code.checked_add(1) {
if let Some(c) = char::from_u32(next_val) {
last_char = c;
break;
}
last_char_code = next_val;
}
upper.push(last_char);
prefix.as_ref().to_string()..upper
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn
|
() {
let prefix = "a";
let end = "b";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_with_z() {
let prefix = "z";
let end = "{";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_multiple() {
let prefix = "comm"; // prefix of commit
let end = "comn";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_space() {
let prefix = "comm "; // prefix of commit with trailing space
let end = "comm!";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_unicode() {
let prefix = "\u{1F36A}"; // Cookie Emoji
let end = "\u{1F36B}";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
}
|
test_get_prefix_bounds_one_letter
|
identifier_name
|
utils.rs
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This software may be used and distributed according to the terms of the
* GNU General Public License version 2.
*/
use std::char;
use std::ops::Range;
pub fn get_prefix_bounds(prefix: impl AsRef<str>) -> Range<String> {
let mut upper = prefix.as_ref().to_string();
assert!(!upper.is_empty());
let mut last_char = upper.pop().unwrap();
let mut last_char_code: u32 = last_char as u32;
while let Some(next_val) = last_char_code.checked_add(1) {
if let Some(c) = char::from_u32(next_val)
|
last_char_code = next_val;
}
upper.push(last_char);
prefix.as_ref().to_string()..upper
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_get_prefix_bounds_one_letter() {
let prefix = "a";
let end = "b";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_with_z() {
let prefix = "z";
let end = "{";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_multiple() {
let prefix = "comm"; // prefix of commit
let end = "comn";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_ending_space() {
let prefix = "comm "; // prefix of commit with trailing space
let end = "comm!";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
#[test]
fn test_get_prefix_bounds_unicode() {
let prefix = "\u{1F36A}"; // Cookie Emoji
let end = "\u{1F36B}";
let range = get_prefix_bounds(prefix);
assert_eq!(range.start.as_str(), prefix);
assert_eq!(range.end.as_str(), end);
}
}
|
{
last_char = c;
break;
}
|
conditional_block
|
macros.rs
|
//
//
//
/// Returns true if the passed exppression matches the pattern
#[macro_export]
macro_rules! is
{
($val:expr, $p:pat) => ( match $val { $p => true, _ => false } );
}
#[doc(hidden)]
#[macro_export]
macro_rules! _count
{
() => {0};
($a:expr) => {1};
($a:expr, $($b:expr),+) => {1+_count!($($b),+)};
}
/// Define a kernel module (creates the module header, containg the name and dependency strings)
#[macro_export]
macro_rules! module_define
{
($name:ident, [$($deps:ident),*], $init:path) => (
//#[assume_reachable]
#[doc(hidden)]
#[link_section = ".MODULE_LIST"]
#[linkage="external"]
#[allow(dead_code)]
pub static S_MODULE: $crate::modules::ModuleInfo = $crate::modules::ModuleInfo {
name: stringify!($name),
init: $init,
deps: &S_DEPS,
_rsvd: [0,0,0],
};
#[doc(hidden)]
static S_DEPS: [&'static str; _count!($($deps),*)] = [$(stringify!($deps)),*];
);
}
/// Ensure that a type implments the provided trait
///
/// Useful for "Send" and "Sync"
#[macro_export]
macro_rules! assert_trait
{
($t:ty : $tr:ident) => { #[allow(warnings)] fn assert_trait<T: $tr>() { } #[allow(dead_code)] fn call_assert_trait() { assert_trait::<$t>() } }
}
#[doc(hidden)]
#[tag_safe(irq)]
pub fn type_name<T:?::core::marker::Sized>() -> &'static str
|
/// A safe wrapper around the `type_name` intrinsic
#[macro_export]
macro_rules! type_name
{
($t:ty) => ( $crate::macros::type_name::<$t>() );
}
/// Iterator helper, desugars to a.zip(b)
#[macro_export]
macro_rules! zip
{
($a:expr, $b:expr) => ( $a.zip($b) );
}
/// Iterator helper, desugars to a.chain(b).chain(b2)
macro_rules! chain
{
($b:expr, $($b:expr),+) => ( $a$(.chain($b))+ );
}
/// Provides a short and noticable "TODO: " message
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Provides a less boiler-plate way to implement fmt traits for simple types
///
/// Only supports non-generic types and unbounded types (due to challenges in matching generic definitions)
///
/// ```
/// impl_fmt! {
/// Debug(self, f) for Type {
/// write!(f, "Hello world!")
/// }
/// <T> Display(self, f) for Container {
/// write!(f, "Hello world!")
/// }
/// }
/// ```
#[macro_export]
macro_rules! impl_fmt
{
( $( <$($g:ident),+> $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl<$($g),+> ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
( $( $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
}
/// Implements the From trait for the provided type, avoiding boilerplate
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::core::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
// NOTE: This should really be in ::threads::wait_queue, but it also needs to be early in parse
/// Wait on a wait queue contained within a spinlock
///
/// Due to lifetime issues, the more erganomical `lock.queue.wait(lock)` does not pass borrow checking.
macro_rules! waitqueue_wait_ext {
($lock:expr, $field:ident) => ({
let mut lock: $crate::arch::sync::HeldSpinlock<_> = $lock;
let irql = lock.$field.wait_int();
::core::mem::drop(lock);
::core::mem::drop(irql);
$crate::threads::reschedule();
});
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
/// Initialise a static Mutex
#[macro_export]
macro_rules! mutex_init{ ($val:expr) => ($crate::sync::mutex::Mutex::new($val)) }
/// Initialise a static LazyMutex
#[macro_export]
macro_rules! lazymutex_init{ () => ($crate::sync::mutex::LazyMutex::new())}
// vim: ft=rust
|
{
// SAFE: All intrinsics are unsafe, no matter how safe they really are
unsafe { ::core::intrinsics::type_name::<T>() }
}
|
identifier_body
|
macros.rs
|
//
//
//
/// Returns true if the passed exppression matches the pattern
#[macro_export]
macro_rules! is
{
($val:expr, $p:pat) => ( match $val { $p => true, _ => false } );
}
#[doc(hidden)]
#[macro_export]
macro_rules! _count
{
() => {0};
($a:expr) => {1};
($a:expr, $($b:expr),+) => {1+_count!($($b),+)};
}
/// Define a kernel module (creates the module header, containg the name and dependency strings)
#[macro_export]
macro_rules! module_define
{
($name:ident, [$($deps:ident),*], $init:path) => (
//#[assume_reachable]
#[doc(hidden)]
#[link_section = ".MODULE_LIST"]
#[linkage="external"]
#[allow(dead_code)]
pub static S_MODULE: $crate::modules::ModuleInfo = $crate::modules::ModuleInfo {
name: stringify!($name),
init: $init,
deps: &S_DEPS,
_rsvd: [0,0,0],
};
#[doc(hidden)]
static S_DEPS: [&'static str; _count!($($deps),*)] = [$(stringify!($deps)),*];
);
}
/// Ensure that a type implments the provided trait
///
/// Useful for "Send" and "Sync"
#[macro_export]
macro_rules! assert_trait
{
($t:ty : $tr:ident) => { #[allow(warnings)] fn assert_trait<T: $tr>() { } #[allow(dead_code)] fn call_assert_trait() { assert_trait::<$t>() } }
}
#[doc(hidden)]
#[tag_safe(irq)]
pub fn
|
<T:?::core::marker::Sized>() -> &'static str {
// SAFE: All intrinsics are unsafe, no matter how safe they really are
unsafe { ::core::intrinsics::type_name::<T>() }
}
/// A safe wrapper around the `type_name` intrinsic
#[macro_export]
macro_rules! type_name
{
($t:ty) => ( $crate::macros::type_name::<$t>() );
}
/// Iterator helper, desugars to a.zip(b)
#[macro_export]
macro_rules! zip
{
($a:expr, $b:expr) => ( $a.zip($b) );
}
/// Iterator helper, desugars to a.chain(b).chain(b2)
macro_rules! chain
{
($b:expr, $($b:expr),+) => ( $a$(.chain($b))+ );
}
/// Provides a short and noticable "TODO: " message
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Provides a less boiler-plate way to implement fmt traits for simple types
///
/// Only supports non-generic types and unbounded types (due to challenges in matching generic definitions)
///
/// ```
/// impl_fmt! {
/// Debug(self, f) for Type {
/// write!(f, "Hello world!")
/// }
/// <T> Display(self, f) for Container {
/// write!(f, "Hello world!")
/// }
/// }
/// ```
#[macro_export]
macro_rules! impl_fmt
{
( $( <$($g:ident),+> $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl<$($g),+> ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
( $( $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
}
/// Implements the From trait for the provided type, avoiding boilerplate
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::core::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
// NOTE: This should really be in ::threads::wait_queue, but it also needs to be early in parse
/// Wait on a wait queue contained within a spinlock
///
/// Due to lifetime issues, the more erganomical `lock.queue.wait(lock)` does not pass borrow checking.
macro_rules! waitqueue_wait_ext {
($lock:expr, $field:ident) => ({
let mut lock: $crate::arch::sync::HeldSpinlock<_> = $lock;
let irql = lock.$field.wait_int();
::core::mem::drop(lock);
::core::mem::drop(irql);
$crate::threads::reschedule();
});
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
/// Initialise a static Mutex
#[macro_export]
macro_rules! mutex_init{ ($val:expr) => ($crate::sync::mutex::Mutex::new($val)) }
/// Initialise a static LazyMutex
#[macro_export]
macro_rules! lazymutex_init{ () => ($crate::sync::mutex::LazyMutex::new())}
// vim: ft=rust
|
type_name
|
identifier_name
|
macros.rs
|
//
//
//
/// Returns true if the passed exppression matches the pattern
#[macro_export]
macro_rules! is
{
($val:expr, $p:pat) => ( match $val { $p => true, _ => false } );
}
#[doc(hidden)]
#[macro_export]
macro_rules! _count
{
() => {0};
($a:expr) => {1};
($a:expr, $($b:expr),+) => {1+_count!($($b),+)};
}
/// Define a kernel module (creates the module header, containg the name and dependency strings)
#[macro_export]
macro_rules! module_define
{
($name:ident, [$($deps:ident),*], $init:path) => (
//#[assume_reachable]
#[doc(hidden)]
#[link_section = ".MODULE_LIST"]
#[linkage="external"]
#[allow(dead_code)]
pub static S_MODULE: $crate::modules::ModuleInfo = $crate::modules::ModuleInfo {
name: stringify!($name),
init: $init,
deps: &S_DEPS,
_rsvd: [0,0,0],
};
#[doc(hidden)]
static S_DEPS: [&'static str; _count!($($deps),*)] = [$(stringify!($deps)),*];
);
}
/// Ensure that a type implments the provided trait
///
/// Useful for "Send" and "Sync"
#[macro_export]
macro_rules! assert_trait
{
($t:ty : $tr:ident) => { #[allow(warnings)] fn assert_trait<T: $tr>() { } #[allow(dead_code)] fn call_assert_trait() { assert_trait::<$t>() } }
}
#[doc(hidden)]
#[tag_safe(irq)]
|
}
/// A safe wrapper around the `type_name` intrinsic
#[macro_export]
macro_rules! type_name
{
($t:ty) => ( $crate::macros::type_name::<$t>() );
}
/// Iterator helper, desugars to a.zip(b)
#[macro_export]
macro_rules! zip
{
($a:expr, $b:expr) => ( $a.zip($b) );
}
/// Iterator helper, desugars to a.chain(b).chain(b2)
macro_rules! chain
{
($b:expr, $($b:expr),+) => ( $a$(.chain($b))+ );
}
/// Provides a short and noticable "TODO: " message
#[macro_export]
macro_rules! todo
{
( $s:expr ) => ( panic!( concat!("TODO: ",$s) ) );
( $s:expr, $($v:tt)* ) => ( panic!( concat!("TODO: ",$s), $($v)* ) );
}
/// Provides a less boiler-plate way to implement fmt traits for simple types
///
/// Only supports non-generic types and unbounded types (due to challenges in matching generic definitions)
///
/// ```
/// impl_fmt! {
/// Debug(self, f) for Type {
/// write!(f, "Hello world!")
/// }
/// <T> Display(self, f) for Container {
/// write!(f, "Hello world!")
/// }
/// }
/// ```
#[macro_export]
macro_rules! impl_fmt
{
( $( <$($g:ident),+> $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl<$($g),+> ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
( $( $tr:ident ($s:ident, $f:ident) for $ty:ty { $code:expr } )+ ) => { $(
impl ::core::fmt::$tr for $ty {
fn fmt(&$s, $f: &mut ::core::fmt::Formatter) -> ::core::fmt::Result {
$code
}
}
)+
};
}
/// Implements the From trait for the provided type, avoiding boilerplate
#[macro_export]
macro_rules! impl_from {
(@as_item $($i:item)*) => {$($i)*};
($( $(<($($params:tt)+)>)* From<$src:ty>($v:ident) for $t:ty { $($code:stmt)*} )+) => {
$(impl_from!{ @as_item
impl$(<$($params)+>)* ::core::convert::From<$src> for $t {
fn from($v: $src) -> $t {
$($code)*
}
}
})+
};
}
// NOTE: This should really be in ::threads::wait_queue, but it also needs to be early in parse
/// Wait on a wait queue contained within a spinlock
///
/// Due to lifetime issues, the more erganomical `lock.queue.wait(lock)` does not pass borrow checking.
macro_rules! waitqueue_wait_ext {
($lock:expr, $field:ident) => ({
let mut lock: $crate::arch::sync::HeldSpinlock<_> = $lock;
let irql = lock.$field.wait_int();
::core::mem::drop(lock);
::core::mem::drop(irql);
$crate::threads::reschedule();
});
}
/// Override libcore's `try!` macro with one that backs onto `From`
#[macro_export]
macro_rules! try {
($e:expr) => (
match $e {
Ok(v) => v,
Err(e) => return Err(From::from(e)),
}
);
}
/// Initialise a static Mutex
#[macro_export]
macro_rules! mutex_init{ ($val:expr) => ($crate::sync::mutex::Mutex::new($val)) }
/// Initialise a static LazyMutex
///
/// Takes no value: a `LazyMutex` is populated later, on first use.
#[macro_export]
macro_rules! lazymutex_init{ () => ($crate::sync::mutex::LazyMutex::new())}
// vim: ft=rust
|
pub fn type_name<T: ?::core::marker::Sized>() -> &'static str {
// SAFE: All intrinsics are unsafe, no matter how safe they really are
unsafe { ::core::intrinsics::type_name::<T>() }
|
random_line_split
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use geometry::{DevicePixel, ScreenPx};
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use geom::scale_factor::ScaleFactor;
use getopts;
use std::cmp;
use std::io;
use std::os;
use std::rt;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct
|
{
/// The initial URLs to load.
pub urls: Vec<String>,
/// The rendering backend to use (`-r`).
pub render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
pub n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
pub cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
pub tile_size: uint,
/// The ratio of device pixels per px at the default scale. If unspecified, will use the
/// platform default setting.
pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>,
/// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and
/// cause it to produce output on that interval (`-p`).
pub time_profiler_period: Option<f64>,
/// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it
/// and cause it to produce output on that interval (`-m`).
pub memory_profiler_period: Option<f64>,
/// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
/// sequential algorithm.
pub layout_threads: uint,
/// True to exit after the page load (`-x`).
pub exit_after_load: bool,
pub output_file: Option<String>,
pub headless: bool,
pub hard_fail: bool,
/// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then
/// intrinsic widths are computed as a separate pass instead of during flow construction. You
/// may wish to turn this flag on in order to benchmark style recalculation against other
/// browser engines.
pub bubble_widths_separately: bool,
}
/// Print the getopts-generated usage message for `app` to stdout.
fn print_usage(app: &str, opts: &[getopts::OptGroup]) {
    let message = format!("Usage: {} [ options... ] [URL]\n\twhere options include", app);
    println!("{}", getopts::usage(message.as_slice(), opts));
}
/// Report a command-line error on stderr and mark the process exit status
/// as failing. Does not return control flow — the caller must still bail out.
fn args_fail(msg: &str) {
    io::stderr().write_line(msg).unwrap();
    os::set_exit_status(1);
}
/// Parse command-line arguments into an `Opts`, or return `None` when the
/// process should exit instead (help requested, no URLs supplied, or a
/// getopts parse error — in the failure cases the exit status is already set).
///
/// NOTE(review): numeric option values (`-s`, `-t`, `-p`, `-m`, `-y`,
/// `--device-pixel-ratio`) are parsed with `.unwrap()`, so a malformed value
/// panics rather than producing a usage error — confirm this is intended.
pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
    let app_name = args[0].to_str();
    let args = args.tail();
    // Option table; keep in sync with the field docs on `Opts`.
    let opts = vec!(
        getopts::optflag("c", "cpu", "CPU rendering"),
        getopts::optopt("o", "output", "Output file", "output.png"),
        getopts::optopt("r", "rendering", "Rendering backend", "direct2d|core-graphics|core-graphics-accelerated|cairo|skia."),
        getopts::optopt("s", "size", "Size of tiles", "512"),
        getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""),
        getopts::optopt("t", "threads", "Number of render threads", "1"),
        getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"),
        getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"),
        getopts::optflag("x", "exit", "Exit after load flag"),
        getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"),
        getopts::optflag("z", "headless", "Headless mode"),
        getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"),
        getopts::optflag("b", "bubble-widths", "Bubble intrinsic widths separately like other engines"),
        getopts::optflag("h", "help", "Print this message")
    );
    let opt_match = match getopts::getopts(args, opts.as_slice()) {
        Ok(m) => m,
        Err(f) => {
            args_fail(format!("{}", f).as_slice());
            return None;
        }
    };
    if opt_match.opt_present("h") || opt_match.opt_present("help") {
        print_usage(app_name.as_slice(), opts.as_slice());
        return None;
    };
    // The free (non-flag) arguments are the URLs to load; at least one is required.
    let urls = if opt_match.free.is_empty() {
        print_usage(app_name.as_slice(), opts.as_slice());
        args_fail("servo asks that you provide 1 or more URLs");
        return None;
    } else {
        opt_match.free.clone()
    };
    // Rendering backend selection; defaults to Skia when `-r` is absent.
    let render_backend = match opt_match.opt_str("r") {
        Some(backend_str) => {
            if "direct2d" == backend_str.as_slice() {
                Direct2DBackend
            } else if "core-graphics" == backend_str.as_slice() {
                CoreGraphicsBackend
            } else if "core-graphics-accelerated" == backend_str.as_slice() {
                CoreGraphicsAcceleratedBackend
            } else if "cairo" == backend_str.as_slice() {
                CairoBackend
            } else if "skia" == backend_str.as_slice() {
                SkiaBackend
            } else {
                fail!("unknown backend type")
            }
        }
        None => SkiaBackend
    };
    let tile_size: uint = match opt_match.opt_str("s") {
        Some(tile_size_str) => from_str(tile_size_str.as_slice()).unwrap(),
        None => 512,
    };
    let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str|
        ScaleFactor(from_str(dppx_str.as_slice()).unwrap())
    );
    let n_render_threads: uint = match opt_match.opt_str("t") {
        Some(n_render_threads_str) => from_str(n_render_threads_str.as_slice()).unwrap(),
        None => 1, // FIXME: Number of cores.
    };
    // If only the flag is present, default to a 5 second period for both profilers.
    let time_profiler_period = opt_match.opt_default("p", "5").map(|period| {
        from_str(period.as_slice()).unwrap()
    });
    let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| {
        from_str(period.as_slice()).unwrap()
    });
    let cpu_painting = opt_match.opt_present("c");
    // Layout thread count: explicit `-y`, otherwise 3/4 of scheduler threads (min 1).
    let layout_threads: uint = match opt_match.opt_str("y") {
        Some(layout_threads_str) => from_str(layout_threads_str.as_slice()).unwrap(),
        None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
    };
    Some(Opts {
        urls: urls,
        render_backend: render_backend,
        n_render_threads: n_render_threads,
        cpu_painting: cpu_painting,
        tile_size: tile_size,
        device_pixels_per_px: device_pixels_per_px,
        time_profiler_period: time_profiler_period,
        memory_profiler_period: memory_profiler_period,
        layout_threads: layout_threads,
        exit_after_load: opt_match.opt_present("x"),
        output_file: opt_match.opt_str("o"),
        headless: opt_match.opt_present("z"),
        hard_fail: opt_match.opt_present("f"),
        bubble_widths_separately: opt_match.opt_present("b"),
    })
}
|
Opts
|
identifier_name
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use geometry::{DevicePixel, ScreenPx};
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use geom::scale_factor::ScaleFactor;
use getopts;
use std::cmp;
use std::io;
use std::os;
use std::rt;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
    /// The initial URLs to load.
    pub urls: Vec<String>,
    /// The rendering backend to use (`-r`).
    pub render_backend: BackendType,
    /// How many threads to use for CPU rendering (`-t`).
    ///
    /// FIXME(pcwalton): This is not currently used. All rendering is sequential.
    pub n_render_threads: uint,
    /// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
    /// compositing is always done on the GPU.
    pub cpu_painting: bool,
    /// The maximum size of each tile in pixels (`-s`).
    pub tile_size: uint,
    /// The ratio of device pixels per px at the default scale. If unspecified, will use the
    /// platform default setting.
    pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>,
    /// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and
    /// cause it to produce output on that interval (`-p`).
    pub time_profiler_period: Option<f64>,
    /// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it
    /// and cause it to produce output on that interval (`-m`).
    pub memory_profiler_period: Option<f64>,
    /// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
    /// sequential algorithm.
    pub layout_threads: uint,
    /// True to exit after the page load (`-x`).
    pub exit_after_load: bool,
    /// The output file to write, if any (`-o`).
    pub output_file: Option<String>,
    /// True to run in headless mode (`-z`).
    pub headless: bool,
    /// True to exit on task failure instead of displaying about:failure (`-f`).
    pub hard_fail: bool,
    /// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then
    /// intrinsic widths are computed as a separate pass instead of during flow construction. You
    /// may wish to turn this flag on in order to benchmark style recalculation against other
    /// browser engines.
    pub bubble_widths_separately: bool,
}
fn print_usage(app: &str, opts: &[getopts::OptGroup]) {
let message = format!("Usage: {} [ options... ] [URL]\n\twhere options include", app);
println!("{}", getopts::usage(message.as_slice(), opts));
}
fn args_fail(msg: &str)
|
pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
let app_name = args[0].to_str();
let args = args.tail();
let opts = vec!(
getopts::optflag("c", "cpu", "CPU rendering"),
getopts::optopt("o", "output", "Output file", "output.png"),
getopts::optopt("r", "rendering", "Rendering backend", "direct2d|core-graphics|core-graphics-accelerated|cairo|skia."),
getopts::optopt("s", "size", "Size of tiles", "512"),
getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""),
getopts::optopt("t", "threads", "Number of render threads", "1"),
getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"),
getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"),
getopts::optflag("x", "exit", "Exit after load flag"),
getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"),
getopts::optflag("z", "headless", "Headless mode"),
getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"),
getopts::optflag("b", "bubble-widths", "Bubble intrinsic widths separately like other engines"),
getopts::optflag("h", "help", "Print this message")
);
let opt_match = match getopts::getopts(args, opts.as_slice()) {
Ok(m) => m,
Err(f) => {
args_fail(format!("{}", f).as_slice());
return None;
}
};
if opt_match.opt_present("h") || opt_match.opt_present("help") {
print_usage(app_name.as_slice(), opts.as_slice());
return None;
};
let urls = if opt_match.free.is_empty() {
print_usage(app_name.as_slice(), opts.as_slice());
args_fail("servo asks that you provide 1 or more URLs");
return None;
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
Some(backend_str) => {
if "direct2d" == backend_str.as_slice() {
Direct2DBackend
} else if "core-graphics" == backend_str.as_slice() {
CoreGraphicsBackend
} else if "core-graphics-accelerated" == backend_str.as_slice() {
CoreGraphicsAcceleratedBackend
} else if "cairo" == backend_str.as_slice() {
CairoBackend
} else if "skia" == backend_str.as_slice() {
SkiaBackend
} else {
fail!("unknown backend type")
}
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => from_str(tile_size_str.as_slice()).unwrap(),
None => 512,
};
let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str|
ScaleFactor(from_str(dppx_str.as_slice()).unwrap())
);
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => from_str(n_render_threads_str.as_slice()).unwrap(),
None => 1, // FIXME: Number of cores.
};
// If only the flag is present, default to a 5 second period for both profilers.
let time_profiler_period = opt_match.opt_default("p", "5").map(|period| {
from_str(period.as_slice()).unwrap()
});
let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| {
from_str(period.as_slice()).unwrap()
});
let cpu_painting = opt_match.opt_present("c");
let layout_threads: uint = match opt_match.opt_str("y") {
Some(layout_threads_str) => from_str(layout_threads_str.as_slice()).unwrap(),
None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
};
Some(Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
device_pixels_per_px: device_pixels_per_px,
time_profiler_period: time_profiler_period,
memory_profiler_period: memory_profiler_period,
layout_threads: layout_threads,
exit_after_load: opt_match.opt_present("x"),
output_file: opt_match.opt_str("o"),
headless: opt_match.opt_present("z"),
hard_fail: opt_match.opt_present("f"),
bubble_widths_separately: opt_match.opt_present("b"),
})
}
|
{
io::stderr().write_line(msg).unwrap();
os::set_exit_status(1);
}
|
identifier_body
|
opts.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Configuration options for a single run of the servo application. Created
//! from command line arguments.
use geometry::{DevicePixel, ScreenPx};
use azure::azure_hl::{BackendType, CairoBackend, CoreGraphicsBackend};
use azure::azure_hl::{CoreGraphicsAcceleratedBackend, Direct2DBackend, SkiaBackend};
use geom::scale_factor::ScaleFactor;
use getopts;
use std::cmp;
use std::io;
use std::os;
use std::rt;
/// Global flags for Servo, currently set on the command line.
#[deriving(Clone)]
pub struct Opts {
/// The initial URLs to load.
pub urls: Vec<String>,
/// The rendering backend to use (`-r`).
pub render_backend: BackendType,
/// How many threads to use for CPU rendering (`-t`).
///
/// FIXME(pcwalton): This is not currently used. All rendering is sequential.
pub n_render_threads: uint,
/// True to use CPU painting, false to use GPU painting via Skia-GL (`-c`). Note that
/// compositing is always done on the GPU.
pub cpu_painting: bool,
/// The maximum size of each tile in pixels (`-s`).
pub tile_size: uint,
/// The ratio of device pixels per px at the default scale. If unspecified, will use the
/// platform default setting.
pub device_pixels_per_px: Option<ScaleFactor<ScreenPx, DevicePixel, f32>>,
/// `None` to disable the time profiler or `Some` with an interval in seconds to enable it and
/// cause it to produce output on that interval (`-p`).
pub time_profiler_period: Option<f64>,
/// `None` to disable the memory profiler or `Some` with an interval in seconds to enable it
/// and cause it to produce output on that interval (`-m`).
pub memory_profiler_period: Option<f64>,
/// The number of threads to use for layout (`-y`). Defaults to 1, which results in a recursive
/// sequential algorithm.
pub layout_threads: uint,
/// True to exit after the page load (`-x`).
pub exit_after_load: bool,
pub output_file: Option<String>,
pub headless: bool,
pub hard_fail: bool,
/// True if we should bubble intrinsic widths sequentially (`-b`). If this is true, then
/// intrinsic widths are computed as a separate pass instead of during flow construction. You
/// may wish to turn this flag on in order to benchmark style recalculation against other
/// browser engines.
pub bubble_widths_separately: bool,
}
fn print_usage(app: &str, opts: &[getopts::OptGroup]) {
let message = format!("Usage: {} [ options... ] [URL]\n\twhere options include", app);
println!("{}", getopts::usage(message.as_slice(), opts));
}
fn args_fail(msg: &str) {
io::stderr().write_line(msg).unwrap();
os::set_exit_status(1);
}
pub fn from_cmdline_args(args: &[String]) -> Option<Opts> {
let app_name = args[0].to_str();
let args = args.tail();
let opts = vec!(
getopts::optflag("c", "cpu", "CPU rendering"),
getopts::optopt("o", "output", "Output file", "output.png"),
getopts::optopt("r", "rendering", "Rendering backend", "direct2d|core-graphics|core-graphics-accelerated|cairo|skia."),
getopts::optopt("s", "size", "Size of tiles", "512"),
getopts::optopt("", "device-pixel-ratio", "Device pixels per px", ""),
getopts::optopt("t", "threads", "Number of render threads", "1"),
getopts::optflagopt("p", "profile", "Profiler flag and output interval", "10"),
getopts::optflagopt("m", "memory-profile", "Memory profiler flag and output interval", "10"),
getopts::optflag("x", "exit", "Exit after load flag"),
getopts::optopt("y", "layout-threads", "Number of threads to use for layout", "1"),
getopts::optflag("z", "headless", "Headless mode"),
getopts::optflag("f", "hard-fail", "Exit on task failure instead of displaying about:failure"),
getopts::optflag("b", "bubble-widths", "Bubble intrinsic widths separately like other engines"),
getopts::optflag("h", "help", "Print this message")
);
let opt_match = match getopts::getopts(args, opts.as_slice()) {
Ok(m) => m,
Err(f) => {
args_fail(format!("{}", f).as_slice());
return None;
}
};
if opt_match.opt_present("h") || opt_match.opt_present("help") {
print_usage(app_name.as_slice(), opts.as_slice());
return None;
};
let urls = if opt_match.free.is_empty() {
print_usage(app_name.as_slice(), opts.as_slice());
args_fail("servo asks that you provide 1 or more URLs");
return None;
} else {
opt_match.free.clone()
};
let render_backend = match opt_match.opt_str("r") {
|
} else if "core-graphics" == backend_str.as_slice() {
CoreGraphicsBackend
} else if "core-graphics-accelerated" == backend_str.as_slice() {
CoreGraphicsAcceleratedBackend
} else if "cairo" == backend_str.as_slice() {
CairoBackend
} else if "skia" == backend_str.as_slice() {
SkiaBackend
} else {
fail!("unknown backend type")
}
}
None => SkiaBackend
};
let tile_size: uint = match opt_match.opt_str("s") {
Some(tile_size_str) => from_str(tile_size_str.as_slice()).unwrap(),
None => 512,
};
let device_pixels_per_px = opt_match.opt_str("device-pixel-ratio").map(|dppx_str|
ScaleFactor(from_str(dppx_str.as_slice()).unwrap())
);
let n_render_threads: uint = match opt_match.opt_str("t") {
Some(n_render_threads_str) => from_str(n_render_threads_str.as_slice()).unwrap(),
None => 1, // FIXME: Number of cores.
};
// If only the flag is present, default to a 5 second period for both profilers.
let time_profiler_period = opt_match.opt_default("p", "5").map(|period| {
from_str(period.as_slice()).unwrap()
});
let memory_profiler_period = opt_match.opt_default("m", "5").map(|period| {
from_str(period.as_slice()).unwrap()
});
let cpu_painting = opt_match.opt_present("c");
let layout_threads: uint = match opt_match.opt_str("y") {
Some(layout_threads_str) => from_str(layout_threads_str.as_slice()).unwrap(),
None => cmp::max(rt::default_sched_threads() * 3 / 4, 1),
};
Some(Opts {
urls: urls,
render_backend: render_backend,
n_render_threads: n_render_threads,
cpu_painting: cpu_painting,
tile_size: tile_size,
device_pixels_per_px: device_pixels_per_px,
time_profiler_period: time_profiler_period,
memory_profiler_period: memory_profiler_period,
layout_threads: layout_threads,
exit_after_load: opt_match.opt_present("x"),
output_file: opt_match.opt_str("o"),
headless: opt_match.opt_present("z"),
hard_fail: opt_match.opt_present("f"),
bubble_widths_separately: opt_match.opt_present("b"),
})
}
|
Some(backend_str) => {
if "direct2d" == backend_str.as_slice() {
Direct2DBackend
|
random_line_split
|
utils.rs
|
use std::ptr;
use std::io;
use std::fs::{create_dir, remove_dir_all, read_dir, remove_file, remove_dir};
use std::fs::{metadata};
use std::path::{Path, PathBuf};
use std::path::Component::Normal;
use std::io::Error as IoError;
use std::io::ErrorKind::{AlreadyExists, NotFound};
use std::ffi::CString;
use std::env::current_dir;
use nix::sys::signal::Signal;
use nix::sys::signal::{SIGQUIT, SIGSEGV, SIGBUS, SIGHUP, SIGILL, SIGABRT};
use nix::sys::signal::{SIGFPE, SIGUSR1, SIGUSR2};
use libc::{c_int, c_char, timeval, c_void, mode_t, uid_t, gid_t};
use libc::{chmod, chdir, chown};
use signal::trap::Trap;
use range::Range;
use super::id_map::IdMap;
pub type Time = f64;
pub type SigNum = i32;
// TODO(tailhook) signal::Trap might use nix signals instead of i32
pub const ABNORMAL_TERM_SIGNALS: &'static [Signal] = &[
SIGQUIT, SIGSEGV, SIGBUS, SIGHUP,
SIGILL, SIGABRT, SIGFPE, SIGUSR1,
SIGUSR2,
];
pub struct FsUidGuard(bool);
extern {
fn chroot(dir: *const c_char) -> c_int;
fn pivot_root(new_root: *const c_char, put_old: *const c_char) -> c_int;
fn gettimeofday(tp: *mut timeval, tzp: *mut c_void) -> c_int;
// TODO(tailhook) move to libc and nix
fn setfsuid(uid: uid_t) -> c_int;
fn setfsgid(gid: gid_t) -> c_int;
}
/// Run `fun` with the process root temporarily `chroot(2)`ed to `path`,
/// then restore the original root and working directory.
///
/// Requires the privilege to call chroot(2) (CAP_SYS_CHROOT). Abnormal-term
/// signals are trapped for the duration — see the comment below for why.
pub fn temporary_change_root<T, F>(path: &Path, mut fun: F)
    -> Result<T, String>
    where F: FnMut() -> Result<T, String>
{
    // The point is: if we gat fatal signal in the chroot, we have 2 issues:
    //
    // 1. Process can't actually restart (the binary path is wrong)
    // 2. Even if it finds the binary, it will be angry restarting in chroot
    //
    let _trap = Trap::trap(ABNORMAL_TERM_SIGNALS);
    let cwd = current_dir().map_err(|e| {
        format!("Can't determine current dir: {}. \
            This usually happens if the directory \
            your're in is already deleted", e)
    })?;
    // Move to the (old) root first; we deliberately do NOT chdir into the
    // new root after chroot, so `.` still refers to the old root below.
    if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0 {
        return Err(format!("Error chdir to root: {}",
                           IoError::last_os_error()));
    }
    if unsafe { chroot(cpath(&path).as_ptr()) }!= 0 {
        return Err(format!("Error chroot to {:?}: {}",
                           path, IoError::last_os_error()));
    }
    let res = fun();
    // Escape the chroot: cwd was left outside the new root, so chroot(".")
    // restores the original root.
    if unsafe { chroot(CString::new(".").unwrap().as_ptr()) }!= 0 {
        return Err(format!("Error chroot back: {}",
                           IoError::last_os_error()));
    }
    if unsafe { chdir(cpath(&cwd).as_ptr()) }!= 0 {
        return Err(format!("Error chdir to workdir back: {}",
                           IoError::last_os_error()));
    }
    return res;
}
/// Check whether `value` falls inside any of the `inside` ranges of `mapping`.
pub fn in_mapping(mapping: &Vec<IdMap>, value: u32) -> bool {
    mapping.iter()
        .any(|mp| value >= mp.inside && value < mp.inside + mp.count)
}
/// Verify that every id block in `map` is fully covered by at least one of
/// the allowed `ranges` (a single range must cover the whole block).
pub fn check_mapping(ranges: &Vec<Range>, map: &Vec<IdMap>) -> bool {
    // TODO(tailhook) do more comprehensive algo
    map.iter().all(|item| {
        ranges.iter().any(|rng| {
            rng.start <= item.outside &&
                rng.end >= item.outside + item.count - 1
        })
    })
}
/// Switch the root filesystem to `new_root` via `pivot_root(2)`, stashing
/// the old root at `put_old`, then chdir to the new root.
///
/// The paths must satisfy pivot_root's kernel constraints (e.g. `put_old`
/// must be at or underneath `new_root`).
pub fn change_root(new_root: &Path, put_old: &Path) -> Result<(), String>
{
    if unsafe { pivot_root(
                cpath(new_root).as_ptr(),
                cpath(put_old).as_ptr()) }!= 0
    {
        return Err(format!("Error pivot_root to {}: {}", new_root.display(),
                           IoError::last_os_error()));
    }
    if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0
    {
        return Err(format!("Error chdir to root: {}",
                           IoError::last_os_error()));
    }
    return Ok(());
}
/// Ensure that `dir` exists and is a directory, creating it if necessary.
///
/// Returns `Ok(())` when the directory already exists or was created.
/// Returns a descriptive error string when the path exists but is not a
/// directory, or when creation fails for any other reason.
pub fn ensure_dir(dir: &Path) -> Result<(), String> {
    if let Ok(dmeta) = metadata(dir) {
        if !dmeta.is_dir() {
            return Err(format!(concat!("Can't create dir {:?}, ",
                "path already exists but not a directory"), dir));
        }
        return Ok(());
    }
    match create_dir(dir) {
        Ok(()) => Ok(()),
        Err(ref e) if e.kind() == AlreadyExists => {
            // Lost a race with a concurrent creator; accept the result as
            // long as what exists now is a directory.
            match metadata(dir) {
                Ok(ref dmeta) if dmeta.is_dir() => Ok(()),
                _ => Err(format!(concat!("Can't create dir {:?}, ",
                    "path already exists but not a directory"),
                    dir)),
            }
        }
        // BUG FIX: this arm previously appended "path already exists but not
        // a directory" to *every* creation error (EACCES, ENOSPC, ...);
        // report the underlying I/O error instead.
        Err(ref e) => Err(format!("Can't create dir {:?}: {}", dir, e)),
    }
}
/// Remove everything inside `dir` (and `dir` itself when `remove_dir_itself`
/// is true). A missing `dir` is not an error.
///
/// The removal happens while chroot'ed into `dir` (via
/// `temporary_change_root`) so that symlinks inside it cannot escape and
/// delete files elsewhere on the host.
pub fn clean_dir(dir: &Path, remove_dir_itself: bool) -> Result<(), String> {
    if let Err(e) = metadata(dir) {
        if e.kind() == NotFound {
            return Ok(());
        } else {
            return Err(format!("Can't stat dir {:?}: {}", dir, e));
        }
    }
    // We temporarily change root, so that symlinks inside the dir
    // would do no harm. But note that dir itself can be a symlink
    try!(temporary_change_root(dir, || {
        // Inside the chroot, `dir` is now "/".
        let dirlist = try!(read_dir("/")
             .map_err(|e| format!("Can't read directory {:?}: {}", dir, e)))
             .filter_map(|x| x.ok())
             .collect::<Vec<_>>();
        for entry in dirlist.into_iter() {
            match metadata(entry.path()) {
                Ok(ref meta) if meta.is_dir() => {
                    try!(remove_dir_all(entry.path())
                        .map_err(|e| format!("Can't remove directory {:?}{:?}: {}",
                            dir, entry.path(), e)));
                }
                Ok(_) => {
                    try!(remove_file(entry.path())
                        .map_err(|e| format!("Can't remove file {:?}{:?}: {}",
                            dir, entry.path(), e)));
                }
                Err(_) => {
                    return Err(format!("Can't stat file {:?}", entry.path()));
                }
            }
        }
        Ok(())
    }));
    if remove_dir_itself {
        try!(remove_dir(dir)
            .map_err(|e| format!("Can't remove dir {:?}: {}", dir, e)));
    }
    return Ok(());
}
/// Concatenate the items of `iter` into a single `String`, inserting `sep`
/// between consecutive elements (no leading or trailing separator).
pub fn join<S1, S2, I>(mut iter: I, sep: S2) -> String
    where S1:AsRef<str>, S2:AsRef<str>, I:Iterator<Item=S1>
{
    let mut result = String::new();
    // The first element gets no separator in front of it.
    if let Some(first) = iter.next() {
        result.push_str(first.as_ref());
    }
    for item in iter {
        result.push_str(sep.as_ref());
        result.push_str(item.as_ref());
    }
    result
}
/// Current wall-clock time as fractional seconds since the Unix epoch,
/// obtained from `gettimeofday(2)` (microsecond resolution).
pub fn get_time() -> Time {
    let mut tv = timeval { tv_sec: 0, tv_usec: 0 };
    // SAFETY-style note: tv is a valid out-pointer; the timezone arg is
    // unused (null) as recommended by the man page.
    unsafe { gettimeofday(&mut tv, ptr::null_mut()) };
    return tv.tv_sec as f64 + tv.tv_usec as f64 * 0.000001;
}
/// Change the owner and group of `path` via `chown(2)`.
///
/// Returns the OS error on failure. Panics (inside `cpath`) if the path is
/// not valid UTF-8 or contains a NUL byte.
pub fn set_file_owner(path: &Path, owner: uid_t, group: gid_t)
    -> Result<(), IoError>
{
    let cpath = cpath(path);
    let rc = unsafe { chown(cpath.as_ptr(), owner, group) };
    if rc < 0 {
        return Err(IoError::last_os_error());
    }
    return Ok(());
}
/// Change the permission bits of `path` via `chmod(2)`.
///
/// Returns the OS error on failure. Panics (inside `cpath`) if the path is
/// not valid UTF-8 or contains a NUL byte.
pub fn set_file_mode(path: &Path, mode: mode_t) -> Result<(), IoError> {
    let cpath = cpath(path);
    let rc = unsafe { chmod(cpath.as_ptr(), mode) };
    if rc < 0 {
        return Err(IoError::last_os_error());
    }
    return Ok(());
}
/// Convert a path into a `CString` for passing to C FFI calls.
///
/// Panics if the path is not valid UTF-8 or contains an interior NUL byte.
pub fn cpath<P:AsRef<Path>>(path: P) -> CString {
    CString::new(path.as_ref().to_str().unwrap()).unwrap()
}
/// Compute `child` relative to `base` (which must be a prefix of `child`).
///
/// Panics if `base` is not a prefix, or if the remaining components are not
/// plain path segments (e.g. `..` or a root component).
pub fn relative(child: &Path, base: &Path) -> PathBuf {
    assert!(child.starts_with(base));
    let base_len = base.components().count();
    let mut rel = PathBuf::new();
    for cmp in child.components().skip(base_len) {
        match cmp {
            Normal(chunk) => rel.push(chunk),
            other => panic!("Bad path for relative ({:?} from {:?} against {:?})",
                            other, child, base),
        }
    }
    rel
}
impl FsUidGuard {
    /// Temporarily switch the filesystem uid/gid (e.g. to open a socket on
    /// behalf of another user); dropping the guard restores them to root.
    ///
    /// `setfsuid(2)`/`setfsgid(2)` cannot report errors directly — they
    /// return the *previous* value — so each is called twice: once to set,
    /// once to verify the first call took effect. Failures are logged and
    /// otherwise ignored here.
    pub fn set(uid: u32, gid: u32) -> FsUidGuard {
        if uid != 0 || gid != 0 {
            unsafe { setfsuid(uid) };
            if unsafe { setfsuid(uid) } != uid as i32 {
                // BUG FIX: this message previously said "fs gid".
                error!("Can't set fs uid to open socket: {}. Ignoring.",
                    io::Error::last_os_error());
            }
            unsafe { setfsgid(gid) };
            if unsafe { setfsgid(gid) } != gid as i32 {
                // BUG FIX: this message previously said "fs uid".
                error!("Can't set fs gid to open socket: {}. Ignoring.",
                    io::Error::last_os_error());
            }
            FsUidGuard(true)
        } else {
            // Already root; nothing to restore on drop.
            FsUidGuard(false)
        }
    }
}
impl Drop for FsUidGuard {
fn drop(&mut self)
|
}
|
{
if self.0 {
unsafe { setfsuid(0) };
if unsafe { setfsuid(0) } != 0 {
let err = io::Error::last_os_error();
error!("Can't return fs uid back to zero: {}. Aborting.", err);
panic!("Can't return fs uid back to zero: {}. Aborting.", err);
}
unsafe { setfsgid(0) };
if unsafe { setfsgid(0) } != 0 {
let err = io::Error::last_os_error();
error!("Can't return fs gid back to zero: {}. Aborting.", err);
panic!("Can't return fs gid back to zero: {}. Aborting.", err);
}
}
}
|
identifier_body
|
utils.rs
|
use std::ptr;
use std::io;
use std::fs::{create_dir, remove_dir_all, read_dir, remove_file, remove_dir};
use std::fs::{metadata};
use std::path::{Path, PathBuf};
use std::path::Component::Normal;
use std::io::Error as IoError;
use std::io::ErrorKind::{AlreadyExists, NotFound};
use std::ffi::CString;
use std::env::current_dir;
use nix::sys::signal::Signal;
use nix::sys::signal::{SIGQUIT, SIGSEGV, SIGBUS, SIGHUP, SIGILL, SIGABRT};
use nix::sys::signal::{SIGFPE, SIGUSR1, SIGUSR2};
use libc::{c_int, c_char, timeval, c_void, mode_t, uid_t, gid_t};
use libc::{chmod, chdir, chown};
use signal::trap::Trap;
use range::Range;
use super::id_map::IdMap;
pub type Time = f64;
pub type SigNum = i32;
// TODO(tailhook) signal::Trap might use nix signals instead of i32
pub const ABNORMAL_TERM_SIGNALS: &'static [Signal] = &[
SIGQUIT, SIGSEGV, SIGBUS, SIGHUP,
SIGILL, SIGABRT, SIGFPE, SIGUSR1,
SIGUSR2,
];
pub struct FsUidGuard(bool);
extern {
fn chroot(dir: *const c_char) -> c_int;
fn pivot_root(new_root: *const c_char, put_old: *const c_char) -> c_int;
fn gettimeofday(tp: *mut timeval, tzp: *mut c_void) -> c_int;
// TODO(tailhook) move to libc and nix
fn setfsuid(uid: uid_t) -> c_int;
fn setfsgid(gid: gid_t) -> c_int;
}
pub fn temporary_change_root<T, F>(path: &Path, mut fun: F)
-> Result<T, String>
where F: FnMut() -> Result<T, String>
{
// The point is: if we gat fatal signal in the chroot, we have 2 issues:
//
// 1. Process can't actually restart (the binary path is wrong)
// 2. Even if it finds the binary, it will be angry restarting in chroot
//
let _trap = Trap::trap(ABNORMAL_TERM_SIGNALS);
let cwd = current_dir().map_err(|e| {
format!("Can't determine current dir: {}. \
This usually happens if the directory \
your're in is already deleted", e)
})?;
if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0 {
return Err(format!("Error chdir to root: {}",
IoError::last_os_error()));
}
if unsafe { chroot(cpath(&path).as_ptr()) }!= 0 {
return Err(format!("Error chroot to {:?}: {}",
path, IoError::last_os_error()));
}
let res = fun();
if unsafe { chroot(CString::new(".").unwrap().as_ptr()) }!= 0 {
return Err(format!("Error chroot back: {}",
IoError::last_os_error()));
}
if unsafe { chdir(cpath(&cwd).as_ptr()) }!= 0 {
return Err(format!("Error chdir to workdir back: {}",
IoError::last_os_error()));
}
return res;
}
pub fn in_mapping(mapping: &Vec<IdMap>, value: u32) -> bool {
for mp in mapping.iter() {
if value >= mp.inside && value < mp.inside + mp.count {
return true;
}
}
return false;
}
pub fn check_mapping(ranges: &Vec<Range>, map: &Vec<IdMap>) -> bool {
// TODO(tailhook) do more comprehensive algo
'map: for item in map.iter() {
for rng in ranges.iter() {
if rng.start <= item.outside &&
rng.end >= item.outside + item.count - 1
{
continue'map;
}
}
return false;
}
return true;
}
pub fn change_root(new_root: &Path, put_old: &Path) -> Result<(), String>
{
if unsafe { pivot_root(
cpath(new_root).as_ptr(),
cpath(put_old).as_ptr()) }!= 0
{
return Err(format!("Error pivot_root to {}: {}", new_root.display(),
IoError::last_os_error()));
}
if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0
{
return Err(format!("Error chdir to root: {}",
IoError::last_os_error()));
}
return Ok(());
}
pub fn ensure_dir(dir: &Path) -> Result<(), String> {
if let Ok(dmeta) = metadata(dir) {
if!dmeta.is_dir() {
return Err(format!(concat!("Can't create dir {:?}, ",
"path already exists but not a directory"), dir));
}
return Ok(());
}
match create_dir(dir) {
Ok(()) => return Ok(()),
Err(ref e) if e.kind() == AlreadyExists => {
let dmeta = metadata(dir);
if dmeta.is_ok() && dmeta.unwrap().is_dir() {
return Ok(());
} else {
return Err(format!(concat!("Can't create dir {:?}, ",
"path already exists but not a directory"),
dir));
}
}
Err(ref e) => {
return Err(format!(concat!("Can't create dir {:?}: {} ",
"path already exists but not a directory"), dir, e));
}
}
}
pub fn clean_dir(dir: &Path, remove_dir_itself: bool) -> Result<(), String> {
if let Err(e) = metadata(dir) {
if e.kind() == NotFound {
return Ok(());
} else {
return Err(format!("Can't stat dir {:?}: {}", dir, e));
}
}
// We temporarily change root, so that symlinks inside the dir
// would do no harm. But note that dir itself can be a symlink
try!(temporary_change_root(dir, || {
let dirlist = try!(read_dir("/")
.map_err(|e| format!("Can't read directory {:?}: {}", dir, e)))
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
for entry in dirlist.into_iter() {
match metadata(entry.path()) {
Ok(ref meta) if meta.is_dir() => {
try!(remove_dir_all(entry.path())
.map_err(|e| format!("Can't remove directory {:?}{:?}: {}",
dir, entry.path(), e)));
}
Ok(_) => {
try!(remove_file(entry.path())
.map_err(|e| format!("Can't remove file {:?}{:?}: {}",
dir, entry.path(), e)));
}
Err(_) => {
return Err(format!("Can't stat file {:?}", entry.path()));
}
}
}
Ok(())
}));
if remove_dir_itself {
try!(remove_dir(dir)
.map_err(|e| format!("Can't remove dir {:?}: {}", dir, e)));
}
return Ok(());
}
pub fn join<S1, S2, I>(mut iter: I, sep: S2) -> String
where S1:AsRef<str>, S2:AsRef<str>, I:Iterator<Item=S1>
{
let mut buf = String::new();
match iter.next() {
Some(x) => buf.push_str(x.as_ref()),
None => {}
}
for i in iter {
buf.push_str(sep.as_ref());
buf.push_str(i.as_ref());
}
return buf;
}
pub fn get_time() -> Time {
let mut tv = timeval { tv_sec: 0, tv_usec: 0 };
unsafe { gettimeofday(&mut tv, ptr::null_mut()) };
return tv.tv_sec as f64 + tv.tv_usec as f64 * 0.000001;
}
pub fn set_file_owner(path: &Path, owner: uid_t, group: gid_t)
-> Result<(), IoError>
{
let cpath = cpath(path);
let rc = unsafe { chown(cpath.as_ptr(), owner, group) };
if rc < 0 {
return Err(IoError::last_os_error());
}
return Ok(());
}
pub fn set_file_mode(path: &Path, mode: mode_t) -> Result<(), IoError> {
let cpath = cpath(path);
let rc = unsafe { chmod(cpath.as_ptr(), mode) };
if rc < 0 {
return Err(IoError::last_os_error());
}
return Ok(());
}
pub fn cpath<P:AsRef<Path>>(path: P) -> CString {
CString::new(path.as_ref().to_str().unwrap()).unwrap()
}
pub fn relative(child: &Path, base: &Path) -> PathBuf {
assert!(child.starts_with(base));
let mut res = PathBuf::new();
for cmp in child.components().skip(base.components().count()) {
if let Normal(ref chunk) = cmp {
res.push(chunk);
} else {
panic!("Bad path for relative ({:?} from {:?} against {:?})",
cmp, child, base);
}
}
return res
}
impl FsUidGuard {
pub fn
|
(uid: u32, gid: u32) -> FsUidGuard {
if uid!= 0 || gid!= 0 {
unsafe { setfsuid(uid) };
if unsafe { setfsuid(uid) }!= uid as i32 {
error!("Can't set fs gid to open socket: {}. Ignoring.",
io::Error::last_os_error());
}
unsafe { setfsgid(gid) };
if unsafe { setfsgid(gid) }!= gid as i32 {
error!("Can't set fs uid to open socket: {}. Ignoring.",
io::Error::last_os_error());
}
FsUidGuard(true)
} else {
FsUidGuard(false)
}
}
}
impl Drop for FsUidGuard {
fn drop(&mut self) {
if self.0 {
unsafe { setfsuid(0) };
if unsafe { setfsuid(0) }!= 0 {
let err = io::Error::last_os_error();
error!("Can't return fs uid back to zero: {}. Aborting.", err);
panic!("Can't return fs uid back to zero: {}. Aborting.", err);
}
unsafe { setfsgid(0) };
if unsafe { setfsgid(0) }!= 0 {
let err = io::Error::last_os_error();
error!("Can't return fs gid back to zero: {}. Aborting.", err);
panic!("Can't return fs gid back to zero: {}. Aborting.", err);
}
}
}
}
|
set
|
identifier_name
|
utils.rs
|
use std::ptr;
use std::io;
use std::fs::{create_dir, remove_dir_all, read_dir, remove_file, remove_dir};
use std::fs::{metadata};
use std::path::{Path, PathBuf};
use std::path::Component::Normal;
use std::io::Error as IoError;
use std::io::ErrorKind::{AlreadyExists, NotFound};
use std::ffi::CString;
use std::env::current_dir;
use nix::sys::signal::Signal;
use nix::sys::signal::{SIGQUIT, SIGSEGV, SIGBUS, SIGHUP, SIGILL, SIGABRT};
use nix::sys::signal::{SIGFPE, SIGUSR1, SIGUSR2};
use libc::{c_int, c_char, timeval, c_void, mode_t, uid_t, gid_t};
use libc::{chmod, chdir, chown};
use signal::trap::Trap;
use range::Range;
use super::id_map::IdMap;
pub type Time = f64;
pub type SigNum = i32;
// TODO(tailhook) signal::Trap might use nix signals instead of i32
pub const ABNORMAL_TERM_SIGNALS: &'static [Signal] = &[
SIGQUIT, SIGSEGV, SIGBUS, SIGHUP,
SIGILL, SIGABRT, SIGFPE, SIGUSR1,
SIGUSR2,
];
pub struct FsUidGuard(bool);
extern {
fn chroot(dir: *const c_char) -> c_int;
fn pivot_root(new_root: *const c_char, put_old: *const c_char) -> c_int;
fn gettimeofday(tp: *mut timeval, tzp: *mut c_void) -> c_int;
// TODO(tailhook) move to libc and nix
fn setfsuid(uid: uid_t) -> c_int;
fn setfsgid(gid: gid_t) -> c_int;
}
pub fn temporary_change_root<T, F>(path: &Path, mut fun: F)
-> Result<T, String>
where F: FnMut() -> Result<T, String>
{
// The point is: if we gat fatal signal in the chroot, we have 2 issues:
//
// 1. Process can't actually restart (the binary path is wrong)
// 2. Even if it finds the binary, it will be angry restarting in chroot
//
let _trap = Trap::trap(ABNORMAL_TERM_SIGNALS);
let cwd = current_dir().map_err(|e| {
format!("Can't determine current dir: {}. \
This usually happens if the directory \
your're in is already deleted", e)
})?;
if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0 {
return Err(format!("Error chdir to root: {}",
IoError::last_os_error()));
}
if unsafe { chroot(cpath(&path).as_ptr()) }!= 0 {
return Err(format!("Error chroot to {:?}: {}",
path, IoError::last_os_error()));
}
let res = fun();
if unsafe { chroot(CString::new(".").unwrap().as_ptr()) }!= 0 {
return Err(format!("Error chroot back: {}",
IoError::last_os_error()));
}
if unsafe { chdir(cpath(&cwd).as_ptr()) }!= 0 {
return Err(format!("Error chdir to workdir back: {}",
IoError::last_os_error()));
}
return res;
}
pub fn in_mapping(mapping: &Vec<IdMap>, value: u32) -> bool {
for mp in mapping.iter() {
if value >= mp.inside && value < mp.inside + mp.count {
return true;
}
}
return false;
}
pub fn check_mapping(ranges: &Vec<Range>, map: &Vec<IdMap>) -> bool {
// TODO(tailhook) do more comprehensive algo
'map: for item in map.iter() {
for rng in ranges.iter() {
if rng.start <= item.outside &&
rng.end >= item.outside + item.count - 1
{
continue'map;
}
}
return false;
}
return true;
}
pub fn change_root(new_root: &Path, put_old: &Path) -> Result<(), String>
{
if unsafe { pivot_root(
cpath(new_root).as_ptr(),
cpath(put_old).as_ptr()) }!= 0
{
return Err(format!("Error pivot_root to {}: {}", new_root.display(),
IoError::last_os_error()));
}
if unsafe { chdir(CString::new("/").unwrap().as_ptr()) }!= 0
{
return Err(format!("Error chdir to root: {}",
IoError::last_os_error()));
}
return Ok(());
}
pub fn ensure_dir(dir: &Path) -> Result<(), String> {
if let Ok(dmeta) = metadata(dir) {
if!dmeta.is_dir() {
return Err(format!(concat!("Can't create dir {:?}, ",
"path already exists but not a directory"), dir));
}
return Ok(());
}
match create_dir(dir) {
Ok(()) => return Ok(()),
Err(ref e) if e.kind() == AlreadyExists => {
let dmeta = metadata(dir);
if dmeta.is_ok() && dmeta.unwrap().is_dir() {
return Ok(());
} else {
return Err(format!(concat!("Can't create dir {:?}, ",
"path already exists but not a directory"),
dir));
}
}
Err(ref e) => {
|
pub fn clean_dir(dir: &Path, remove_dir_itself: bool) -> Result<(), String> {
if let Err(e) = metadata(dir) {
if e.kind() == NotFound {
return Ok(());
} else {
return Err(format!("Can't stat dir {:?}: {}", dir, e));
}
}
// We temporarily change root, so that symlinks inside the dir
// would do no harm. But note that dir itself can be a symlink
try!(temporary_change_root(dir, || {
let dirlist = try!(read_dir("/")
.map_err(|e| format!("Can't read directory {:?}: {}", dir, e)))
.filter_map(|x| x.ok())
.collect::<Vec<_>>();
for entry in dirlist.into_iter() {
match metadata(entry.path()) {
Ok(ref meta) if meta.is_dir() => {
try!(remove_dir_all(entry.path())
.map_err(|e| format!("Can't remove directory {:?}{:?}: {}",
dir, entry.path(), e)));
}
Ok(_) => {
try!(remove_file(entry.path())
.map_err(|e| format!("Can't remove file {:?}{:?}: {}",
dir, entry.path(), e)));
}
Err(_) => {
return Err(format!("Can't stat file {:?}", entry.path()));
}
}
}
Ok(())
}));
if remove_dir_itself {
try!(remove_dir(dir)
.map_err(|e| format!("Can't remove dir {:?}: {}", dir, e)));
}
return Ok(());
}
pub fn join<S1, S2, I>(mut iter: I, sep: S2) -> String
where S1:AsRef<str>, S2:AsRef<str>, I:Iterator<Item=S1>
{
let mut buf = String::new();
match iter.next() {
Some(x) => buf.push_str(x.as_ref()),
None => {}
}
for i in iter {
buf.push_str(sep.as_ref());
buf.push_str(i.as_ref());
}
return buf;
}
pub fn get_time() -> Time {
let mut tv = timeval { tv_sec: 0, tv_usec: 0 };
unsafe { gettimeofday(&mut tv, ptr::null_mut()) };
return tv.tv_sec as f64 + tv.tv_usec as f64 * 0.000001;
}
pub fn set_file_owner(path: &Path, owner: uid_t, group: gid_t)
-> Result<(), IoError>
{
let cpath = cpath(path);
let rc = unsafe { chown(cpath.as_ptr(), owner, group) };
if rc < 0 {
return Err(IoError::last_os_error());
}
return Ok(());
}
pub fn set_file_mode(path: &Path, mode: mode_t) -> Result<(), IoError> {
let cpath = cpath(path);
let rc = unsafe { chmod(cpath.as_ptr(), mode) };
if rc < 0 {
return Err(IoError::last_os_error());
}
return Ok(());
}
pub fn cpath<P:AsRef<Path>>(path: P) -> CString {
CString::new(path.as_ref().to_str().unwrap()).unwrap()
}
pub fn relative(child: &Path, base: &Path) -> PathBuf {
assert!(child.starts_with(base));
let mut res = PathBuf::new();
for cmp in child.components().skip(base.components().count()) {
if let Normal(ref chunk) = cmp {
res.push(chunk);
} else {
panic!("Bad path for relative ({:?} from {:?} against {:?})",
cmp, child, base);
}
}
return res
}
impl FsUidGuard {
pub fn set(uid: u32, gid: u32) -> FsUidGuard {
if uid!= 0 || gid!= 0 {
unsafe { setfsuid(uid) };
if unsafe { setfsuid(uid) }!= uid as i32 {
error!("Can't set fs gid to open socket: {}. Ignoring.",
io::Error::last_os_error());
}
unsafe { setfsgid(gid) };
if unsafe { setfsgid(gid) }!= gid as i32 {
error!("Can't set fs uid to open socket: {}. Ignoring.",
io::Error::last_os_error());
}
FsUidGuard(true)
} else {
FsUidGuard(false)
}
}
}
impl Drop for FsUidGuard {
fn drop(&mut self) {
if self.0 {
unsafe { setfsuid(0) };
if unsafe { setfsuid(0) }!= 0 {
let err = io::Error::last_os_error();
error!("Can't return fs uid back to zero: {}. Aborting.", err);
panic!("Can't return fs uid back to zero: {}. Aborting.", err);
}
unsafe { setfsgid(0) };
if unsafe { setfsgid(0) }!= 0 {
let err = io::Error::last_os_error();
error!("Can't return fs gid back to zero: {}. Aborting.", err);
panic!("Can't return fs gid back to zero: {}. Aborting.", err);
}
}
}
}
|
return Err(format!(concat!("Can't create dir {:?}: {} ",
"path already exists but not a directory"), dir, e));
}
}
}
|
random_line_split
|
page.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::js::{JS, Root};
use dom::document::Document;
use dom::window::Window;
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::rc::Rc;
/// Encapsulates a handle to a frame in a frame tree.
#[derive(JSTraceable, HeapSizeOf)]
#[allow(unrooted_must_root)] // FIXME(#6687) this is wrong
pub struct Page {
/// Pipeline id associated with this page.
id: PipelineId,
/// The outermost frame containing the document and window.
frame: DOMRefCell<Option<Frame>>,
/// Indicates if reflow is required when reloading.
needs_reflow: Cell<bool>,
// Child Pages.
pub children: DOMRefCell<Vec<Rc<Page>>>,
}
pub struct PageIterator {
stack: Vec<Rc<Page>>,
}
pub trait IterablePage {
fn iter(&self) -> PageIterator;
fn find(&self, id: PipelineId) -> Option<Rc<Page>>;
}
impl IterablePage for Rc<Page> {
fn iter(&self) -> PageIterator {
PageIterator {
stack: vec!(self.clone()),
}
}
fn find(&self, id: PipelineId) -> Option<Rc<Page>> {
if self.id == id {
return Some(self.clone());
}
self.children.borrow()
.iter()
.filter_map(|p| p.find(id))
.next()
}
}
impl Page {
pub fn new(id: PipelineId) -> Page {
Page {
id: id,
frame: DOMRefCell::new(None),
needs_reflow: Cell::new(true),
children: DOMRefCell::new(vec!()),
}
}
pub fn pipeline(&self) -> PipelineId {
self.id
}
pub fn window(&self) -> Root<Window> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().window)
}
pub fn document(&self) -> Root<Document> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().document)
}
// must handle root case separately
pub fn remove(&self, id: PipelineId) -> Option<Rc<Page>> {
let remove_idx = {
self.children
.borrow()
.iter()
.position(|page_tree| page_tree.id == id)
};
match remove_idx {
Some(idx) => Some(self.children.borrow_mut().remove(idx)),
None => {
self.children
.borrow_mut()
.iter_mut()
.filter_map(|page_tree| page_tree.remove(id))
.next()
}
}
}
}
impl Iterator for PageIterator {
type Item = Rc<Page>;
fn next(&mut self) -> Option<Rc<Page>> {
let popped = self.stack.pop();
if let Some(ref page) = popped {
self.stack.extend(page.children.borrow().iter().cloned());
}
popped
}
}
impl Page {
pub fn set_reflow_status(&self, status: bool) -> bool {
let old = self.needs_reflow.get();
self.needs_reflow.set(status);
old
}
#[allow(unrooted_must_root)]
pub fn set_frame(&self, frame: Option<Frame>)
|
}
/// Information for one frame in the browsing context.
#[derive(JSTraceable, HeapSizeOf)]
#[must_root]
pub struct Frame {
/// The document for this frame.
pub document: JS<Document>,
/// The window object for this frame.
pub window: JS<Window>,
}
|
{
*self.frame.borrow_mut() = frame;
}
|
identifier_body
|
page.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::js::{JS, Root};
use dom::document::Document;
use dom::window::Window;
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::rc::Rc;
/// Encapsulates a handle to a frame in a frame tree.
#[derive(JSTraceable, HeapSizeOf)]
#[allow(unrooted_must_root)] // FIXME(#6687) this is wrong
pub struct Page {
/// Pipeline id associated with this page.
id: PipelineId,
/// The outermost frame containing the document and window.
frame: DOMRefCell<Option<Frame>>,
/// Indicates if reflow is required when reloading.
needs_reflow: Cell<bool>,
// Child Pages.
pub children: DOMRefCell<Vec<Rc<Page>>>,
}
pub struct PageIterator {
stack: Vec<Rc<Page>>,
}
pub trait IterablePage {
fn iter(&self) -> PageIterator;
fn find(&self, id: PipelineId) -> Option<Rc<Page>>;
}
impl IterablePage for Rc<Page> {
fn iter(&self) -> PageIterator {
PageIterator {
stack: vec!(self.clone()),
}
}
fn find(&self, id: PipelineId) -> Option<Rc<Page>> {
if self.id == id {
return Some(self.clone());
}
self.children.borrow()
.iter()
.filter_map(|p| p.find(id))
.next()
}
}
impl Page {
pub fn new(id: PipelineId) -> Page {
Page {
id: id,
frame: DOMRefCell::new(None),
needs_reflow: Cell::new(true),
children: DOMRefCell::new(vec!()),
}
}
pub fn pipeline(&self) -> PipelineId {
self.id
}
pub fn
|
(&self) -> Root<Window> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().window)
}
pub fn document(&self) -> Root<Document> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().document)
}
// must handle root case separately
pub fn remove(&self, id: PipelineId) -> Option<Rc<Page>> {
let remove_idx = {
self.children
.borrow()
.iter()
.position(|page_tree| page_tree.id == id)
};
match remove_idx {
Some(idx) => Some(self.children.borrow_mut().remove(idx)),
None => {
self.children
.borrow_mut()
.iter_mut()
.filter_map(|page_tree| page_tree.remove(id))
.next()
}
}
}
}
impl Iterator for PageIterator {
type Item = Rc<Page>;
fn next(&mut self) -> Option<Rc<Page>> {
let popped = self.stack.pop();
if let Some(ref page) = popped {
self.stack.extend(page.children.borrow().iter().cloned());
}
popped
}
}
impl Page {
pub fn set_reflow_status(&self, status: bool) -> bool {
let old = self.needs_reflow.get();
self.needs_reflow.set(status);
old
}
#[allow(unrooted_must_root)]
pub fn set_frame(&self, frame: Option<Frame>) {
*self.frame.borrow_mut() = frame;
}
}
/// Information for one frame in the browsing context.
#[derive(JSTraceable, HeapSizeOf)]
#[must_root]
pub struct Frame {
/// The document for this frame.
pub document: JS<Document>,
/// The window object for this frame.
pub window: JS<Window>,
}
|
window
|
identifier_name
|
page.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::js::{JS, Root};
use dom::document::Document;
use dom::window::Window;
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::rc::Rc;
/// Encapsulates a handle to a frame in a frame tree.
#[derive(JSTraceable, HeapSizeOf)]
#[allow(unrooted_must_root)] // FIXME(#6687) this is wrong
pub struct Page {
/// Pipeline id associated with this page.
id: PipelineId,
/// The outermost frame containing the document and window.
frame: DOMRefCell<Option<Frame>>,
/// Indicates if reflow is required when reloading.
needs_reflow: Cell<bool>,
// Child Pages.
pub children: DOMRefCell<Vec<Rc<Page>>>,
}
pub struct PageIterator {
stack: Vec<Rc<Page>>,
}
pub trait IterablePage {
fn iter(&self) -> PageIterator;
fn find(&self, id: PipelineId) -> Option<Rc<Page>>;
}
impl IterablePage for Rc<Page> {
fn iter(&self) -> PageIterator {
PageIterator {
stack: vec!(self.clone()),
}
}
fn find(&self, id: PipelineId) -> Option<Rc<Page>> {
if self.id == id {
return Some(self.clone());
}
self.children.borrow()
.iter()
.filter_map(|p| p.find(id))
.next()
}
}
impl Page {
pub fn new(id: PipelineId) -> Page {
Page {
id: id,
frame: DOMRefCell::new(None),
needs_reflow: Cell::new(true),
children: DOMRefCell::new(vec!()),
}
}
pub fn pipeline(&self) -> PipelineId {
self.id
}
pub fn window(&self) -> Root<Window> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().window)
}
pub fn document(&self) -> Root<Document> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().document)
}
// must handle root case separately
pub fn remove(&self, id: PipelineId) -> Option<Rc<Page>> {
let remove_idx = {
self.children
.borrow()
.iter()
.position(|page_tree| page_tree.id == id)
};
match remove_idx {
Some(idx) => Some(self.children.borrow_mut().remove(idx)),
None => {
self.children
.borrow_mut()
.iter_mut()
.filter_map(|page_tree| page_tree.remove(id))
.next()
}
}
}
}
impl Iterator for PageIterator {
type Item = Rc<Page>;
fn next(&mut self) -> Option<Rc<Page>> {
let popped = self.stack.pop();
if let Some(ref page) = popped
|
popped
}
}
impl Page {
pub fn set_reflow_status(&self, status: bool) -> bool {
let old = self.needs_reflow.get();
self.needs_reflow.set(status);
old
}
#[allow(unrooted_must_root)]
pub fn set_frame(&self, frame: Option<Frame>) {
*self.frame.borrow_mut() = frame;
}
}
/// Information for one frame in the browsing context.
#[derive(JSTraceable, HeapSizeOf)]
#[must_root]
pub struct Frame {
/// The document for this frame.
pub document: JS<Document>,
/// The window object for this frame.
pub window: JS<Window>,
}
|
{
self.stack.extend(page.children.borrow().iter().cloned());
}
|
conditional_block
|
page.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::cell::DOMRefCell;
use dom::bindings::js::{JS, Root};
use dom::document::Document;
use dom::window::Window;
use msg::constellation_msg::PipelineId;
use std::cell::Cell;
use std::rc::Rc;
/// Encapsulates a handle to a frame in a frame tree.
#[derive(JSTraceable, HeapSizeOf)]
#[allow(unrooted_must_root)] // FIXME(#6687) this is wrong
pub struct Page {
/// Pipeline id associated with this page.
id: PipelineId,
/// The outermost frame containing the document and window.
frame: DOMRefCell<Option<Frame>>,
/// Indicates if reflow is required when reloading.
needs_reflow: Cell<bool>,
// Child Pages.
pub children: DOMRefCell<Vec<Rc<Page>>>,
}
pub struct PageIterator {
stack: Vec<Rc<Page>>,
}
pub trait IterablePage {
fn iter(&self) -> PageIterator;
fn find(&self, id: PipelineId) -> Option<Rc<Page>>;
}
impl IterablePage for Rc<Page> {
fn iter(&self) -> PageIterator {
PageIterator {
stack: vec!(self.clone()),
}
}
fn find(&self, id: PipelineId) -> Option<Rc<Page>> {
if self.id == id {
return Some(self.clone());
}
self.children.borrow()
.iter()
.filter_map(|p| p.find(id))
.next()
}
}
impl Page {
pub fn new(id: PipelineId) -> Page {
Page {
id: id,
frame: DOMRefCell::new(None),
needs_reflow: Cell::new(true),
children: DOMRefCell::new(vec!()),
}
}
pub fn pipeline(&self) -> PipelineId {
self.id
}
pub fn window(&self) -> Root<Window> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().window)
}
pub fn document(&self) -> Root<Document> {
Root::from_ref(&*self.frame.borrow().as_ref().unwrap().document)
}
// must handle root case separately
pub fn remove(&self, id: PipelineId) -> Option<Rc<Page>> {
let remove_idx = {
self.children
.borrow()
.iter()
.position(|page_tree| page_tree.id == id)
};
match remove_idx {
Some(idx) => Some(self.children.borrow_mut().remove(idx)),
None => {
self.children
.borrow_mut()
.iter_mut()
.filter_map(|page_tree| page_tree.remove(id))
.next()
}
}
}
}
impl Iterator for PageIterator {
type Item = Rc<Page>;
fn next(&mut self) -> Option<Rc<Page>> {
let popped = self.stack.pop();
if let Some(ref page) = popped {
self.stack.extend(page.children.borrow().iter().cloned());
}
popped
}
}
impl Page {
pub fn set_reflow_status(&self, status: bool) -> bool {
let old = self.needs_reflow.get();
self.needs_reflow.set(status);
old
}
#[allow(unrooted_must_root)]
pub fn set_frame(&self, frame: Option<Frame>) {
*self.frame.borrow_mut() = frame;
}
}
/// Information for one frame in the browsing context.
#[derive(JSTraceable, HeapSizeOf)]
#[must_root]
pub struct Frame {
|
pub window: JS<Window>,
}
|
/// The document for this frame.
pub document: JS<Document>,
/// The window object for this frame.
|
random_line_split
|
circular_counter.rs
|
fn
|
(a: &Vec<i32>) -> Vec<i32> {
let mut a2 = a.clone();
let mut i = 0;
let mut b = vec![];
while a2.len() > 0 {
i = (i + 2) % a2.len();
b.push(a2[i]);
a2.remove(i);
}
b
}
fn main() {
let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
let b = circular_counter(&a);
println!("{:?}", b);
}
#[test]
fn test() {
assert_eq!(
circular_counter(
&vec![1, 2, 3, 4, 5, 6, 7, 8, 9]
),
vec![3, 6, 9, 4, 8, 5, 2, 7, 1]
);
}
|
circular_counter
|
identifier_name
|
circular_counter.rs
|
fn circular_counter(a: &Vec<i32>) -> Vec<i32>
|
fn main() {
let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
let b = circular_counter(&a);
println!("{:?}", b);
}
#[test]
fn test() {
assert_eq!(
circular_counter(
&vec![1, 2, 3, 4, 5, 6, 7, 8, 9]
),
vec![3, 6, 9, 4, 8, 5, 2, 7, 1]
);
}
|
{
let mut a2 = a.clone();
let mut i = 0;
let mut b = vec![];
while a2.len() > 0 {
i = (i + 2) % a2.len();
b.push(a2[i]);
a2.remove(i);
}
b
}
|
identifier_body
|
circular_counter.rs
|
fn circular_counter(a: &Vec<i32>) -> Vec<i32> {
let mut a2 = a.clone();
let mut i = 0;
let mut b = vec![];
while a2.len() > 0 {
i = (i + 2) % a2.len();
b.push(a2[i]);
a2.remove(i);
}
b
}
fn main() {
let a = vec![1, 2, 3, 4, 5, 6, 7, 8, 9];
let b = circular_counter(&a);
println!("{:?}", b);
}
#[test]
fn test() {
assert_eq!(
circular_counter(
&vec![1, 2, 3, 4, 5, 6, 7, 8, 9]
),
vec![3, 6, 9, 4, 8, 5, 2, 7, 1]
|
}
|
);
|
random_line_split
|
issue-51191.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(nll)]
struct Struct;
impl Struct {
fn bar(self: &mut Self) {
//~^ WARN function cannot return without recursing
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
fn imm(self) {
(&mut self).bar();
|
}
fn immref(&self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
//~^^ ERROR cannot borrow data in a `&` reference as mutable [E0596]
}
fn mtblref(&mut self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
}
fn main () {}
|
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
fn mtbl(mut self) {
(&mut self).bar();
|
random_line_split
|
issue-51191.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(nll)]
struct Struct;
impl Struct {
fn bar(self: &mut Self) {
//~^ WARN function cannot return without recursing
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
fn
|
(self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
fn mtbl(mut self) {
(&mut self).bar();
}
fn immref(&self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
//~^^ ERROR cannot borrow data in a `&` reference as mutable [E0596]
}
fn mtblref(&mut self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
}
fn main () {}
|
imm
|
identifier_name
|
issue-51191.rs
|
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![feature(nll)]
struct Struct;
impl Struct {
fn bar(self: &mut Self)
|
fn imm(self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
fn mtbl(mut self) {
(&mut self).bar();
}
fn immref(&self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
//~^^ ERROR cannot borrow data in a `&` reference as mutable [E0596]
}
fn mtblref(&mut self) {
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
}
fn main () {}
|
{
//~^ WARN function cannot return without recursing
(&mut self).bar();
//~^ ERROR cannot borrow `self` as mutable, as it is not declared as mutable [E0596]
}
|
identifier_body
|
lib.rs
|
// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::single_match_else,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use boxfuture::BoxFuture;
use bytes::Bytes;
use std::collections::{BTreeMap, BTreeSet};
use std::ops::AddAssign;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_semaphore::AsyncSemaphore;
pub mod local;
pub mod remote;
///
/// A process to be executed.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct ExecuteProcessRequest {
///
|
/// The arguments to execute.
///
/// The first argument should be an absolute or relative path to the binary to execute.
///
/// No PATH lookup will be performed unless a PATH environment variable is specified.
///
/// No shell expansion will take place.
///
pub argv: Vec<String>,
///
/// The environment variables to set for the execution.
///
/// No other environment variables will be set (except possibly for an empty PATH variable).
///
pub env: BTreeMap<String, String>,
pub input_files: hashing::Digest,
pub output_files: BTreeSet<PathBuf>,
pub output_directories: BTreeSet<PathBuf>,
pub timeout: std::time::Duration,
pub description: String,
///
/// If present, a symlink will be created at.jdk which points to this directory for local
/// execution, or a system-installed JDK (ignoring the value of the present Some) for remote
/// execution.
///
/// This is some technical debt we should clean up;
/// see https://github.com/pantsbuild/pants/issues/6416.
///
pub jdk_home: Option<PathBuf>,
}
///
/// The result of running a process.
///
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FallibleExecuteProcessResult {
pub stdout: Bytes,
pub stderr: Bytes,
pub exit_code: i32,
// It's unclear whether this should be a Snapshot or a digest of a Directory. A Directory digest
// is handy, so let's try that out for now.
pub output_directory: hashing::Digest,
pub execution_attempts: Vec<ExecutionStats>,
}
#[cfg(test)]
impl FallibleExecuteProcessResult {
pub fn without_execution_attempts(mut self) -> Self {
self.execution_attempts = vec![];
self
}
}
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct ExecutionStats {
uploaded_bytes: usize,
uploaded_file_count: usize,
upload: Duration,
remote_queue: Option<Duration>,
remote_input_fetch: Option<Duration>,
remote_execution: Option<Duration>,
remote_output_store: Option<Duration>,
was_cache_hit: bool,
}
impl AddAssign<fs::UploadSummary> for ExecutionStats {
fn add_assign(&mut self, summary: fs::UploadSummary) {
self.uploaded_file_count += summary.uploaded_file_count;
self.uploaded_bytes += summary.uploaded_file_bytes;
self.upload += summary.upload_wall_time;
}
}
pub trait CommandRunner: Send + Sync {
fn run(&self, req: ExecuteProcessRequest) -> BoxFuture<FallibleExecuteProcessResult, String>;
}
///
/// A CommandRunner wrapper that limits the number of concurrent requests.
///
#[derive(Clone)]
pub struct BoundedCommandRunner {
inner: Arc<(Box<dyn CommandRunner>, AsyncSemaphore)>,
}
impl BoundedCommandRunner {
pub fn new(inner: Box<dyn CommandRunner>, bound: usize) -> BoundedCommandRunner {
BoundedCommandRunner {
inner: Arc::new((inner, AsyncSemaphore::new(bound))),
}
}
}
impl CommandRunner for BoundedCommandRunner {
fn run(&self, req: ExecuteProcessRequest) -> BoxFuture<FallibleExecuteProcessResult, String> {
let inner = self.inner.clone();
self.inner.1.with_acquired(move || inner.0.run(req))
}
}
|
random_line_split
|
|
lib.rs
|
// Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
#![deny(warnings)]
// Enable all clippy lints except for many of the pedantic ones. It's a shame this needs to be copied and pasted across crates, but there doesn't appear to be a way to include inner attributes from a common source.
#![deny(
clippy::all,
clippy::default_trait_access,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::needless_continue,
clippy::single_match_else,
clippy::unseparated_literal_suffix,
clippy::used_underscore_binding
)]
// It is often more clear to show that nothing is being moved.
#![allow(clippy::match_ref_pats)]
// Subjective style.
#![allow(
clippy::len_without_is_empty,
clippy::redundant_field_names,
clippy::too_many_arguments
)]
// Default isn't as big a deal as people seem to think it is.
#![allow(clippy::new_without_default, clippy::new_ret_no_self)]
// Arc<Mutex> can be more clear than needing to grok Orderings:
#![allow(clippy::mutex_atomic)]
use boxfuture::BoxFuture;
use bytes::Bytes;
use std::collections::{BTreeMap, BTreeSet};
use std::ops::AddAssign;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use async_semaphore::AsyncSemaphore;
pub mod local;
pub mod remote;
///
/// A process to be executed.
///
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct ExecuteProcessRequest {
///
/// The arguments to execute.
///
/// The first argument should be an absolute or relative path to the binary to execute.
///
/// No PATH lookup will be performed unless a PATH environment variable is specified.
///
/// No shell expansion will take place.
///
pub argv: Vec<String>,
///
/// The environment variables to set for the execution.
///
/// No other environment variables will be set (except possibly for an empty PATH variable).
///
pub env: BTreeMap<String, String>,
pub input_files: hashing::Digest,
pub output_files: BTreeSet<PathBuf>,
pub output_directories: BTreeSet<PathBuf>,
pub timeout: std::time::Duration,
pub description: String,
///
/// If present, a symlink will be created at.jdk which points to this directory for local
/// execution, or a system-installed JDK (ignoring the value of the present Some) for remote
/// execution.
///
/// This is some technical debt we should clean up;
/// see https://github.com/pantsbuild/pants/issues/6416.
///
pub jdk_home: Option<PathBuf>,
}
///
/// The result of running a process.
///
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct FallibleExecuteProcessResult {
pub stdout: Bytes,
pub stderr: Bytes,
pub exit_code: i32,
// It's unclear whether this should be a Snapshot or a digest of a Directory. A Directory digest
// is handy, so let's try that out for now.
pub output_directory: hashing::Digest,
pub execution_attempts: Vec<ExecutionStats>,
}
#[cfg(test)]
impl FallibleExecuteProcessResult {
pub fn
|
(mut self) -> Self {
self.execution_attempts = vec![];
self
}
}
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct ExecutionStats {
uploaded_bytes: usize,
uploaded_file_count: usize,
upload: Duration,
remote_queue: Option<Duration>,
remote_input_fetch: Option<Duration>,
remote_execution: Option<Duration>,
remote_output_store: Option<Duration>,
was_cache_hit: bool,
}
impl AddAssign<fs::UploadSummary> for ExecutionStats {
fn add_assign(&mut self, summary: fs::UploadSummary) {
self.uploaded_file_count += summary.uploaded_file_count;
self.uploaded_bytes += summary.uploaded_file_bytes;
self.upload += summary.upload_wall_time;
}
}
pub trait CommandRunner: Send + Sync {
fn run(&self, req: ExecuteProcessRequest) -> BoxFuture<FallibleExecuteProcessResult, String>;
}
///
/// A CommandRunner wrapper that limits the number of concurrent requests.
///
#[derive(Clone)]
pub struct BoundedCommandRunner {
inner: Arc<(Box<dyn CommandRunner>, AsyncSemaphore)>,
}
impl BoundedCommandRunner {
pub fn new(inner: Box<dyn CommandRunner>, bound: usize) -> BoundedCommandRunner {
BoundedCommandRunner {
inner: Arc::new((inner, AsyncSemaphore::new(bound))),
}
}
}
impl CommandRunner for BoundedCommandRunner {
fn run(&self, req: ExecuteProcessRequest) -> BoxFuture<FallibleExecuteProcessResult, String> {
let inner = self.inner.clone();
self.inner.1.with_acquired(move || inner.0.run(req))
}
}
|
without_execution_attempts
|
identifier_name
|
main.rs
|
extern crate byteorder;
use std::collections::HashMap;
use std::io::Write;
use byteorder::{LittleEndian, WriteBytesExt};
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
struct Frac(u64, u64);
type Memory = HashMap<Frac, f64>;
fn simplify(Frac(a, b): Frac) -> Frac {
fn gcd(x: u64, y: u64) -> u64 {
if y == 0 {
x
} else {
gcd(y, x % y)
}
}
let d = gcd(a, b);
Frac(a/d, b/d)
}
/// judge a set of notes based on harmony.
/// range: floats in [0, 1] and lower is better.
fn judge_harmony(noteset: &[Frac], memory: &Memory) -> f64 {
let mut harmony_sum = 0_f64;
for &Frac(a1, b1) in noteset {
for (&Frac(a2, b2), &familiarity) in memory.iter() {
let Frac(a3, b3) = simplify(Frac(a1*b2, a2*b1));
harmony_sum += familiarity * (a3 as f64) * (b3 as f64);
}
}
let iterations = noteset.len()*memory.len();
let avg_harmony = (harmony_sum as f64)/(iterations as f64);
(1_f64 - 1_f64/(avg_harmony/5_f64).exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes based on familiarity & novelty balance.
/// range: floats in [0, 1] and lower is better.
fn judge_novelty(noteset: &[Frac], memory: &Memory) -> f64 {
if noteset.len() < 1 {
panic!("judge_novelty: need at least 1 note");
}
let mut familiarity_sum = 0_f64;
for note in noteset {
let &familiarity = memory.get(note).unwrap_or(&0_f64);
familiarity_sum += familiarity;
}
let avg_familiarity = familiarity_sum / (noteset.len() as f64);
let target_familiarity = 0.1_f64;
let disparity = (target_familiarity - avg_familiarity).abs();
(1_f64 - 1_f64/disparity.exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes.
/// range: floats in [0, 1] and lower is better.
fn judge(noteset: &[Frac], memory: &Memory) -> f64 {
(judge_harmony(noteset, memory) + judge_novelty(noteset, memory))/2_f64
}
fn forget(memory: &mut Memory) {
for (n, val) in memory.iter_mut() {
*val *= 0.75;
}
}
fn remember(note_set: &[Frac], memory: &mut Memory) {
let increase = 0.1_f64;
for note in note_set {
let val = match memory.get(note) {
Some(v) => v + increase,
None => increase,
};
memory.insert(note.clone(), val);
}
}
/// step to a set of notes that minimizes the judge function.
fn step_notes(note_set: &[Frac], memory: &Memory) -> Vec<Frac> {
let mut best: Vec<Frac> = note_set.to_owned();
let mut best_score = 1_f64;
for i in 0..note_set.len() {
for a in 1..12 {
for b in 1..12 {
let possibility = simplify(Frac(a, b));
if (note_set.contains(&possibility)) {
continue;
}
let note_set2: Vec<Frac> = note_set[0..i].iter()
.chain(note_set[i+1..note_set.len()].iter())
.chain([possibility].iter())
.map(|n| n.clone())
.collect();
let score = judge(¬e_set2, memory);
if score < best_score {
best = note_set2;
best_score = score;
}
}
}
}
best
}
type PCM_Sample = i16;
static PCM_HZ: u64 = 44100_u64;
static STEPS_PER_SEC: u64 = 4;
static BASE_NOTE: f64 = 250_f64;
type Endianness = LittleEndian;
fn sine_wave(freq: f64, step: u64) -> f64 {
(2.0*std::f64::consts::PI*(step as f64)*freq/(PCM_HZ as f64)).sin()
}
fn
|
(base_note: f64, fractions: &[Frac], step: u64) -> f64 {
let mut sum = 0_f64;
for &Frac(a, b) in fractions {
let freq = (base_note / (b as f64)) * (a as f64);
sum += sine_wave(freq, step);
}
sum / (fractions.len() as f64)
}
fn linear_envelope(sample: f64, duration: u64, progress: u64) -> f64 {
sample * (progress as f64) / (duration as f64)
}
fn output_pcm() {
let mut notes = vec![Frac(1, 2), Frac(1, 1), Frac(1, 3), Frac(1, 5), Frac(1, 7)];
let mut memory = Memory::new();
let mut j=0;
for i in (0_u64..u64::max_value()).cycle() {
let sample = sine_waves(BASE_NOTE, ¬es, i) *
(PCM_Sample::max_value() as f64);
let enveloped = linear_envelope(sample, j, PCM_HZ/STEPS_PER_SEC);
let bounded = enveloped.min(PCM_Sample::max_value() as f64 - 1_f64)
.max(PCM_Sample::min_value() as f64 + 1_f64);
let as_sample: PCM_Sample = bounded as PCM_Sample;
std::io::stdout().write_i16::<Endianness>(as_sample).unwrap();
j += 1;
if j == PCM_HZ/STEPS_PER_SEC {
j = 0;
forget(&mut memory);
notes = step_notes(¬es, &memory);
remember(¬es, &mut memory);
}
}
}
fn main() {
output_pcm();
}
|
sine_waves
|
identifier_name
|
main.rs
|
extern crate byteorder;
use std::collections::HashMap;
use std::io::Write;
use byteorder::{LittleEndian, WriteBytesExt};
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
struct Frac(u64, u64);
type Memory = HashMap<Frac, f64>;
fn simplify(Frac(a, b): Frac) -> Frac {
fn gcd(x: u64, y: u64) -> u64 {
if y == 0 {
x
} else {
gcd(y, x % y)
}
}
let d = gcd(a, b);
Frac(a/d, b/d)
}
/// judge a set of notes based on harmony.
/// range: floats in [0, 1] and lower is better.
fn judge_harmony(noteset: &[Frac], memory: &Memory) -> f64 {
let mut harmony_sum = 0_f64;
for &Frac(a1, b1) in noteset {
for (&Frac(a2, b2), &familiarity) in memory.iter() {
let Frac(a3, b3) = simplify(Frac(a1*b2, a2*b1));
harmony_sum += familiarity * (a3 as f64) * (b3 as f64);
}
}
let iterations = noteset.len()*memory.len();
let avg_harmony = (harmony_sum as f64)/(iterations as f64);
(1_f64 - 1_f64/(avg_harmony/5_f64).exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes based on familiarity & novelty balance.
/// range: floats in [0, 1] and lower is better.
fn judge_novelty(noteset: &[Frac], memory: &Memory) -> f64 {
if noteset.len() < 1 {
panic!("judge_novelty: need at least 1 note");
}
let mut familiarity_sum = 0_f64;
for note in noteset {
let &familiarity = memory.get(note).unwrap_or(&0_f64);
familiarity_sum += familiarity;
}
let avg_familiarity = familiarity_sum / (noteset.len() as f64);
let target_familiarity = 0.1_f64;
let disparity = (target_familiarity - avg_familiarity).abs();
(1_f64 - 1_f64/disparity.exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes.
/// range: floats in [0, 1] and lower is better.
fn judge(noteset: &[Frac], memory: &Memory) -> f64 {
(judge_harmony(noteset, memory) + judge_novelty(noteset, memory))/2_f64
}
fn forget(memory: &mut Memory)
|
fn remember(note_set: &[Frac], memory: &mut Memory) {
let increase = 0.1_f64;
for note in note_set {
let val = match memory.get(note) {
Some(v) => v + increase,
None => increase,
};
memory.insert(note.clone(), val);
}
}
/// step to a set of notes that minimizes the judge function.
fn step_notes(note_set: &[Frac], memory: &Memory) -> Vec<Frac> {
let mut best: Vec<Frac> = note_set.to_owned();
let mut best_score = 1_f64;
for i in 0..note_set.len() {
for a in 1..12 {
for b in 1..12 {
let possibility = simplify(Frac(a, b));
if (note_set.contains(&possibility)) {
continue;
}
let note_set2: Vec<Frac> = note_set[0..i].iter()
.chain(note_set[i+1..note_set.len()].iter())
.chain([possibility].iter())
.map(|n| n.clone())
.collect();
let score = judge(¬e_set2, memory);
if score < best_score {
best = note_set2;
best_score = score;
}
}
}
}
best
}
type PCM_Sample = i16;
static PCM_HZ: u64 = 44100_u64;
static STEPS_PER_SEC: u64 = 4;
static BASE_NOTE: f64 = 250_f64;
type Endianness = LittleEndian;
fn sine_wave(freq: f64, step: u64) -> f64 {
(2.0*std::f64::consts::PI*(step as f64)*freq/(PCM_HZ as f64)).sin()
}
fn sine_waves(base_note: f64, fractions: &[Frac], step: u64) -> f64 {
let mut sum = 0_f64;
for &Frac(a, b) in fractions {
let freq = (base_note / (b as f64)) * (a as f64);
sum += sine_wave(freq, step);
}
sum / (fractions.len() as f64)
}
fn linear_envelope(sample: f64, duration: u64, progress: u64) -> f64 {
sample * (progress as f64) / (duration as f64)
}
fn output_pcm() {
let mut notes = vec![Frac(1, 2), Frac(1, 1), Frac(1, 3), Frac(1, 5), Frac(1, 7)];
let mut memory = Memory::new();
let mut j=0;
for i in (0_u64..u64::max_value()).cycle() {
let sample = sine_waves(BASE_NOTE, ¬es, i) *
(PCM_Sample::max_value() as f64);
let enveloped = linear_envelope(sample, j, PCM_HZ/STEPS_PER_SEC);
let bounded = enveloped.min(PCM_Sample::max_value() as f64 - 1_f64)
.max(PCM_Sample::min_value() as f64 + 1_f64);
let as_sample: PCM_Sample = bounded as PCM_Sample;
std::io::stdout().write_i16::<Endianness>(as_sample).unwrap();
j += 1;
if j == PCM_HZ/STEPS_PER_SEC {
j = 0;
forget(&mut memory);
notes = step_notes(¬es, &memory);
remember(¬es, &mut memory);
}
}
}
fn main() {
output_pcm();
}
|
{
for (n, val) in memory.iter_mut() {
*val *= 0.75;
}
}
|
identifier_body
|
main.rs
|
extern crate byteorder;
use std::collections::HashMap;
use std::io::Write;
use byteorder::{LittleEndian, WriteBytesExt};
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
struct Frac(u64, u64);
type Memory = HashMap<Frac, f64>;
fn simplify(Frac(a, b): Frac) -> Frac {
fn gcd(x: u64, y: u64) -> u64 {
if y == 0 {
x
} else {
gcd(y, x % y)
}
}
let d = gcd(a, b);
Frac(a/d, b/d)
}
/// judge a set of notes based on harmony.
/// range: floats in [0, 1] and lower is better.
fn judge_harmony(noteset: &[Frac], memory: &Memory) -> f64 {
let mut harmony_sum = 0_f64;
for &Frac(a1, b1) in noteset {
for (&Frac(a2, b2), &familiarity) in memory.iter() {
let Frac(a3, b3) = simplify(Frac(a1*b2, a2*b1));
harmony_sum += familiarity * (a3 as f64) * (b3 as f64);
}
}
let iterations = noteset.len()*memory.len();
let avg_harmony = (harmony_sum as f64)/(iterations as f64);
(1_f64 - 1_f64/(avg_harmony/5_f64).exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes based on familiarity & novelty balance.
/// range: floats in [0, 1] and lower is better.
fn judge_novelty(noteset: &[Frac], memory: &Memory) -> f64 {
if noteset.len() < 1 {
panic!("judge_novelty: need at least 1 note");
}
let mut familiarity_sum = 0_f64;
for note in noteset {
let &familiarity = memory.get(note).unwrap_or(&0_f64);
familiarity_sum += familiarity;
}
let avg_familiarity = familiarity_sum / (noteset.len() as f64);
let target_familiarity = 0.1_f64;
let disparity = (target_familiarity - avg_familiarity).abs();
(1_f64 - 1_f64/disparity.exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes.
/// range: floats in [0, 1] and lower is better.
fn judge(noteset: &[Frac], memory: &Memory) -> f64 {
(judge_harmony(noteset, memory) + judge_novelty(noteset, memory))/2_f64
}
fn forget(memory: &mut Memory) {
for (n, val) in memory.iter_mut() {
*val *= 0.75;
}
}
fn remember(note_set: &[Frac], memory: &mut Memory) {
let increase = 0.1_f64;
for note in note_set {
let val = match memory.get(note) {
Some(v) => v + increase,
None => increase,
};
memory.insert(note.clone(), val);
}
}
/// step to a set of notes that minimizes the judge function.
fn step_notes(note_set: &[Frac], memory: &Memory) -> Vec<Frac> {
let mut best: Vec<Frac> = note_set.to_owned();
let mut best_score = 1_f64;
for i in 0..note_set.len() {
for a in 1..12 {
for b in 1..12 {
let possibility = simplify(Frac(a, b));
if (note_set.contains(&possibility)) {
continue;
}
let note_set2: Vec<Frac> = note_set[0..i].iter()
.chain(note_set[i+1..note_set.len()].iter())
.chain([possibility].iter())
.map(|n| n.clone())
.collect();
let score = judge(¬e_set2, memory);
if score < best_score {
best = note_set2;
best_score = score;
}
}
}
}
best
}
type PCM_Sample = i16;
static PCM_HZ: u64 = 44100_u64;
static STEPS_PER_SEC: u64 = 4;
static BASE_NOTE: f64 = 250_f64;
type Endianness = LittleEndian;
fn sine_wave(freq: f64, step: u64) -> f64 {
(2.0*std::f64::consts::PI*(step as f64)*freq/(PCM_HZ as f64)).sin()
}
fn sine_waves(base_note: f64, fractions: &[Frac], step: u64) -> f64 {
let mut sum = 0_f64;
for &Frac(a, b) in fractions {
let freq = (base_note / (b as f64)) * (a as f64);
sum += sine_wave(freq, step);
}
|
sum / (fractions.len() as f64)
}
fn linear_envelope(sample: f64, duration: u64, progress: u64) -> f64 {
sample * (progress as f64) / (duration as f64)
}
fn output_pcm() {
let mut notes = vec![Frac(1, 2), Frac(1, 1), Frac(1, 3), Frac(1, 5), Frac(1, 7)];
let mut memory = Memory::new();
let mut j=0;
for i in (0_u64..u64::max_value()).cycle() {
let sample = sine_waves(BASE_NOTE, ¬es, i) *
(PCM_Sample::max_value() as f64);
let enveloped = linear_envelope(sample, j, PCM_HZ/STEPS_PER_SEC);
let bounded = enveloped.min(PCM_Sample::max_value() as f64 - 1_f64)
.max(PCM_Sample::min_value() as f64 + 1_f64);
let as_sample: PCM_Sample = bounded as PCM_Sample;
std::io::stdout().write_i16::<Endianness>(as_sample).unwrap();
j += 1;
if j == PCM_HZ/STEPS_PER_SEC {
j = 0;
forget(&mut memory);
notes = step_notes(¬es, &memory);
remember(¬es, &mut memory);
}
}
}
fn main() {
output_pcm();
}
|
random_line_split
|
|
main.rs
|
extern crate byteorder;
use std::collections::HashMap;
use std::io::Write;
use byteorder::{LittleEndian, WriteBytesExt};
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
struct Frac(u64, u64);
type Memory = HashMap<Frac, f64>;
fn simplify(Frac(a, b): Frac) -> Frac {
fn gcd(x: u64, y: u64) -> u64 {
if y == 0 {
x
} else {
gcd(y, x % y)
}
}
let d = gcd(a, b);
Frac(a/d, b/d)
}
/// judge a set of notes based on harmony.
/// range: floats in [0, 1] and lower is better.
fn judge_harmony(noteset: &[Frac], memory: &Memory) -> f64 {
let mut harmony_sum = 0_f64;
for &Frac(a1, b1) in noteset {
for (&Frac(a2, b2), &familiarity) in memory.iter() {
let Frac(a3, b3) = simplify(Frac(a1*b2, a2*b1));
harmony_sum += familiarity * (a3 as f64) * (b3 as f64);
}
}
let iterations = noteset.len()*memory.len();
let avg_harmony = (harmony_sum as f64)/(iterations as f64);
(1_f64 - 1_f64/(avg_harmony/5_f64).exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes based on familiarity & novelty balance.
/// range: floats in [0, 1] and lower is better.
fn judge_novelty(noteset: &[Frac], memory: &Memory) -> f64 {
if noteset.len() < 1 {
panic!("judge_novelty: need at least 1 note");
}
let mut familiarity_sum = 0_f64;
for note in noteset {
let &familiarity = memory.get(note).unwrap_or(&0_f64);
familiarity_sum += familiarity;
}
let avg_familiarity = familiarity_sum / (noteset.len() as f64);
let target_familiarity = 0.1_f64;
let disparity = (target_familiarity - avg_familiarity).abs();
(1_f64 - 1_f64/disparity.exp()).max(0_f64).min(1_f64)
}
/// judge a set of notes.
/// range: floats in [0, 1] and lower is better.
fn judge(noteset: &[Frac], memory: &Memory) -> f64 {
(judge_harmony(noteset, memory) + judge_novelty(noteset, memory))/2_f64
}
fn forget(memory: &mut Memory) {
for (n, val) in memory.iter_mut() {
*val *= 0.75;
}
}
fn remember(note_set: &[Frac], memory: &mut Memory) {
let increase = 0.1_f64;
for note in note_set {
let val = match memory.get(note) {
Some(v) => v + increase,
None => increase,
};
memory.insert(note.clone(), val);
}
}
/// step to a set of notes that minimizes the judge function.
fn step_notes(note_set: &[Frac], memory: &Memory) -> Vec<Frac> {
let mut best: Vec<Frac> = note_set.to_owned();
let mut best_score = 1_f64;
for i in 0..note_set.len() {
for a in 1..12 {
for b in 1..12 {
let possibility = simplify(Frac(a, b));
if (note_set.contains(&possibility)) {
continue;
}
let note_set2: Vec<Frac> = note_set[0..i].iter()
.chain(note_set[i+1..note_set.len()].iter())
.chain([possibility].iter())
.map(|n| n.clone())
.collect();
let score = judge(¬e_set2, memory);
if score < best_score
|
}
}
}
best
}
type PCM_Sample = i16;
static PCM_HZ: u64 = 44100_u64;
static STEPS_PER_SEC: u64 = 4;
static BASE_NOTE: f64 = 250_f64;
type Endianness = LittleEndian;
fn sine_wave(freq: f64, step: u64) -> f64 {
(2.0*std::f64::consts::PI*(step as f64)*freq/(PCM_HZ as f64)).sin()
}
fn sine_waves(base_note: f64, fractions: &[Frac], step: u64) -> f64 {
let mut sum = 0_f64;
for &Frac(a, b) in fractions {
let freq = (base_note / (b as f64)) * (a as f64);
sum += sine_wave(freq, step);
}
sum / (fractions.len() as f64)
}
fn linear_envelope(sample: f64, duration: u64, progress: u64) -> f64 {
sample * (progress as f64) / (duration as f64)
}
fn output_pcm() {
let mut notes = vec![Frac(1, 2), Frac(1, 1), Frac(1, 3), Frac(1, 5), Frac(1, 7)];
let mut memory = Memory::new();
let mut j=0;
for i in (0_u64..u64::max_value()).cycle() {
let sample = sine_waves(BASE_NOTE, ¬es, i) *
(PCM_Sample::max_value() as f64);
let enveloped = linear_envelope(sample, j, PCM_HZ/STEPS_PER_SEC);
let bounded = enveloped.min(PCM_Sample::max_value() as f64 - 1_f64)
.max(PCM_Sample::min_value() as f64 + 1_f64);
let as_sample: PCM_Sample = bounded as PCM_Sample;
std::io::stdout().write_i16::<Endianness>(as_sample).unwrap();
j += 1;
if j == PCM_HZ/STEPS_PER_SEC {
j = 0;
forget(&mut memory);
notes = step_notes(¬es, &memory);
remember(¬es, &mut memory);
}
}
}
fn main() {
output_pcm();
}
|
{
best = note_set2;
best_score = score;
}
|
conditional_block
|
group.rs
|
/*!
Group Icons and Cursors.
References:
* http://msdn.microsoft.com/en-us/library/ms997538.aspx
* https://devblogs.microsoft.com/oldnewthing/20120720-00/?p=7083
* https://github.com/MathewSachin/NIco/wiki/Ico,-Cur-and-PE-Formats
# Examples
The following example prints all group icon resource names which contain a PNG image.
```
// Aqcuire the resources of a Portable Executable file
let resources: pelite::resources::Resources;
# fn example(resources: pelite::resources::Resources<'_>) {
// Iterate over the group icons in the resources and throw away any invalid results
// If the resources contain no group icons the iterator is empty
for (name, group) in resources.icons().filter_map(Result::ok) {
// Enumerate the entries in the group
for entry in group.entries() {
// Fetch the image data for this entry
match group.image(entry.nId) {
Ok(image) => {
// Check if the image data starts with the PNG magic bytes
if image.starts_with(b"\x89PNG") {
println!("{}: contains PNG", name);
}
},
Err(err) => {
println!("{}: Error {}!", name, err)
},
}
}
}
# }
```
*/
use std::prelude::v1::*;
#[cfg(feature = "std")]
use std::io;
use crate::util::AlignTo;
use crate::Error;
use crate::Pod;
use std::{fmt, mem, slice};
use super::{FindError, Resources};
use self::image::*;
//----------------------------------------------------------------
/// Icon or Cursor type.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ResourceType {
Icon,
Cursor,
}
impl ResourceType {
#[inline]
pub fn id(self) -> u16 {
match self {
ResourceType::Icon => crate::image::RT_ICON,
ResourceType::Cursor => crate::image::RT_CURSOR,
}
}
}
impl<'a> From<ResourceType> for super::Name<'a> {
fn from(resource_type: ResourceType) -> super::Name<'a> {
resource_type.id().into()
}
}
/// Group resources, Icons and Cursors.
#[derive(Copy, Clone)]
pub struct GroupResource<'a> {
resources: Resources<'a>,
image: &'a GRPICONDIR,
}
impl<'a> GroupResource<'a> {
/// Parses the GroupResource from the byte slice.
///
/// The pixel data of the group resource is stored in separate data entries, requiring the resources to access.
pub fn new(resources: Resources<'a>, bytes: &'a [u8]) -> Result<GroupResource<'a>, Error> {
if!bytes.as_ptr().aligned_to(2) {
return Err(Error::Misaligned);
}
if bytes.len() < mem::size_of::<GRPICONDIR>() {
return Err(Error::Bounds);
}
let image: &'a GRPICONDIR = unsafe { &*(bytes.as_ptr() as *const GRPICONDIR) };
if image.idReserved!= 0 ||!(image.idType == 1 || image.idType == 2) {
return Err(Error::BadMagic);
}
let total_size = mem::size_of::<GRPICONDIR>() + image.idCount as usize * mem::size_of::<GRPICONDIRENTRY>();
if bytes.len()!= total_size {
return Err(Error::Bounds);
}
Ok(GroupResource { resources, image })
}
/// Gets the Group header.
pub fn header(&self) -> &'a GRPICONDIR {
self.image
}
/// Gets the Group entries.
pub fn entries(&self) -> &'a [GRPICONDIRENTRY] {
let len = self.image.idCount as usize;
// Checked by try_from constructor
unsafe {
let ptr = (self.image as *const GRPICONDIR).offset(1) as *const GRPICONDIRENTRY;
slice::from_raw_parts(ptr, len)
}
}
/// Gets the Group resource type.
pub fn ty(&self) -> ResourceType {
match self.image.idType {
1 => ResourceType::Icon,
2 => ResourceType::Cursor,
_ => unreachable!(), // Checked by constructor
}
}
/// Gets the image data for the given icon id.
pub fn image(&self, id: u16) -> Result<&'a [u8], FindError>
|
/// Reassemble the file.
#[cfg(feature = "std")]
pub fn write(&self, dest: &mut dyn io::Write) -> io::Result<()> {
// Start by appending the header
dest.write(self.image.as_bytes())?;
// Write all the icon entries
let entries = self.entries();
let mut image_offset = (6 + entries.len() * 16) as u32;
for entry in entries {
// Fixup the dwImageOffset field of the icon entry
// NOTE! It is expected that the actual icon data size matches dwBytesInRes information!
let mut icon_entry = [0u32; 4];
icon_entry.as_bytes_mut()[..14].copy_from_slice(entry.as_bytes());
icon_entry[3] = image_offset;
image_offset += entry.bytes_in_resource();
dest.write(icon_entry.as_bytes())?;
}
// Append the bytes for every entry
for entry in entries {
// Find the Icon data and append it
// FIXME! What do if dwBytesInRes does not match the icon data size?
// Ignoring this check may lead to corrupt icon files
if let Ok(bytes) = self.image(entry.nId) {
// assert_eq!(entry.bytes_in_resource() as usize, bytes.len());
dest.write(bytes)?;
}
}
Ok(())
}
}
impl fmt::Debug for GroupResource<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("GroupResource")
.field("type", &self.ty())
.field("entries.len", &self.entries().len())
.finish()
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for GroupResource<'_> {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut bytes = Vec::new();
mem::forget(self.write(&mut bytes));
#[cfg(feature = "data-encoding")]
{if serializer.is_human_readable() {
return serializer.serialize_str(&data_encoding::BASE64.encode(&bytes));
}}
serializer.serialize_bytes(&bytes)
}
}
/// Group Icon.
pub type GroupIcon<'a> = GroupResource<'a>;
/// Group Cursor.
pub type GroupCursor<'a> = GroupResource<'a>;
//----------------------------------------------------------------
#[allow(non_snake_case)]
pub mod image {
use crate::Pod;
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct GRPICONDIR {
pub idReserved: u16,
pub idType: u16,
pub idCount: u16,
pub idEntries: [GRPICONDIRENTRY; 0],
}
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct GRPICONDIRENTRY {
pub bWidth: u8,
pub bHeight: u8,
pub bColorCount: u8,
pub bReserved: u8,
pub wPlanes: u16,
pub wBitCount: u16,
pub dwBytesInResLo: u16,
pub dwBytesInResHi: u16,
pub nId: u16,
}
impl GRPICONDIRENTRY {
pub fn bytes_in_resource(&self) -> u32 {
self.dwBytesInResHi as u32 * 0x10000 + self.dwBytesInResLo as u32
}
}
unsafe impl Pod for GRPICONDIR {}
unsafe impl Pod for GRPICONDIRENTRY {}
}
|
{
self.resources.root()?
.get_dir(self.ty().into())?
.get_dir(id.into())?
.first_data()?
.bytes().map_err(FindError::Pe)
}
|
identifier_body
|
group.rs
|
/*!
Group Icons and Cursors.
References:
* http://msdn.microsoft.com/en-us/library/ms997538.aspx
* https://devblogs.microsoft.com/oldnewthing/20120720-00/?p=7083
* https://github.com/MathewSachin/NIco/wiki/Ico,-Cur-and-PE-Formats
# Examples
The following example prints all group icon resource names which contain a PNG image.
```
// Aqcuire the resources of a Portable Executable file
let resources: pelite::resources::Resources;
# fn example(resources: pelite::resources::Resources<'_>) {
// Iterate over the group icons in the resources and throw away any invalid results
// If the resources contain no group icons the iterator is empty
for (name, group) in resources.icons().filter_map(Result::ok) {
// Enumerate the entries in the group
for entry in group.entries() {
// Fetch the image data for this entry
match group.image(entry.nId) {
Ok(image) => {
// Check if the image data starts with the PNG magic bytes
if image.starts_with(b"\x89PNG") {
println!("{}: contains PNG", name);
}
},
Err(err) => {
println!("{}: Error {}!", name, err)
},
}
}
}
# }
```
*/
use std::prelude::v1::*;
#[cfg(feature = "std")]
use std::io;
use crate::util::AlignTo;
use crate::Error;
use crate::Pod;
use std::{fmt, mem, slice};
use super::{FindError, Resources};
use self::image::*;
//----------------------------------------------------------------
/// Icon or Cursor type.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ResourceType {
Icon,
Cursor,
}
impl ResourceType {
#[inline]
pub fn id(self) -> u16 {
match self {
ResourceType::Icon => crate::image::RT_ICON,
ResourceType::Cursor => crate::image::RT_CURSOR,
}
}
}
impl<'a> From<ResourceType> for super::Name<'a> {
fn from(resource_type: ResourceType) -> super::Name<'a> {
resource_type.id().into()
}
}
/// Group resources, Icons and Cursors.
#[derive(Copy, Clone)]
pub struct GroupResource<'a> {
resources: Resources<'a>,
image: &'a GRPICONDIR,
}
impl<'a> GroupResource<'a> {
/// Parses the GroupResource from the byte slice.
///
/// The pixel data of the group resource is stored in separate data entries, requiring the resources to access.
pub fn new(resources: Resources<'a>, bytes: &'a [u8]) -> Result<GroupResource<'a>, Error> {
if!bytes.as_ptr().aligned_to(2) {
return Err(Error::Misaligned);
}
if bytes.len() < mem::size_of::<GRPICONDIR>() {
return Err(Error::Bounds);
}
let image: &'a GRPICONDIR = unsafe { &*(bytes.as_ptr() as *const GRPICONDIR) };
if image.idReserved!= 0 ||!(image.idType == 1 || image.idType == 2) {
return Err(Error::BadMagic);
}
let total_size = mem::size_of::<GRPICONDIR>() + image.idCount as usize * mem::size_of::<GRPICONDIRENTRY>();
if bytes.len()!= total_size {
return Err(Error::Bounds);
}
Ok(GroupResource { resources, image })
}
/// Gets the Group header.
pub fn header(&self) -> &'a GRPICONDIR {
self.image
}
/// Gets the Group entries.
pub fn entries(&self) -> &'a [GRPICONDIRENTRY] {
let len = self.image.idCount as usize;
// Checked by try_from constructor
unsafe {
let ptr = (self.image as *const GRPICONDIR).offset(1) as *const GRPICONDIRENTRY;
slice::from_raw_parts(ptr, len)
}
}
/// Gets the Group resource type.
pub fn ty(&self) -> ResourceType {
match self.image.idType {
1 => ResourceType::Icon,
2 => ResourceType::Cursor,
_ => unreachable!(), // Checked by constructor
}
}
/// Gets the image data for the given icon id.
pub fn image(&self, id: u16) -> Result<&'a [u8], FindError> {
self.resources.root()?
.get_dir(self.ty().into())?
.get_dir(id.into())?
.first_data()?
.bytes().map_err(FindError::Pe)
}
/// Reassemble the file.
#[cfg(feature = "std")]
pub fn write(&self, dest: &mut dyn io::Write) -> io::Result<()> {
// Start by appending the header
dest.write(self.image.as_bytes())?;
// Write all the icon entries
let entries = self.entries();
let mut image_offset = (6 + entries.len() * 16) as u32;
for entry in entries {
// Fixup the dwImageOffset field of the icon entry
// NOTE! It is expected that the actual icon data size matches dwBytesInRes information!
let mut icon_entry = [0u32; 4];
icon_entry.as_bytes_mut()[..14].copy_from_slice(entry.as_bytes());
icon_entry[3] = image_offset;
image_offset += entry.bytes_in_resource();
dest.write(icon_entry.as_bytes())?;
}
// Append the bytes for every entry
for entry in entries {
// Find the Icon data and append it
// FIXME! What do if dwBytesInRes does not match the icon data size?
// Ignoring this check may lead to corrupt icon files
if let Ok(bytes) = self.image(entry.nId) {
// assert_eq!(entry.bytes_in_resource() as usize, bytes.len());
dest.write(bytes)?;
}
}
Ok(())
}
}
impl fmt::Debug for GroupResource<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("GroupResource")
.field("type", &self.ty())
.field("entries.len", &self.entries().len())
.finish()
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for GroupResource<'_> {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut bytes = Vec::new();
mem::forget(self.write(&mut bytes));
#[cfg(feature = "data-encoding")]
{if serializer.is_human_readable() {
return serializer.serialize_str(&data_encoding::BASE64.encode(&bytes));
}}
serializer.serialize_bytes(&bytes)
}
}
/// Group Icon.
pub type GroupIcon<'a> = GroupResource<'a>;
/// Group Cursor.
pub type GroupCursor<'a> = GroupResource<'a>;
//----------------------------------------------------------------
#[allow(non_snake_case)]
pub mod image {
use crate::Pod;
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct
|
{
pub idReserved: u16,
pub idType: u16,
pub idCount: u16,
pub idEntries: [GRPICONDIRENTRY; 0],
}
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct GRPICONDIRENTRY {
pub bWidth: u8,
pub bHeight: u8,
pub bColorCount: u8,
pub bReserved: u8,
pub wPlanes: u16,
pub wBitCount: u16,
pub dwBytesInResLo: u16,
pub dwBytesInResHi: u16,
pub nId: u16,
}
impl GRPICONDIRENTRY {
pub fn bytes_in_resource(&self) -> u32 {
self.dwBytesInResHi as u32 * 0x10000 + self.dwBytesInResLo as u32
}
}
unsafe impl Pod for GRPICONDIR {}
unsafe impl Pod for GRPICONDIRENTRY {}
}
|
GRPICONDIR
|
identifier_name
|
group.rs
|
/*!
Group Icons and Cursors.
References:
* http://msdn.microsoft.com/en-us/library/ms997538.aspx
* https://devblogs.microsoft.com/oldnewthing/20120720-00/?p=7083
* https://github.com/MathewSachin/NIco/wiki/Ico,-Cur-and-PE-Formats
|
```
// Aqcuire the resources of a Portable Executable file
let resources: pelite::resources::Resources;
# fn example(resources: pelite::resources::Resources<'_>) {
// Iterate over the group icons in the resources and throw away any invalid results
// If the resources contain no group icons the iterator is empty
for (name, group) in resources.icons().filter_map(Result::ok) {
// Enumerate the entries in the group
for entry in group.entries() {
// Fetch the image data for this entry
match group.image(entry.nId) {
Ok(image) => {
// Check if the image data starts with the PNG magic bytes
if image.starts_with(b"\x89PNG") {
println!("{}: contains PNG", name);
}
},
Err(err) => {
println!("{}: Error {}!", name, err)
},
}
}
}
# }
```
*/
use std::prelude::v1::*;
#[cfg(feature = "std")]
use std::io;
use crate::util::AlignTo;
use crate::Error;
use crate::Pod;
use std::{fmt, mem, slice};
use super::{FindError, Resources};
use self::image::*;
//----------------------------------------------------------------
/// Icon or Cursor type.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum ResourceType {
Icon,
Cursor,
}
impl ResourceType {
#[inline]
pub fn id(self) -> u16 {
match self {
ResourceType::Icon => crate::image::RT_ICON,
ResourceType::Cursor => crate::image::RT_CURSOR,
}
}
}
impl<'a> From<ResourceType> for super::Name<'a> {
fn from(resource_type: ResourceType) -> super::Name<'a> {
resource_type.id().into()
}
}
/// Group resources, Icons and Cursors.
#[derive(Copy, Clone)]
pub struct GroupResource<'a> {
resources: Resources<'a>,
image: &'a GRPICONDIR,
}
impl<'a> GroupResource<'a> {
/// Parses the GroupResource from the byte slice.
///
/// The pixel data of the group resource is stored in separate data entries, requiring the resources to access.
pub fn new(resources: Resources<'a>, bytes: &'a [u8]) -> Result<GroupResource<'a>, Error> {
if!bytes.as_ptr().aligned_to(2) {
return Err(Error::Misaligned);
}
if bytes.len() < mem::size_of::<GRPICONDIR>() {
return Err(Error::Bounds);
}
let image: &'a GRPICONDIR = unsafe { &*(bytes.as_ptr() as *const GRPICONDIR) };
if image.idReserved!= 0 ||!(image.idType == 1 || image.idType == 2) {
return Err(Error::BadMagic);
}
let total_size = mem::size_of::<GRPICONDIR>() + image.idCount as usize * mem::size_of::<GRPICONDIRENTRY>();
if bytes.len()!= total_size {
return Err(Error::Bounds);
}
Ok(GroupResource { resources, image })
}
/// Gets the Group header.
pub fn header(&self) -> &'a GRPICONDIR {
self.image
}
/// Gets the Group entries.
pub fn entries(&self) -> &'a [GRPICONDIRENTRY] {
let len = self.image.idCount as usize;
// Checked by try_from constructor
unsafe {
let ptr = (self.image as *const GRPICONDIR).offset(1) as *const GRPICONDIRENTRY;
slice::from_raw_parts(ptr, len)
}
}
/// Gets the Group resource type.
pub fn ty(&self) -> ResourceType {
match self.image.idType {
1 => ResourceType::Icon,
2 => ResourceType::Cursor,
_ => unreachable!(), // Checked by constructor
}
}
/// Gets the image data for the given icon id.
pub fn image(&self, id: u16) -> Result<&'a [u8], FindError> {
self.resources.root()?
.get_dir(self.ty().into())?
.get_dir(id.into())?
.first_data()?
.bytes().map_err(FindError::Pe)
}
/// Reassemble the file.
#[cfg(feature = "std")]
pub fn write(&self, dest: &mut dyn io::Write) -> io::Result<()> {
// Start by appending the header
dest.write(self.image.as_bytes())?;
// Write all the icon entries
let entries = self.entries();
let mut image_offset = (6 + entries.len() * 16) as u32;
for entry in entries {
// Fixup the dwImageOffset field of the icon entry
// NOTE! It is expected that the actual icon data size matches dwBytesInRes information!
let mut icon_entry = [0u32; 4];
icon_entry.as_bytes_mut()[..14].copy_from_slice(entry.as_bytes());
icon_entry[3] = image_offset;
image_offset += entry.bytes_in_resource();
dest.write(icon_entry.as_bytes())?;
}
// Append the bytes for every entry
for entry in entries {
// Find the Icon data and append it
// FIXME! What do if dwBytesInRes does not match the icon data size?
// Ignoring this check may lead to corrupt icon files
if let Ok(bytes) = self.image(entry.nId) {
// assert_eq!(entry.bytes_in_resource() as usize, bytes.len());
dest.write(bytes)?;
}
}
Ok(())
}
}
impl fmt::Debug for GroupResource<'_> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("GroupResource")
.field("type", &self.ty())
.field("entries.len", &self.entries().len())
.finish()
}
}
#[cfg(feature = "serde")]
impl serde::Serialize for GroupResource<'_> {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
let mut bytes = Vec::new();
mem::forget(self.write(&mut bytes));
#[cfg(feature = "data-encoding")]
{if serializer.is_human_readable() {
return serializer.serialize_str(&data_encoding::BASE64.encode(&bytes));
}}
serializer.serialize_bytes(&bytes)
}
}
/// Group Icon.
pub type GroupIcon<'a> = GroupResource<'a>;
/// Group Cursor.
pub type GroupCursor<'a> = GroupResource<'a>;
//----------------------------------------------------------------
#[allow(non_snake_case)]
pub mod image {
use crate::Pod;
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct GRPICONDIR {
pub idReserved: u16,
pub idType: u16,
pub idCount: u16,
pub idEntries: [GRPICONDIRENTRY; 0],
}
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct GRPICONDIRENTRY {
pub bWidth: u8,
pub bHeight: u8,
pub bColorCount: u8,
pub bReserved: u8,
pub wPlanes: u16,
pub wBitCount: u16,
pub dwBytesInResLo: u16,
pub dwBytesInResHi: u16,
pub nId: u16,
}
impl GRPICONDIRENTRY {
pub fn bytes_in_resource(&self) -> u32 {
self.dwBytesInResHi as u32 * 0x10000 + self.dwBytesInResLo as u32
}
}
unsafe impl Pod for GRPICONDIR {}
unsafe impl Pod for GRPICONDIRENTRY {}
}
|
# Examples
The following example prints all group icon resource names which contain a PNG image.
|
random_line_split
|
exhaustive_vec_permutations.rs
|
use itertools::Itertools;
use malachite_base::vecs::exhaustive_vec_permutations;
fn exhaustive_vec_permutations_helper(cs: &str, out: &[&str]) {
let cs = cs.chars().collect_vec();
let css: Vec<String> = exhaustive_vec_permutations(cs)
.map(|ds| ds.into_iter().collect())
.collect();
assert_eq!(css.iter().map(String::as_str).collect_vec().as_slice(), out);
}
#[test]
fn test_exhaustive_vec_permutations()
|
"24135", "24153", "24315", "24351", "24513", "24531", "25134", "25143", "25314",
"25341", "25413", "25431", "31245", "31254", "31425", "31452", "31524", "31542",
"32145", "32154", "32415", "32451", "32514", "32541", "34125", "34152", "34215",
"34251", "34512", "34521", "35124", "35142", "35214", "35241", "35412", "35421",
"41235", "41253", "41325", "41352", "41523", "41532", "42135", "42153", "42315",
"42351", "42513", "42531", "43125", "43152", "43215", "43251", "43512", "43521",
"45123", "45132", "45213", "45231", "45312", "45321", "51234", "51243", "51324",
"51342", "51423", "51432", "52134", "52143", "52314", "52341", "52413", "52431",
"53124", "53142", "53214", "53241", "53412", "53421", "54123", "54132", "54213",
"54231", "54312", "54321",
],
);
exhaustive_vec_permutations_helper(
"abcd",
&[
"abcd", "abdc", "acbd", "acdb", "adbc", "adcb", "bacd", "badc", "bcad", "bcda", "bdac",
"bdca", "cabd", "cadb", "cbad", "cbda", "cdab", "cdba", "dabc", "dacb", "dbac", "dbca",
"dcab", "dcba",
],
);
}
|
{
exhaustive_vec_permutations_helper("", &[""]);
exhaustive_vec_permutations_helper("1", &["1"]);
exhaustive_vec_permutations_helper("12", &["12", "21"]);
exhaustive_vec_permutations_helper("123", &["123", "132", "213", "231", "312", "321"]);
exhaustive_vec_permutations_helper(
"1234",
&[
"1234", "1243", "1324", "1342", "1423", "1432", "2134", "2143", "2314", "2341", "2413",
"2431", "3124", "3142", "3214", "3241", "3412", "3421", "4123", "4132", "4213", "4231",
"4312", "4321",
],
);
exhaustive_vec_permutations_helper(
"12345",
&[
"12345", "12354", "12435", "12453", "12534", "12543", "13245", "13254", "13425",
"13452", "13524", "13542", "14235", "14253", "14325", "14352", "14523", "14532",
"15234", "15243", "15324", "15342", "15423", "15432", "21345", "21354", "21435",
"21453", "21534", "21543", "23145", "23154", "23415", "23451", "23514", "23541",
|
identifier_body
|
exhaustive_vec_permutations.rs
|
use itertools::Itertools;
use malachite_base::vecs::exhaustive_vec_permutations;
fn
|
(cs: &str, out: &[&str]) {
let cs = cs.chars().collect_vec();
let css: Vec<String> = exhaustive_vec_permutations(cs)
.map(|ds| ds.into_iter().collect())
.collect();
assert_eq!(css.iter().map(String::as_str).collect_vec().as_slice(), out);
}
#[test]
fn test_exhaustive_vec_permutations() {
exhaustive_vec_permutations_helper("", &[""]);
exhaustive_vec_permutations_helper("1", &["1"]);
exhaustive_vec_permutations_helper("12", &["12", "21"]);
exhaustive_vec_permutations_helper("123", &["123", "132", "213", "231", "312", "321"]);
exhaustive_vec_permutations_helper(
"1234",
&[
"1234", "1243", "1324", "1342", "1423", "1432", "2134", "2143", "2314", "2341", "2413",
"2431", "3124", "3142", "3214", "3241", "3412", "3421", "4123", "4132", "4213", "4231",
"4312", "4321",
],
);
exhaustive_vec_permutations_helper(
"12345",
&[
"12345", "12354", "12435", "12453", "12534", "12543", "13245", "13254", "13425",
"13452", "13524", "13542", "14235", "14253", "14325", "14352", "14523", "14532",
"15234", "15243", "15324", "15342", "15423", "15432", "21345", "21354", "21435",
"21453", "21534", "21543", "23145", "23154", "23415", "23451", "23514", "23541",
"24135", "24153", "24315", "24351", "24513", "24531", "25134", "25143", "25314",
"25341", "25413", "25431", "31245", "31254", "31425", "31452", "31524", "31542",
"32145", "32154", "32415", "32451", "32514", "32541", "34125", "34152", "34215",
"34251", "34512", "34521", "35124", "35142", "35214", "35241", "35412", "35421",
"41235", "41253", "41325", "41352", "41523", "41532", "42135", "42153", "42315",
"42351", "42513", "42531", "43125", "43152", "43215", "43251", "43512", "43521",
"45123", "45132", "45213", "45231", "45312", "45321", "51234", "51243", "51324",
"51342", "51423", "51432", "52134", "52143", "52314", "52341", "52413", "52431",
"53124", "53142", "53214", "53241", "53412", "53421", "54123", "54132", "54213",
"54231", "54312", "54321",
],
);
exhaustive_vec_permutations_helper(
"abcd",
&[
"abcd", "abdc", "acbd", "acdb", "adbc", "adcb", "bacd", "badc", "bcad", "bcda", "bdac",
"bdca", "cabd", "cadb", "cbad", "cbda", "cdab", "cdba", "dabc", "dacb", "dbac", "dbca",
"dcab", "dcba",
],
);
}
|
exhaustive_vec_permutations_helper
|
identifier_name
|
exhaustive_vec_permutations.rs
|
use itertools::Itertools;
use malachite_base::vecs::exhaustive_vec_permutations;
fn exhaustive_vec_permutations_helper(cs: &str, out: &[&str]) {
let cs = cs.chars().collect_vec();
let css: Vec<String> = exhaustive_vec_permutations(cs)
.map(|ds| ds.into_iter().collect())
.collect();
assert_eq!(css.iter().map(String::as_str).collect_vec().as_slice(), out);
}
#[test]
fn test_exhaustive_vec_permutations() {
exhaustive_vec_permutations_helper("", &[""]);
exhaustive_vec_permutations_helper("1", &["1"]);
exhaustive_vec_permutations_helper("12", &["12", "21"]);
exhaustive_vec_permutations_helper("123", &["123", "132", "213", "231", "312", "321"]);
exhaustive_vec_permutations_helper(
"1234",
&[
"1234", "1243", "1324", "1342", "1423", "1432", "2134", "2143", "2314", "2341", "2413",
"2431", "3124", "3142", "3214", "3241", "3412", "3421", "4123", "4132", "4213", "4231",
"4312", "4321",
],
);
exhaustive_vec_permutations_helper(
"12345",
&[
"12345", "12354", "12435", "12453", "12534", "12543", "13245", "13254", "13425",
"13452", "13524", "13542", "14235", "14253", "14325", "14352", "14523", "14532",
"15234", "15243", "15324", "15342", "15423", "15432", "21345", "21354", "21435",
"21453", "21534", "21543", "23145", "23154", "23415", "23451", "23514", "23541",
"24135", "24153", "24315", "24351", "24513", "24531", "25134", "25143", "25314",
"25341", "25413", "25431", "31245", "31254", "31425", "31452", "31524", "31542",
"32145", "32154", "32415", "32451", "32514", "32541", "34125", "34152", "34215",
"34251", "34512", "34521", "35124", "35142", "35214", "35241", "35412", "35421",
"41235", "41253", "41325", "41352", "41523", "41532", "42135", "42153", "42315",
"42351", "42513", "42531", "43125", "43152", "43215", "43251", "43512", "43521",
"45123", "45132", "45213", "45231", "45312", "45321", "51234", "51243", "51324",
"51342", "51423", "51432", "52134", "52143", "52314", "52341", "52413", "52431",
|
"abcd",
&[
"abcd", "abdc", "acbd", "acdb", "adbc", "adcb", "bacd", "badc", "bcad", "bcda", "bdac",
"bdca", "cabd", "cadb", "cbad", "cbda", "cdab", "cdba", "dabc", "dacb", "dbac", "dbca",
"dcab", "dcba",
],
);
}
|
"53124", "53142", "53214", "53241", "53412", "53421", "54123", "54132", "54213",
"54231", "54312", "54321",
],
);
exhaustive_vec_permutations_helper(
|
random_line_split
|
traits-assoc-type-in-supertrait.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test case where an associated type is referenced from within the
// supertrait definition. Issue #20220.
use std::vec::IntoIter;
pub trait Foo: Iterator<Item=<Self as Foo>::Key> {
type Key;
}
impl Foo for IntoIter<i32> {
type Key = i32;
}
fn
|
<F:Foo<Key=i32>>(f: F) -> i32 {
f.fold(0, |a,b| a + b)
}
fn main() {
let x = sum_foo(vec![11, 10, 1].into_iter());
assert_eq!(x, 22);
}
|
sum_foo
|
identifier_name
|
traits-assoc-type-in-supertrait.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test case where an associated type is referenced from within the
// supertrait definition. Issue #20220.
use std::vec::IntoIter;
pub trait Foo: Iterator<Item=<Self as Foo>::Key> {
type Key;
}
impl Foo for IntoIter<i32> {
type Key = i32;
}
fn sum_foo<F:Foo<Key=i32>>(f: F) -> i32 {
f.fold(0, |a,b| a + b)
}
fn main()
|
{
let x = sum_foo(vec![11, 10, 1].into_iter());
assert_eq!(x, 22);
}
|
identifier_body
|
|
traits-assoc-type-in-supertrait.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Test case where an associated type is referenced from within the
// supertrait definition. Issue #20220.
use std::vec::IntoIter;
|
pub trait Foo: Iterator<Item=<Self as Foo>::Key> {
type Key;
}
impl Foo for IntoIter<i32> {
type Key = i32;
}
fn sum_foo<F:Foo<Key=i32>>(f: F) -> i32 {
f.fold(0, |a,b| a + b)
}
fn main() {
let x = sum_foo(vec![11, 10, 1].into_iter());
assert_eq!(x, 22);
}
|
random_line_split
|
|
panic.rs
|
//! Intrinsics for panic handling
use interrupt;
#[lang = "eh_personality"]
#[no_mangle]
pub extern "C" fn rust_eh_personality() {}
/// Required to handle panics
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn rust_begin_unwind(fmt: ::core::fmt::Arguments, file: &str, line: u32) ->! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[lang = "oom"]
#[no_mangle]
pub extern fn rust_oom() ->! {
panic!("kernel memory allocation failed");
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn
|
() ->! {
loop {
unsafe { interrupt::halt(); }
}
}
|
_Unwind_Resume
|
identifier_name
|
panic.rs
|
//! Intrinsics for panic handling
use interrupt;
#[lang = "eh_personality"]
#[no_mangle]
pub extern "C" fn rust_eh_personality() {}
/// Required to handle panics
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn rust_begin_unwind(fmt: ::core::fmt::Arguments, file: &str, line: u32) ->!
|
#[lang = "oom"]
#[no_mangle]
pub extern fn rust_oom() ->! {
panic!("kernel memory allocation failed");
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() ->! {
loop {
unsafe { interrupt::halt(); }
}
}
|
{
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
|
identifier_body
|
panic.rs
|
//! Intrinsics for panic handling
use interrupt;
#[lang = "eh_personality"]
#[no_mangle]
pub extern "C" fn rust_eh_personality() {}
/// Required to handle panics
|
#[lang = "panic_fmt"]
#[no_mangle]
pub extern "C" fn rust_begin_unwind(fmt: ::core::fmt::Arguments, file: &str, line: u32) ->! {
println!("PANIC: {}", fmt);
println!("FILE: {}", file);
println!("LINE: {}", line);
unsafe { interrupt::stack_trace(); }
println!("HALT");
loop {
unsafe { interrupt::halt(); }
}
}
#[lang = "oom"]
#[no_mangle]
pub extern fn rust_oom() ->! {
panic!("kernel memory allocation failed");
}
#[allow(non_snake_case)]
#[no_mangle]
/// Required to handle panics
pub extern "C" fn _Unwind_Resume() ->! {
loop {
unsafe { interrupt::halt(); }
}
}
|
random_line_split
|
|
dataflow.rs
|
})
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool {
assert!(n!= ast::DUMMY_NODE_ID);
self.nodeid_to_index.contains_key(&n)
}
}
impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> {
fn pre(&self,
ps: &mut pprust::State,
node: pprust::AnnNode) -> io::IoResult<()> {
let id = match node {
pprust::NodeIdent(_) | pprust::NodeName(_) => 0,
pprust::NodeExpr(expr) => expr.id,
pprust::NodeBlock(blk) => blk.id,
pprust::NodeItem(_) => 0,
pprust::NodePat(pat) => pat.id
};
if self.has_bitset_for_nodeid(id) {
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = self.on_entry.slice(start, end);
let entry_str = bits_to_string(on_entry);
let gens = self.gens.slice(start, end);
let gens_str = if gens.iter().any(|&u| u!= 0) {
format!(" gen: {}", bits_to_string(gens))
} else {
"".to_string()
};
let kills = self.kills.slice(start, end);
let kills_str = if kills.iter().any(|&u| u!= 0) {
format!(" kill: {}", bits_to_string(kills))
} else {
"".to_string()
};
try!(ps.synth_comment(format!("id {}: {}{}{}", id, entry_str,
gens_str, kills_str)));
try!(pp::space(&mut ps.s));
}
Ok(())
}
}
fn build_nodeid_to_index(decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG) -> NodeMap<CFGIndex> {
let mut index = NodeMap::new();
// FIXME (#6298): Would it be better to fold formals from decl
// into cfg itself? i.e. introduce a fn-based flow-graph in
// addition to the current block-based flow-graph, rather than
// have to put traversals like this here?
match decl {
None => {}
Some(decl) => add_entries_from_fn_decl(&mut index, decl, cfg.entry)
}
cfg.graph.each_node(|node_idx, node| {
if node.data.id!= ast::DUMMY_NODE_ID {
index.insert(node.data.id, node_idx);
}
true
});
return index;
fn add_entries_from_fn_decl(index: &mut NodeMap<CFGIndex>,
decl: &ast::FnDecl,
entry: CFGIndex) {
//! add mappings from the ast nodes for the formal bindings to
//! the entry-node in the graph.
struct Formals<'a> {
entry: CFGIndex,
index: &'a mut NodeMap<CFGIndex>,
}
let mut formals = Formals { entry: entry, index: index };
visit::walk_fn_decl(&mut formals, decl);
impl<'a, 'v> visit::Visitor<'v> for Formals<'a> {
fn visit_pat(&mut self, p: &ast::Pat) {
self.index.insert(p.id, self.entry);
visit::walk_pat(self, p)
}
}
}
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
pub fn new(tcx: &'a ty::ctxt<'tcx>,
analysis_name: &'static str,
decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + uint::BITS - 1) / uint::BITS;
let num_nodes = cfg.graph.all_nodes().len();
debug!("DataFlowContext::new(analysis_name: {}, id_range={:?}, \
bits_per_id={}, words_per_id={}) \
num_nodes: {}",
analysis_name, id_range, bits_per_id, words_per_id,
num_nodes);
let entry = if oper.initial_value() { uint::MAX } else {0};
let gens: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let kills: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let on_entry: Vec<_> = repeat(entry).take(num_nodes * words_per_id).collect();
let nodeid_to_index = build_nodeid_to_index(decl, cfg);
DataFlowContext {
tcx: tcx,
analysis_name: analysis_name,
words_per_id: words_per_id,
nodeid_to_index: nodeid_to_index,
bits_per_id: bits_per_id,
oper: oper,
gens: gens,
kills: kills,
on_entry: on_entry
}
}
pub fn add_gen(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` generates `bit`
debug!("{} add_gen(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice_mut(start, end);
set_bit(gens, bit);
}
pub fn add_kill(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` kills `bit`
debug!("{} add_kill(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let kills = self.kills.slice_mut(start, end);
set_bit(kills, bit);
}
fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [uint]) {
//! Applies the gen and kill sets for `cfgidx` to `bits`
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [before]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
assert!(self.bits_per_id > 0);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
bitwise(bits, gens, &Union);
let kills = self.kills.slice(start, end);
bitwise(bits, kills, &Subtract);
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [after]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
}
fn compute_id_range(&self, cfgidx: CFGIndex) -> (uint, uint) {
let n = cfgidx.node_id();
let start = n * self.words_per_id;
let end = start + self.words_per_id;
assert!(start < self.gens.len());
assert!(end <= self.gens.len());
assert!(self.gens.len() == self.kills.len());
assert!(self.gens.len() == self.on_entry.len());
(start, end)
}
pub fn each_bit_on_entry<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry to `id`.
//! Only useful after `propagate()` has been called.
if!self.has_bitset_for_nodeid(id) {
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
self.each_bit_for_node(Entry, cfgidx, f)
}
pub fn each_bit_for_node<F>(&self, e: EntryOrExit, cfgidx: CFGIndex, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry/exit to `cfgidx`.
//! Only useful after `propagate()` has been called.
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = self.on_entry.slice(start, end);
let temp_bits;
let slice = match e {
Entry => on_entry,
Exit => {
let mut t = on_entry.to_vec();
self.apply_gen_kill(cfgidx, t.as_mut_slice());
temp_bits = t;
&temp_bits[]
}
};
debug!("{} each_bit_for_node({:?}, cfgidx={:?}) bits={}",
self.analysis_name, e, cfgidx, bits_to_string(slice));
self.each_bit(slice, f)
}
pub fn each_gen_bit<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit in the gen set for `id`.
if!self.has_bitset_for_nodeid(id) {
return true;
}
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
debug!("{} each_gen_bit(id={}, gens={})",
self.analysis_name, id, bits_to_string(gens));
self.each_bit(gens, f)
}
fn each_bit<F>(&self, words: &[uint], mut f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Helper for iterating over the bits in a bit set.
//! Returns false on the first call to `f` that returns false;
//! if all calls to `f` return true, then returns true.
for (word_index, &word) in words.iter().enumerate() {
if word!= 0 {
let base_index = word_index * uint::BITS;
for offset in range(0u, uint::BITS) {
let bit = 1 << offset;
if (word & bit)!= 0 {
// NB: we round up the total number of bits
// that we store in any given bit set so that
// it is an even multiple of uint::BITS. This
// means that there may be some stray bits at
// the end that do not correspond to any
// actual value. So before we callback, check
// whether the bit_index is greater than the
// actual value the user specified and stop
// iterating if so.
let bit_index = base_index + offset;
if bit_index >= self.bits_per_id {
return true;
} else if!f(bit_index) {
return false;
}
}
}
}
}
return true;
}
pub fn add_kills_from_flow_exits(&mut self, cfg: &cfg::CFG) {
//! Whenever you have a `break` or `continue` statement, flow
//! exits through any number of enclosing scopes on its way to
//! the new destination. This function infers the kill bits of
//! those control operators based on the kill bits associated
//! with those scopes.
//!
//! This is usually called (if it is called at all), after
//! all add_gen and add_kill calls, but before propagate.
debug!("{} add_kills_from_flow_exits", self.analysis_name);
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return;
}
cfg.graph.each_edge(|_edge_index, edge| {
let flow_exit = edge.source();
let (start, end) = self.compute_id_range(flow_exit);
let mut orig_kills = self.kills.slice(start, end).to_vec();
let mut changed = false;
for &node_id in edge.data.exiting_scopes.iter() {
let opt_cfg_idx = self.nodeid_to_index.get(&node_id).map(|&i|i);
match opt_cfg_idx {
Some(cfg_idx) => {
let (start, end) = self.compute_id_range(cfg_idx);
let kills = self.kills.slice(start, end);
if bitwise(orig_kills.as_mut_slice(), kills, &Union) {
changed = true;
}
}
None => {
debug!("{} add_kills_from_flow_exits flow_exit={:?} \
no cfg_idx for exiting_scope={}",
self.analysis_name, flow_exit, node_id);
}
}
}
if changed {
let bits = self.kills.slice_mut(start, end);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [before]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
bits.clone_from_slice(&orig_kills[]);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [after]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
}
true
});
}
}
impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
// ^^^^^^^^^^^^^ only needed for pretty printing
pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &ast::Block) {
//! Performs the data flow analysis.
if self.bits_per_id == 0 {
// Optimize the surprisingly common degenerate case.
return;
}
{
let words_per_id = self.words_per_id;
let mut propcx = PropagationContext {
dfcx: &mut *self,
changed: true
};
let mut temp: Vec<_> = repeat(0u).take(words_per_id).collect();
while propcx.changed {
propcx.changed = false;
propcx.reset(temp.as_mut_slice());
propcx.walk_cfg(cfg, temp.as_mut_slice());
}
}
debug!("Dataflow result for {}:", self.analysis_name);
debug!("{}", {
self.pretty_print_to(box io::stderr(), blk).unwrap();
""
});
}
fn pretty_print_to(&self, wr: Box<io::Writer+'static>,
blk: &ast::Block) -> io::IoResult<()> {
let mut ps = pprust::rust_printer_annotated(wr, self);
try!(ps.cbox(pprust::indent_unit));
try!(ps.ibox(0u));
try!(ps.print_block(blk));
pp::eof(&mut ps.s)
}
}
impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
debug!("DataFlowContext::walk_cfg(in_out={}) {}",
bits_to_string(in_out), self.dfcx.analysis_name);
assert!(self.dfcx.bits_per_id > 0);
cfg.graph.each_node(|node_index, node| {
debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}",
node_index, node.data.id, bits_to_string(in_out));
let (start, end) = self.dfcx.compute_id_range(node_index);
// Initialize local bitvector with state on-entry.
in_out.clone_from_slice(self.dfcx.on_entry.slice(start, end));
// Compute state on-exit by applying transfer function to
// state on-entry.
self.dfcx.apply_gen_kill(node_index, in_out);
// Propagate state on-exit from node into its successors.
self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index);
true // continue to next node
});
}
fn reset(&mut self, bits: &mut [uint]) {
let e = if self.dfcx.oper.initial_value() {uint::MAX} else {0};
for b in bits.iter_mut() {
*b = e;
}
}
fn propagate_bits_into_graph_successors_of(&mut self,
pred_bits: &[uint],
|
cfg: &cfg::CFG,
cfgidx: CFGIndex) {
cfg.graph.each_outgoing_edge(cfgidx, |_e_idx, edge| {
|
random_line_split
|
|
dataflow.rs
|
}
pub trait BitwiseOperator {
/// Joins two predecessor bits together, typically either `|` or `&`
fn join(&self, succ: uint, pred: uint) -> uint;
}
/// Parameterization for the precise form of data flow that is used.
pub trait DataFlowOperator : BitwiseOperator {
/// Specifies the initial value for each bit in the `on_entry` set
fn initial_value(&self) -> bool;
}
struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> {
dfcx: &'a mut DataFlowContext<'b, 'tcx, O>,
changed: bool
}
fn to_cfgidx_or_die(id: ast::NodeId, index: &NodeMap<CFGIndex>) -> CFGIndex {
let opt_cfgindex = index.get(&id).map(|&i|i);
opt_cfgindex.unwrap_or_else(|| {
panic!("nodeid_to_index does not have entry for NodeId {}", id);
})
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool {
assert!(n!= ast::DUMMY_NODE_ID);
self.nodeid_to_index.contains_key(&n)
}
}
impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> {
fn pre(&self,
ps: &mut pprust::State,
node: pprust::AnnNode) -> io::IoResult<()> {
let id = match node {
pprust::NodeIdent(_) | pprust::NodeName(_) => 0,
pprust::NodeExpr(expr) => expr.id,
pprust::NodeBlock(blk) => blk.id,
pprust::NodeItem(_) => 0,
pprust::NodePat(pat) => pat.id
};
if self.has_bitset_for_nodeid(id) {
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = self.on_entry.slice(start, end);
let entry_str = bits_to_string(on_entry);
let gens = self.gens.slice(start, end);
let gens_str = if gens.iter().any(|&u| u!= 0) {
format!(" gen: {}", bits_to_string(gens))
} else {
"".to_string()
};
let kills = self.kills.slice(start, end);
let kills_str = if kills.iter().any(|&u| u!= 0)
|
else {
"".to_string()
};
try!(ps.synth_comment(format!("id {}: {}{}{}", id, entry_str,
gens_str, kills_str)));
try!(pp::space(&mut ps.s));
}
Ok(())
}
}
fn build_nodeid_to_index(decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG) -> NodeMap<CFGIndex> {
let mut index = NodeMap::new();
// FIXME (#6298): Would it be better to fold formals from decl
// into cfg itself? i.e. introduce a fn-based flow-graph in
// addition to the current block-based flow-graph, rather than
// have to put traversals like this here?
match decl {
None => {}
Some(decl) => add_entries_from_fn_decl(&mut index, decl, cfg.entry)
}
cfg.graph.each_node(|node_idx, node| {
if node.data.id!= ast::DUMMY_NODE_ID {
index.insert(node.data.id, node_idx);
}
true
});
return index;
fn add_entries_from_fn_decl(index: &mut NodeMap<CFGIndex>,
decl: &ast::FnDecl,
entry: CFGIndex) {
//! add mappings from the ast nodes for the formal bindings to
//! the entry-node in the graph.
struct Formals<'a> {
entry: CFGIndex,
index: &'a mut NodeMap<CFGIndex>,
}
let mut formals = Formals { entry: entry, index: index };
visit::walk_fn_decl(&mut formals, decl);
impl<'a, 'v> visit::Visitor<'v> for Formals<'a> {
fn visit_pat(&mut self, p: &ast::Pat) {
self.index.insert(p.id, self.entry);
visit::walk_pat(self, p)
}
}
}
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
pub fn new(tcx: &'a ty::ctxt<'tcx>,
analysis_name: &'static str,
decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + uint::BITS - 1) / uint::BITS;
let num_nodes = cfg.graph.all_nodes().len();
debug!("DataFlowContext::new(analysis_name: {}, id_range={:?}, \
bits_per_id={}, words_per_id={}) \
num_nodes: {}",
analysis_name, id_range, bits_per_id, words_per_id,
num_nodes);
let entry = if oper.initial_value() { uint::MAX } else {0};
let gens: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let kills: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let on_entry: Vec<_> = repeat(entry).take(num_nodes * words_per_id).collect();
let nodeid_to_index = build_nodeid_to_index(decl, cfg);
DataFlowContext {
tcx: tcx,
analysis_name: analysis_name,
words_per_id: words_per_id,
nodeid_to_index: nodeid_to_index,
bits_per_id: bits_per_id,
oper: oper,
gens: gens,
kills: kills,
on_entry: on_entry
}
}
pub fn add_gen(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` generates `bit`
debug!("{} add_gen(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice_mut(start, end);
set_bit(gens, bit);
}
pub fn add_kill(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` kills `bit`
debug!("{} add_kill(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let kills = self.kills.slice_mut(start, end);
set_bit(kills, bit);
}
fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [uint]) {
//! Applies the gen and kill sets for `cfgidx` to `bits`
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [before]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
assert!(self.bits_per_id > 0);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
bitwise(bits, gens, &Union);
let kills = self.kills.slice(start, end);
bitwise(bits, kills, &Subtract);
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [after]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
}
fn compute_id_range(&self, cfgidx: CFGIndex) -> (uint, uint) {
let n = cfgidx.node_id();
let start = n * self.words_per_id;
let end = start + self.words_per_id;
assert!(start < self.gens.len());
assert!(end <= self.gens.len());
assert!(self.gens.len() == self.kills.len());
assert!(self.gens.len() == self.on_entry.len());
(start, end)
}
pub fn each_bit_on_entry<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry to `id`.
//! Only useful after `propagate()` has been called.
if!self.has_bitset_for_nodeid(id) {
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
self.each_bit_for_node(Entry, cfgidx, f)
}
pub fn each_bit_for_node<F>(&self, e: EntryOrExit, cfgidx: CFGIndex, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry/exit to `cfgidx`.
//! Only useful after `propagate()` has been called.
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = self.on_entry.slice(start, end);
let temp_bits;
let slice = match e {
Entry => on_entry,
Exit => {
let mut t = on_entry.to_vec();
self.apply_gen_kill(cfgidx, t.as_mut_slice());
temp_bits = t;
&temp_bits[]
}
};
debug!("{} each_bit_for_node({:?}, cfgidx={:?}) bits={}",
self.analysis_name, e, cfgidx, bits_to_string(slice));
self.each_bit(slice, f)
}
pub fn each_gen_bit<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit in the gen set for `id`.
if!self.has_bitset_for_nodeid(id) {
return true;
}
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
debug!("{} each_gen_bit(id={}, gens={})",
self.analysis_name, id, bits_to_string(gens));
self.each_bit(gens, f)
}
fn each_bit<F>(&self, words: &[uint], mut f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Helper for iterating over the bits in a bit set.
//! Returns false on the first call to `f` that returns false;
//! if all calls to `f` return true, then returns true.
for (word_index, &word) in words.iter().enumerate() {
if word!= 0 {
let base_index = word_index * uint::BITS;
for offset in range(0u, uint::BITS) {
let bit = 1 << offset;
if (word & bit)!= 0 {
// NB: we round up the total number of bits
// that we store in any given bit set so that
// it is an even multiple of uint::BITS. This
// means that there may be some stray bits at
// the end that do not correspond to any
// actual value. So before we callback, check
// whether the bit_index is greater than the
// actual value the user specified and stop
// iterating if so.
let bit_index = base_index + offset;
if bit_index >= self.bits_per_id {
return true;
} else if!f(bit_index) {
return false;
}
}
}
}
}
return true;
}
pub fn add_kills_from_flow_exits(&mut self, cfg: &cfg::CFG) {
//! Whenever you have a `break` or `continue` statement, flow
//! exits through any number of enclosing scopes on its way to
//! the new destination. This function infers the kill bits of
//! those control operators based on the kill bits associated
//! with those scopes.
//!
//! This is usually called (if it is called at all), after
//! all add_gen and add_kill calls, but before propagate.
debug!("{} add_kills_from_flow_exits", self.analysis_name);
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return;
}
cfg.graph.each_edge(|_edge_index, edge| {
let flow_exit = edge.source();
let (start, end) = self.compute_id_range(flow_exit);
let mut orig_kills = self.kills.slice(start, end).to_vec();
let mut changed = false;
for &node_id in edge.data.exiting_scopes.iter() {
let opt_cfg_idx = self.nodeid_to_index.get(&node_id).map(|&i|i);
match opt_cfg_idx {
Some(cfg_idx) => {
let (start, end) = self.compute_id_range(cfg_idx);
let kills = self.kills.slice(start, end);
if bitwise(orig_kills.as_mut_slice(), kills, &Union) {
changed = true;
}
}
None => {
debug!("{} add_kills_from_flow_exits flow_exit={:?} \
no cfg_idx for exiting_scope={}",
self.analysis_name, flow_exit, node_id);
}
}
}
if changed {
let bits = self.kills.slice_mut(start, end);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [before]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
bits.clone_from_slice(&orig_kills[]);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [after]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
}
true
});
}
}
impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
// ^^^^^^^^^^^^^ only needed for pretty printing
pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &ast::Block) {
//! Performs the data flow analysis.
if self.bits_per_id == 0 {
// Optimize the surprisingly common degenerate case.
return;
}
{
let words_per_id = self.words_per_id;
let mut propcx = PropagationContext {
dfcx: &mut *self,
changed: true
};
let mut temp: Vec<_> = repeat(0u).take(words_per_id).collect();
while propcx.changed {
propcx.changed = false;
propcx.reset(temp.as_mut_slice());
propcx.walk_cfg(cfg, temp.as_mut_slice());
}
}
debug!("Dataflow result for {}:", self.analysis_name);
debug!("{}", {
self.pretty_print_to(box io::stderr(), blk).unwrap();
""
});
}
fn pretty_print_to(&self, wr: Box<io::Writer+'static>,
blk: &ast::Block) -> io::IoResult<()> {
let mut ps = pprust::rust_printer_annotated(wr, self);
try!(ps.cbox(pprust::indent_unit));
try!(ps.ibox(0u));
try!(ps.print_block(blk));
pp::eof(&mut ps.s)
}
}
impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
debug!("DataFlowContext::walk_cfg(in_out={}) {}",
bits_to_string(in_out), self.dfcx.analysis_name);
assert!(self.dfcx.bits_per_id > 0);
cfg.graph.each_node(|node_index, node| {
debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}",
node_index, node.data.id, bits_to_string(in_out));
let (start, end) = self.dfcx.compute_id_range(node_index);
// Initialize local bitvector with state on-entry.
in_out.clone_from_slice(self.dfcx.on_entry.slice(start, end));
// Compute state on-exit by applying transfer function to
// state on-entry.
self.dfcx.apply_gen_kill(node_index, in_out);
// Propagate state on-exit from node into its successors.
self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index);
true // continue to next node
});
}
fn reset(&mut self, bits: &mut [uint]) {
let e = if self.dfcx.
|
{
format!(" kill: {}", bits_to_string(kills))
}
|
conditional_block
|
dataflow.rs
|
//! the entry-node in the graph.
struct Formals<'a> {
entry: CFGIndex,
index: &'a mut NodeMap<CFGIndex>,
}
let mut formals = Formals { entry: entry, index: index };
visit::walk_fn_decl(&mut formals, decl);
impl<'a, 'v> visit::Visitor<'v> for Formals<'a> {
fn visit_pat(&mut self, p: &ast::Pat) {
self.index.insert(p.id, self.entry);
visit::walk_pat(self, p)
}
}
}
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
pub fn new(tcx: &'a ty::ctxt<'tcx>,
analysis_name: &'static str,
decl: Option<&ast::FnDecl>,
cfg: &cfg::CFG,
oper: O,
id_range: IdRange,
bits_per_id: uint) -> DataFlowContext<'a, 'tcx, O> {
let words_per_id = (bits_per_id + uint::BITS - 1) / uint::BITS;
let num_nodes = cfg.graph.all_nodes().len();
debug!("DataFlowContext::new(analysis_name: {}, id_range={:?}, \
bits_per_id={}, words_per_id={}) \
num_nodes: {}",
analysis_name, id_range, bits_per_id, words_per_id,
num_nodes);
let entry = if oper.initial_value() { uint::MAX } else {0};
let gens: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let kills: Vec<_> = repeat(0).take(num_nodes * words_per_id).collect();
let on_entry: Vec<_> = repeat(entry).take(num_nodes * words_per_id).collect();
let nodeid_to_index = build_nodeid_to_index(decl, cfg);
DataFlowContext {
tcx: tcx,
analysis_name: analysis_name,
words_per_id: words_per_id,
nodeid_to_index: nodeid_to_index,
bits_per_id: bits_per_id,
oper: oper,
gens: gens,
kills: kills,
on_entry: on_entry
}
}
pub fn add_gen(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` generates `bit`
debug!("{} add_gen(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice_mut(start, end);
set_bit(gens, bit);
}
pub fn add_kill(&mut self, id: ast::NodeId, bit: uint) {
//! Indicates that `id` kills `bit`
debug!("{} add_kill(id={}, bit={})",
self.analysis_name, id, bit);
assert!(self.nodeid_to_index.contains_key(&id));
assert!(self.bits_per_id > 0);
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let kills = self.kills.slice_mut(start, end);
set_bit(kills, bit);
}
fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [uint]) {
//! Applies the gen and kill sets for `cfgidx` to `bits`
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [before]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
assert!(self.bits_per_id > 0);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
bitwise(bits, gens, &Union);
let kills = self.kills.slice(start, end);
bitwise(bits, kills, &Subtract);
debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [after]",
self.analysis_name, cfgidx, mut_bits_to_string(bits));
}
fn compute_id_range(&self, cfgidx: CFGIndex) -> (uint, uint) {
let n = cfgidx.node_id();
let start = n * self.words_per_id;
let end = start + self.words_per_id;
assert!(start < self.gens.len());
assert!(end <= self.gens.len());
assert!(self.gens.len() == self.kills.len());
assert!(self.gens.len() == self.on_entry.len());
(start, end)
}
pub fn each_bit_on_entry<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry to `id`.
//! Only useful after `propagate()` has been called.
if!self.has_bitset_for_nodeid(id) {
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
self.each_bit_for_node(Entry, cfgidx, f)
}
pub fn each_bit_for_node<F>(&self, e: EntryOrExit, cfgidx: CFGIndex, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit that is set on entry/exit to `cfgidx`.
//! Only useful after `propagate()` has been called.
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = self.on_entry.slice(start, end);
let temp_bits;
let slice = match e {
Entry => on_entry,
Exit => {
let mut t = on_entry.to_vec();
self.apply_gen_kill(cfgidx, t.as_mut_slice());
temp_bits = t;
&temp_bits[]
}
};
debug!("{} each_bit_for_node({:?}, cfgidx={:?}) bits={}",
self.analysis_name, e, cfgidx, bits_to_string(slice));
self.each_bit(slice, f)
}
pub fn each_gen_bit<F>(&self, id: ast::NodeId, f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Iterates through each bit in the gen set for `id`.
if!self.has_bitset_for_nodeid(id) {
return true;
}
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return true;
}
let cfgidx = to_cfgidx_or_die(id, &self.nodeid_to_index);
let (start, end) = self.compute_id_range(cfgidx);
let gens = self.gens.slice(start, end);
debug!("{} each_gen_bit(id={}, gens={})",
self.analysis_name, id, bits_to_string(gens));
self.each_bit(gens, f)
}
fn each_bit<F>(&self, words: &[uint], mut f: F) -> bool where
F: FnMut(uint) -> bool,
{
//! Helper for iterating over the bits in a bit set.
//! Returns false on the first call to `f` that returns false;
//! if all calls to `f` return true, then returns true.
for (word_index, &word) in words.iter().enumerate() {
if word!= 0 {
let base_index = word_index * uint::BITS;
for offset in range(0u, uint::BITS) {
let bit = 1 << offset;
if (word & bit)!= 0 {
// NB: we round up the total number of bits
// that we store in any given bit set so that
// it is an even multiple of uint::BITS. This
// means that there may be some stray bits at
// the end that do not correspond to any
// actual value. So before we callback, check
// whether the bit_index is greater than the
// actual value the user specified and stop
// iterating if so.
let bit_index = base_index + offset;
if bit_index >= self.bits_per_id {
return true;
} else if!f(bit_index) {
return false;
}
}
}
}
}
return true;
}
pub fn add_kills_from_flow_exits(&mut self, cfg: &cfg::CFG) {
//! Whenever you have a `break` or `continue` statement, flow
//! exits through any number of enclosing scopes on its way to
//! the new destination. This function infers the kill bits of
//! those control operators based on the kill bits associated
//! with those scopes.
//!
//! This is usually called (if it is called at all), after
//! all add_gen and add_kill calls, but before propagate.
debug!("{} add_kills_from_flow_exits", self.analysis_name);
if self.bits_per_id == 0 {
// Skip the surprisingly common degenerate case. (Note
// compute_id_range requires self.words_per_id > 0.)
return;
}
cfg.graph.each_edge(|_edge_index, edge| {
let flow_exit = edge.source();
let (start, end) = self.compute_id_range(flow_exit);
let mut orig_kills = self.kills.slice(start, end).to_vec();
let mut changed = false;
for &node_id in edge.data.exiting_scopes.iter() {
let opt_cfg_idx = self.nodeid_to_index.get(&node_id).map(|&i|i);
match opt_cfg_idx {
Some(cfg_idx) => {
let (start, end) = self.compute_id_range(cfg_idx);
let kills = self.kills.slice(start, end);
if bitwise(orig_kills.as_mut_slice(), kills, &Union) {
changed = true;
}
}
None => {
debug!("{} add_kills_from_flow_exits flow_exit={:?} \
no cfg_idx for exiting_scope={}",
self.analysis_name, flow_exit, node_id);
}
}
}
if changed {
let bits = self.kills.slice_mut(start, end);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [before]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
bits.clone_from_slice(&orig_kills[]);
debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [after]",
self.analysis_name, flow_exit, mut_bits_to_string(bits));
}
true
});
}
}
impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
// ^^^^^^^^^^^^^ only needed for pretty printing
pub fn propagate(&mut self, cfg: &cfg::CFG, blk: &ast::Block) {
//! Performs the data flow analysis.
if self.bits_per_id == 0 {
// Optimize the surprisingly common degenerate case.
return;
}
{
let words_per_id = self.words_per_id;
let mut propcx = PropagationContext {
dfcx: &mut *self,
changed: true
};
let mut temp: Vec<_> = repeat(0u).take(words_per_id).collect();
while propcx.changed {
propcx.changed = false;
propcx.reset(temp.as_mut_slice());
propcx.walk_cfg(cfg, temp.as_mut_slice());
}
}
debug!("Dataflow result for {}:", self.analysis_name);
debug!("{}", {
self.pretty_print_to(box io::stderr(), blk).unwrap();
""
});
}
fn pretty_print_to(&self, wr: Box<io::Writer+'static>,
blk: &ast::Block) -> io::IoResult<()> {
let mut ps = pprust::rust_printer_annotated(wr, self);
try!(ps.cbox(pprust::indent_unit));
try!(ps.ibox(0u));
try!(ps.print_block(blk));
pp::eof(&mut ps.s)
}
}
impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
fn walk_cfg(&mut self,
cfg: &cfg::CFG,
in_out: &mut [uint]) {
debug!("DataFlowContext::walk_cfg(in_out={}) {}",
bits_to_string(in_out), self.dfcx.analysis_name);
assert!(self.dfcx.bits_per_id > 0);
cfg.graph.each_node(|node_index, node| {
debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}",
node_index, node.data.id, bits_to_string(in_out));
let (start, end) = self.dfcx.compute_id_range(node_index);
// Initialize local bitvector with state on-entry.
in_out.clone_from_slice(self.dfcx.on_entry.slice(start, end));
// Compute state on-exit by applying transfer function to
// state on-entry.
self.dfcx.apply_gen_kill(node_index, in_out);
// Propagate state on-exit from node into its successors.
self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index);
true // continue to next node
});
}
fn reset(&mut self, bits: &mut [uint]) {
let e = if self.dfcx.oper.initial_value() {uint::MAX} else {0};
for b in bits.iter_mut() {
*b = e;
}
}
fn propagate_bits_into_graph_successors_of(&mut self,
pred_bits: &[uint],
cfg: &cfg::CFG,
cfgidx: CFGIndex) {
cfg.graph.each_outgoing_edge(cfgidx, |_e_idx, edge| {
self.propagate_bits_into_entry_set_for(pred_bits, edge);
true
});
}
fn propagate_bits_into_entry_set_for(&mut self,
pred_bits: &[uint],
edge: &cfg::CFGEdge) {
let source = edge.source();
let cfgidx = edge.target();
debug!("{} propagate_bits_into_entry_set_for(pred_bits={}, {:?} to {:?})",
self.dfcx.analysis_name, bits_to_string(pred_bits), source, cfgidx);
assert!(self.dfcx.bits_per_id > 0);
let (start, end) = self.dfcx.compute_id_range(cfgidx);
let changed = {
// (scoping mutable borrow of self.dfcx.on_entry)
let on_entry = self.dfcx.on_entry.slice_mut(start, end);
bitwise(on_entry, pred_bits, &self.dfcx.oper)
};
if changed {
debug!("{} changed entry set for {:?} to {}",
self.dfcx.analysis_name, cfgidx,
bits_to_string(self.dfcx.on_entry.slice(start, end)));
self.changed = true;
}
}
}
fn mut_bits_to_string(words: &mut [uint]) -> String {
bits_to_string(words)
}
fn bits_to_string(words: &[uint]) -> String {
let mut result = String::new();
let mut sep = '[';
// Note: this is a little endian printout of bytes.
for &word in words.iter() {
let mut v = word;
for _ in range(0u, uint::BYTES) {
result.push(sep);
result.push_str(&format!("{:02x}", v & 0xFF)[]);
v >>= 8;
sep = '-';
}
}
result.push(']');
return result
}
#[inline]
fn bitwise<Op:BitwiseOperator>(out_vec: &mut [uint],
in_vec: &[uint],
op: &Op) -> bool {
assert_eq!(out_vec.len(), in_vec.len());
let mut changed = false;
for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec.iter()) {
let old_val = *out_elt;
let new_val = op.join(old_val, *in_elt);
*out_elt = new_val;
changed |= old_val!= new_val;
}
changed
}
fn set_bit(words: &mut [uint], bit: uint) -> bool {
debug!("set_bit: words={} bit={}",
mut_bits_to_string(words), bit_str(bit));
let word = bit / uint::BITS;
let bit_in_word = bit % uint::BITS;
let bit_mask = 1 << bit_in_word;
debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, word);
let oldv = words[word];
let newv = oldv | bit_mask;
words[word] = newv;
oldv!= newv
}
fn bit_str(bit: uint) -> String {
let byte = bit >> 8;
let lobits = 1u << (bit & 0xFF);
format!("[{}:{}-{:02x}]", bit, byte, lobits)
}
struct
|
Union
|
identifier_name
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.