file_name
large_stringlengths 4
69
| prefix
large_stringlengths 0
26.7k
| suffix
large_stringlengths 0
24.8k
| middle
large_stringlengths 0
2.12k
| fim_type
large_stringclasses 4
values |
---|---|---|---|---|
mod.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
|
mod fetch;
mod miner_service;
mod snapshot_service;
mod sync_provider;
mod update_service;
pub use self::dapps::TestDappsService;
pub use self::fetch::TestFetch;
pub use self::miner_service::TestMinerService;
pub use self::snapshot_service::TestSnapshotService;
pub use self::sync_provider::{Config, TestSyncProvider};
pub use self::update_service::TestUpdater;
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Test rpc services.
mod dapps;
|
random_line_split
|
data.rs
|
indirect_function", name = "indirect-function")]
pub fn indirect_function_lisp(object: LispObject, _noerror: LispObject) -> LispObject {
match object.as_symbol() {
None => object,
Some(symbol) => symbol.get_indirect_function(),
}
}
/// Return a symbol representing the type of OBJECT.
/// The symbol returned names the object's basic type;
/// for example, (type-of 1) returns `integer'.
#[lisp_fn]
pub fn type_of(object: LispObject) -> LispObject {
match object.get_type() {
Lisp_Type::Lisp_Cons => Qcons,
Lisp_Type::Lisp_Int0 | Lisp_Type::Lisp_Int1 => Qinteger,
Lisp_Type::Lisp_Symbol => Qsymbol,
Lisp_Type::Lisp_String => Qstring,
Lisp_Type::Lisp_Float => Qfloat,
Lisp_Type::Lisp_Misc => {
let m = object.as_misc().unwrap();
match m.get_type() {
Lisp_Misc_Type::Lisp_Misc_Marker => Qmarker,
Lisp_Misc_Type::Lisp_Misc_Overlay => Qoverlay,
Lisp_Misc_Type::Lisp_Misc_Finalizer => Qfinalizer,
Lisp_Misc_Type::Lisp_Misc_User_Ptr => Quser_ptr,
_ => Qnone,
}
}
Lisp_Type::Lisp_Vectorlike => {
let vec = unsafe { object.as_vectorlike_unchecked() };
match vec.pseudovector_type() {
pvec_type::PVEC_NORMAL_VECTOR => Qvector,
pvec_type::PVEC_WINDOW_CONFIGURATION => Qwindow_configuration,
pvec_type::PVEC_PROCESS => Qprocess,
pvec_type::PVEC_WINDOW => Qwindow,
pvec_type::PVEC_SUBR => Qsubr,
pvec_type::PVEC_COMPILED => Qcompiled_function,
pvec_type::PVEC_BUFFER => Qbuffer,
pvec_type::PVEC_CHAR_TABLE => Qchar_table,
pvec_type::PVEC_BOOL_VECTOR => Qbool_vector,
pvec_type::PVEC_FRAME => Qframe,
pvec_type::PVEC_HASH_TABLE => Qhash_table,
pvec_type::PVEC_THREAD => Qthread,
pvec_type::PVEC_MUTEX => Qmutex,
pvec_type::PVEC_CONDVAR => Qcondition_variable,
pvec_type::PVEC_TERMINAL => Qterminal,
pvec_type::PVEC_MODULE_FUNCTION => Qmodule_function,
pvec_type::PVEC_FONT => {
if object.is_font_spec() {
Qfont_spec
} else if object.is_font_entity() {
Qfont_entity
} else if object.is_font_object() {
Qfont_object
} else {
Qfont
}
}
pvec_type::PVEC_RECORD => unsafe {
let vec = object.as_vector_unchecked();
let t = vec.get_unchecked(0);
if t.is_record() {
let v = t.as_vector_unchecked();
if v.len() > 1 {
return v.get_unchecked(1);
}
}
t
},
_ => Qnone,
}
}
}
}
#[lisp_fn]
pub fn subr_lang(subr: LispSubrRef) -> LispObject {
if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_C {
"C".into()
} else if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_Rust {
"Rust".into()
} else {
unreachable!()
}
}
/// Return the element of ARRAY at index IDX.
/// ARRAY may be a vector, a string, a char-table, a bool-vector, a record,
/// or a byte-code object. IDX starts at 0.
#[lisp_fn]
pub fn aref(array: LispObject, idx: EmacsInt) -> LispObject {
if idx < 0 {
args_out_of_range!(array, idx);
}
let idx_u = idx as usize;
if let Some(s) = array.as_string() {
match s.char_indices().nth(idx_u) {
None => {
args_out_of_range!(array, idx);
}
Some((_, cp)) => EmacsInt::from(cp).into(),
}
} else if let Some(bv) = array.as_bool_vector() {
if idx_u >= bv.len() {
args_out_of_range!(array, idx);
}
unsafe { bv.get_unchecked(idx_u) }
} else if let Some(ct) = array.as_char_table() {
ct.get(idx as isize)
} else if let Some(v) = array.as_vector() {
if idx_u >= v.len() {
args_out_of_range!(array, idx);
}
unsafe { v.get_unchecked(idx_u) }
} else if array.is_byte_code_function() || array.is_record() {
let vl = array.as_vectorlike().unwrap();
if idx >= vl.pseudovector_size() {
args_out_of_range!(array, idx);
}
let v = unsafe { vl.as_vector_unchecked() };
unsafe { v.get_unchecked(idx_u) }
} else {
wrong_type!(Qarrayp, array);
}
}
/// Store into the element of ARRAY at index IDX the value NEWELT.
/// Return NEWELT. ARRAY may be a vector, a string, a char-table or a
/// bool-vector. IDX starts at 0.
#[lisp_fn]
pub fn aset(array: LispObject, idx: EmacsInt, newelt: LispObject) -> LispObject {
if let Some(vl) = array.as_vectorlike() {
if let Some(mut v) = vl.as_vector() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
v.set_checked(idx as usize, newelt);
} else if let Some(mut bv) = vl.as_bool_vector() {
bv.set_checked(idx as usize, newelt.is_not_nil());
} else if let Some(_tbl) = vl.as_char_table() {
verify_lisp_type!(idx, Qcharacterp);
unsafe { CHAR_TABLE_SET(array, idx as c_int, newelt) };
} else if let Some(mut record) = vl.as_record() {
record.set_checked(idx as usize, newelt);
} else {
unreachable!();
}
} else if let Some(mut s) = array.as_string() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
if idx < 0 || idx >= s.len_chars() as EmacsInt {
args_out_of_range!(array, idx);
}
let c = newelt.as_character_or_error();
if s.is_multibyte() {
unsafe { aset_multibyte_string(array, idx, c as c_int) };
} else if is_single_byte_char(c) {
s.set_byte(idx as isize, c as u8);
} else {
if s.chars().any(|i|!is_ascii(i)) {
args_out_of_range!(array, newelt);
}
s.mark_as_multibyte();
unsafe { aset_multibyte_string(array, idx, c as c_int) };
}
} else {
wrong_type!(Qarrayp, array);
}
newelt
}
/// Set SYMBOL's function definition to DEFINITION.
/// Associates the function with the current load file, if any.
/// The optional third argument DOCSTRING specifies the documentation string
/// for SYMBOL; if it is omitted or nil, SYMBOL uses the documentation string
/// determined by DEFINITION.
///
/// Internally, this normally uses `fset', but if SYMBOL has a
/// `defalias-fset-function' property, the associated value is used instead.
///
/// The return value is undefined.
#[lisp_fn(min = "2")]
pub fn defalias(
symbol: LispSymbolRef,
mut definition: LispObject,
docstring: LispObject,
) -> LispObject {
let sym = LispObject::from(symbol);
unsafe {
if globals.Vpurify_flag.is_not_nil()
// If `definition' is a keymap, immutable (and copying) is wrong.
&& get_keymap(definition, false, false).is_nil()
{
definition = Fpurecopy(definition);
}
}
let autoload = is_autoload(definition);
if unsafe { globals.Vpurify_flag.is_nil() } ||!autoload {
// Only add autoload entries after dumping, because the ones before are
// not useful and else we get loads of them from the loaddefs.el.
if is_autoload(symbol.get_function()) {
// Remember that the function was already an autoload.
loadhist_attach((true, sym).into());
}
loadhist_attach((if autoload { Qautoload } else { Qdefun }, sym).into());
}
// Handle automatic advice activation.
let hook = get(symbol, Qdefalias_fset_function);
if hook.is_not_nil() {
call!(hook, sym, definition);
} else {
fset(symbol, definition);
}
if docstring.is_not_nil() {
put(symbol, Qfunction_documentation, docstring);
}
// We used to return `definition', but now that `defun' and `defmacro' expand
// to a call to `defalias', we return `symbol' for backward compatibility
// (bug#11686).
sym
}
/// Return minimum and maximum number of args allowed for SUBR.
/// SUBR must be a built-in function.
/// The returned value is a pair (MIN. MAX). MIN is the minimum number
/// of args. MAX is the maximum number or the symbol `many', for a
/// function with `&rest' args, or `unevalled' for a special form.
#[lisp_fn]
pub fn subr_arity(subr: LispSubrRef) -> (EmacsInt, LispObject) {
let minargs = subr.min_args();
let maxargs = if subr.is_many() {
Qmany
} else if subr.is_unevalled() {
Qunevalled
} else {
EmacsInt::from(subr.max_args()).into()
};
(EmacsInt::from(minargs), maxargs)
}
/// Return name of subroutine SUBR.
/// SUBR must be a built-in function.
#[lisp_fn]
pub fn subr_name(subr: LispSubrRef) -> LispObject {
let name = subr.symbol_name();
unsafe { build_string(name) }
}
/// Return the byteorder for the machine.
/// Returns 66 (ASCII uppercase B) for big endian machines or 108
/// (ASCII lowercase l) for small endian machines.
#[lisp_fn]
pub fn byteorder() -> u8 {
if cfg!(endian = "big") {
b'B'
} else {
b'l'
}
}
/// Return the default value of SYMBOL, but don't check for voidness.
/// Return Qunbound if it is void.
fn default_value(mut symbol: LispSymbolRef) -> LispObject
|
symbol_redirect::SYMBOL_FORWARDED => unsafe {
let valcontents = symbol.get_fwd();
// For a built-in buffer-local variable, get the default value
// rather than letting do_symval_forwarding get the current value.
if let Some(buffer_objfwd) = as_buffer_objfwd(valcontents) {
let offset = buffer_objfwd.offset;
if per_buffer_idx_from_field_offset(offset)!= 0 {
return per_buffer_default(offset.get_byte_offset() as i32);
}
}
// For other variables, get the current value.
do_symval_forwarding(valcontents)
},
_ => panic!("Symbol type has no default value"),
}
}
/// Return t if SYMBOL has a non-void default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable.
#[lisp_fn]
pub fn default_boundp(symbol: LispSymbolRef) -> bool {
!default_value(symbol).eq(Qunbound)
}
/// Return SYMBOL's default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable. The default value is meaningful for variables with
/// local bindings in certain buffers.
#[lisp_fn(c_name = "default_value", name = "default-value")]
pub fn default_value_lisp(symbol: LispSymbolRef) -> LispObject {
let value = default_value(symbol);
if value.eq(Qunbound) {
void_variable!(symbol);
}
value
}
/***********************************************************************
Getting and Setting Values of Symbols
***********************************************************************/
/// These are the types of forwarding objects used in the value slot
/// of symbols for special built-in variables whose value is stored in
/// C/Rust static variables.
pub type Lisp_Fwd_Type = u32;
pub const Lisp_Fwd_Int: Lisp_Fwd_Type = 0; // Fwd to a C `int' variable.
pub const Lisp_Fwd_Bool: Lisp_Fwd_Type = 1; // Fwd to a C boolean var.
pub const Lisp_Fwd_Obj: Lisp_Fwd_Type = 2; // Fwd to a C LispObject variable.
pub const Lisp_Fwd_Buffer_Obj: Lisp_Fwd_Type = 3; // Fwd to a LispObject field of buffers.
pub const Lisp_Fwd_Kboard_Obj: Lisp_Fwd_Type = 4; // Fwd to a LispObject field of kboards.
// these structs will still need to be compatible with their C
// counterparts until all the C callers of the DEFVAR macros are
// ported to Rust. However, as do_symval_forwarding and
// store_symval_forwarding have been ported, some Rust-isms have
// started to happen.
#[repr(C)]
pub union Lisp_Fwd {
pub u_intfwd: Lisp_Intfwd,
pub u_boolfwd: Lisp_Boolfwd,
pub u_objfwd: Lisp_Objfwd,
pub u_buffer_objfwd: Lisp_Buffer_Objfwd,
pub u_kboard_objfwd: Lisp_Kboard_Objfwd,
}
/// Forwarding pointer to an int variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified int variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Intfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Int
pub intvar: *mut EmacsInt,
}
/// Boolean forwarding pointer to an int variable.
/// This is like Lisp_Intfwd except that the ostensible
/// "value" of the symbol is t if the bool variable is true,
/// nil if it is false.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Boolfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Bool
pub boolvar: *mut bool,
}
/// Forwarding pointer to a LispObject variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Obj
pub objvar: *mut LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current buffer. Value is byte index of slot within buffer.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Buffer_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Buffer_Obj
pub offset: FieldOffset<remacs_sys::Lisp_Buffer, LispObject>,
// One of Qnil, Qintegerp, Qsymbolp, Qstringp, Qfloatp or Qnumberp.
pub predicate: LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current kboard.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Kboard_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Kboard_Obj
pub offset: FieldOffset<remacs_sys::kboard, LispObject>,
}
/// Given the raw contents of a symbol value cell,
/// return the Lisp value of the symbol.
/// This does not handle buffer-local variables; use
/// swap_in_symval_forwarding for that.
#[no_mangle]
pub unsafe extern "C" fn do_symval_forwarding(valcontents: *const Lisp_Fwd) -> LispObject {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => LispObject::from(*(*valcontents).u_intfwd.intvar),
Lisp_Fwd_Bool => LispObject::from(*(*valcontents).u_boolfwd.boolvar),
Lisp_Fwd_Obj => (*(*valcontents).u_objfwd.objvar),
Lisp_Fwd_Buffer_Obj => *(*valcontents)
.u_buffer_objfwd
.offset
.apply_ptr(ThreadState::current_buffer_unchecked().as_mut()),
Lisp_Fwd_Kboard_Obj => {
// We used to simply use current_kboard here, but from Lisp
// code, its value is often unexpected. It seems nicer to
// allow constructions like this to work as intuitively expected:
//
// (with-selected-frame frame
// (define-key local-function-map "\eOP" [f1]))
//
// On the other hand, this affects the semantics of
// last-command and real-last-command, and people may rely on
// that. I took a quick look at the Lisp codebase, and I
// don't think anything will break. --lorentey
let frame = selected_frame();
if!frame.is_live() {
panic!("Selected frame is not live");
}
let kboard = (*frame.terminal).kboard;
*(*valcontents).u_kboard_objfwd.offset.apply_ptr(kboard)
}
_ => panic!("Unknown intfwd type"),
}
}
/// Store NEWVAL into SYMBOL, where VALCONTENTS is found in the value cell
/// of SYMBOL. If SYMBOL is buffer-local, VALCONTENTS should be the
/// buffer-independent contents of the value cell: forwarded just one
/// step past the buffer-localness.
///
/// BUF non-zero means set the value in buffer BUF instead of the
/// current buffer. This only plays a role for per-buffer variables.
#[no_mangle]
pub unsafe extern "C" fn store_symval_forwarding(
valcontents: *mut Lisp_Fwd,
newval: LispObject,
mut buf: *mut Lisp_Buffer,
) {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => (*(*valcontents).u_intfwd.intvar) = newval.as_fixnum_or_error(),
Lisp_Fwd_Bool => (*(*valcontents).u_boolfwd.boolvar) = newval.is_not_nil(),
Lisp_Fwd_Obj => {
(*(*valcontents).u_objfwd.objvar) = newval;
update_buffer_defaults((*valcontents).u_objfwd.objvar, newval);
}
Lisp_Fwd_Buffer_Obj => {
let predicate = (*valcontents).u_buffer_objfwd.predicate;
if newval.is_not_nil() && predicate.is_symbol() {
let pred_sym: LispSymbolRef = predicate.into();
let mut prop = get(pred_sym, Qchoice);
if prop.is_not_nil() {
if memq(newval, prop).is_nil() {
wrong_choice(prop, newval);
}
} else {
prop = get(pred_sym, Qrange);
if let Some((min, max)) = prop.into() {
let args = [min, newval, max];
if!newval.is_number() || leq(&args) {
wrong_range(min, max, newval);
}
} else if predicate.is_function() && call!(predicate, newval).is_nil() {
wrong_type!(predicate, newval);
}
}
}
if buf.is_null() {
buf = ThreadState::current_buffer_unchecked().as_mut();
}
*(*valcontents).u_buffer_objfwd.offset.apply_ptr_mut(buf) = newval;
}
Lisp_Fwd_Kboard_Obj => {
|
{
while symbol.get_redirect() == symbol_redirect::SYMBOL_VARALIAS {
symbol = symbol.get_indirect_variable();
}
match symbol.get_redirect() {
symbol_redirect::SYMBOL_PLAINVAL => unsafe { symbol.get_value() },
symbol_redirect::SYMBOL_LOCALIZED => {
// If var is set up for a buffer that lacks a local value for it,
// the current value is nominally the default value.
// But the `realvalue' slot may be more up to date, since
// ordinary setq stores just that slot. So use that.
let blv = unsafe { symbol.get_blv() };
let fwd = blv.get_fwd();
if !fwd.is_null() && blv.valcell.eq(blv.defcell) {
unsafe { do_symval_forwarding(fwd) }
} else {
let (_, d) = blv.defcell.into();
d
}
}
|
identifier_body
|
data.rs
|
indirect_function", name = "indirect-function")]
pub fn indirect_function_lisp(object: LispObject, _noerror: LispObject) -> LispObject {
match object.as_symbol() {
None => object,
Some(symbol) => symbol.get_indirect_function(),
}
}
/// Return a symbol representing the type of OBJECT.
/// The symbol returned names the object's basic type;
/// for example, (type-of 1) returns `integer'.
#[lisp_fn]
pub fn type_of(object: LispObject) -> LispObject {
match object.get_type() {
Lisp_Type::Lisp_Cons => Qcons,
Lisp_Type::Lisp_Int0 | Lisp_Type::Lisp_Int1 => Qinteger,
Lisp_Type::Lisp_Symbol => Qsymbol,
Lisp_Type::Lisp_String => Qstring,
Lisp_Type::Lisp_Float => Qfloat,
Lisp_Type::Lisp_Misc => {
let m = object.as_misc().unwrap();
match m.get_type() {
Lisp_Misc_Type::Lisp_Misc_Marker => Qmarker,
Lisp_Misc_Type::Lisp_Misc_Overlay => Qoverlay,
Lisp_Misc_Type::Lisp_Misc_Finalizer => Qfinalizer,
Lisp_Misc_Type::Lisp_Misc_User_Ptr => Quser_ptr,
_ => Qnone,
}
}
Lisp_Type::Lisp_Vectorlike => {
let vec = unsafe { object.as_vectorlike_unchecked() };
match vec.pseudovector_type() {
pvec_type::PVEC_NORMAL_VECTOR => Qvector,
pvec_type::PVEC_WINDOW_CONFIGURATION => Qwindow_configuration,
pvec_type::PVEC_PROCESS => Qprocess,
pvec_type::PVEC_WINDOW => Qwindow,
pvec_type::PVEC_SUBR => Qsubr,
pvec_type::PVEC_COMPILED => Qcompiled_function,
pvec_type::PVEC_BUFFER => Qbuffer,
pvec_type::PVEC_CHAR_TABLE => Qchar_table,
pvec_type::PVEC_BOOL_VECTOR => Qbool_vector,
pvec_type::PVEC_FRAME => Qframe,
pvec_type::PVEC_HASH_TABLE => Qhash_table,
pvec_type::PVEC_THREAD => Qthread,
pvec_type::PVEC_MUTEX => Qmutex,
pvec_type::PVEC_CONDVAR => Qcondition_variable,
pvec_type::PVEC_TERMINAL => Qterminal,
pvec_type::PVEC_MODULE_FUNCTION => Qmodule_function,
pvec_type::PVEC_FONT => {
if object.is_font_spec() {
Qfont_spec
} else if object.is_font_entity() {
Qfont_entity
} else if object.is_font_object() {
Qfont_object
} else {
Qfont
}
}
pvec_type::PVEC_RECORD => unsafe {
let vec = object.as_vector_unchecked();
let t = vec.get_unchecked(0);
if t.is_record() {
let v = t.as_vector_unchecked();
if v.len() > 1 {
return v.get_unchecked(1);
}
}
t
},
_ => Qnone,
}
}
}
}
#[lisp_fn]
pub fn subr_lang(subr: LispSubrRef) -> LispObject {
if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_C {
"C".into()
} else if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_Rust {
"Rust".into()
} else {
unreachable!()
}
}
/// Return the element of ARRAY at index IDX.
/// ARRAY may be a vector, a string, a char-table, a bool-vector, a record,
/// or a byte-code object. IDX starts at 0.
#[lisp_fn]
pub fn aref(array: LispObject, idx: EmacsInt) -> LispObject {
if idx < 0 {
args_out_of_range!(array, idx);
}
let idx_u = idx as usize;
if let Some(s) = array.as_string() {
match s.char_indices().nth(idx_u) {
None => {
args_out_of_range!(array, idx);
}
Some((_, cp)) => EmacsInt::from(cp).into(),
}
} else if let Some(bv) = array.as_bool_vector() {
if idx_u >= bv.len() {
args_out_of_range!(array, idx);
}
unsafe { bv.get_unchecked(idx_u) }
} else if let Some(ct) = array.as_char_table() {
ct.get(idx as isize)
} else if let Some(v) = array.as_vector() {
if idx_u >= v.len() {
args_out_of_range!(array, idx);
}
unsafe { v.get_unchecked(idx_u) }
} else if array.is_byte_code_function() || array.is_record() {
let vl = array.as_vectorlike().unwrap();
if idx >= vl.pseudovector_size() {
args_out_of_range!(array, idx);
}
let v = unsafe { vl.as_vector_unchecked() };
unsafe { v.get_unchecked(idx_u) }
} else {
wrong_type!(Qarrayp, array);
}
}
/// Store into the element of ARRAY at index IDX the value NEWELT.
/// Return NEWELT. ARRAY may be a vector, a string, a char-table or a
/// bool-vector. IDX starts at 0.
#[lisp_fn]
pub fn aset(array: LispObject, idx: EmacsInt, newelt: LispObject) -> LispObject {
if let Some(vl) = array.as_vectorlike() {
if let Some(mut v) = vl.as_vector() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
v.set_checked(idx as usize, newelt);
} else if let Some(mut bv) = vl.as_bool_vector() {
bv.set_checked(idx as usize, newelt.is_not_nil());
} else if let Some(_tbl) = vl.as_char_table() {
verify_lisp_type!(idx, Qcharacterp);
unsafe { CHAR_TABLE_SET(array, idx as c_int, newelt) };
} else if let Some(mut record) = vl.as_record() {
record.set_checked(idx as usize, newelt);
} else {
unreachable!();
}
} else if let Some(mut s) = array.as_string() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
if idx < 0 || idx >= s.len_chars() as EmacsInt {
args_out_of_range!(array, idx);
}
let c = newelt.as_character_or_error();
if s.is_multibyte() {
unsafe { aset_multibyte_string(array, idx, c as c_int) };
} else if is_single_byte_char(c) {
s.set_byte(idx as isize, c as u8);
} else {
if s.chars().any(|i|!is_ascii(i)) {
args_out_of_range!(array, newelt);
}
s.mark_as_multibyte();
unsafe { aset_multibyte_string(array, idx, c as c_int) };
}
} else {
wrong_type!(Qarrayp, array);
}
newelt
}
/// Set SYMBOL's function definition to DEFINITION.
/// Associates the function with the current load file, if any.
/// The optional third argument DOCSTRING specifies the documentation string
/// for SYMBOL; if it is omitted or nil, SYMBOL uses the documentation string
/// determined by DEFINITION.
///
/// Internally, this normally uses `fset', but if SYMBOL has a
/// `defalias-fset-function' property, the associated value is used instead.
///
/// The return value is undefined.
#[lisp_fn(min = "2")]
pub fn defalias(
symbol: LispSymbolRef,
mut definition: LispObject,
docstring: LispObject,
) -> LispObject {
let sym = LispObject::from(symbol);
unsafe {
if globals.Vpurify_flag.is_not_nil()
// If `definition' is a keymap, immutable (and copying) is wrong.
&& get_keymap(definition, false, false).is_nil()
{
definition = Fpurecopy(definition);
}
}
let autoload = is_autoload(definition);
if unsafe { globals.Vpurify_flag.is_nil() } ||!autoload {
// Only add autoload entries after dumping, because the ones before are
// not useful and else we get loads of them from the loaddefs.el.
if is_autoload(symbol.get_function()) {
// Remember that the function was already an autoload.
loadhist_attach((true, sym).into());
}
loadhist_attach((if autoload { Qautoload } else { Qdefun }, sym).into());
}
// Handle automatic advice activation.
let hook = get(symbol, Qdefalias_fset_function);
if hook.is_not_nil() {
call!(hook, sym, definition);
} else {
fset(symbol, definition);
}
if docstring.is_not_nil() {
put(symbol, Qfunction_documentation, docstring);
}
// We used to return `definition', but now that `defun' and `defmacro' expand
// to a call to `defalias', we return `symbol' for backward compatibility
// (bug#11686).
sym
}
/// Return minimum and maximum number of args allowed for SUBR.
/// SUBR must be a built-in function.
/// The returned value is a pair (MIN. MAX). MIN is the minimum number
/// of args. MAX is the maximum number or the symbol `many', for a
/// function with `&rest' args, or `unevalled' for a special form.
#[lisp_fn]
pub fn subr_arity(subr: LispSubrRef) -> (EmacsInt, LispObject) {
let minargs = subr.min_args();
let maxargs = if subr.is_many() {
Qmany
} else if subr.is_unevalled() {
Qunevalled
} else {
EmacsInt::from(subr.max_args()).into()
};
(EmacsInt::from(minargs), maxargs)
}
/// Return name of subroutine SUBR.
/// SUBR must be a built-in function.
#[lisp_fn]
pub fn subr_name(subr: LispSubrRef) -> LispObject {
let name = subr.symbol_name();
unsafe { build_string(name) }
}
/// Return the byteorder for the machine.
/// Returns 66 (ASCII uppercase B) for big endian machines or 108
/// (ASCII lowercase l) for small endian machines.
#[lisp_fn]
pub fn byteorder() -> u8 {
if cfg!(endian = "big") {
b'B'
} else {
b'l'
}
}
/// Return the default value of SYMBOL, but don't check for voidness.
/// Return Qunbound if it is void.
fn default_value(mut symbol: LispSymbolRef) -> LispObject {
while symbol.get_redirect() == symbol_redirect::SYMBOL_VARALIAS {
symbol = symbol.get_indirect_variable();
}
match symbol.get_redirect() {
symbol_redirect::SYMBOL_PLAINVAL => unsafe { symbol.get_value() },
symbol_redirect::SYMBOL_LOCALIZED => {
// If var is set up for a buffer that lacks a local value for it,
// the current value is nominally the default value.
// But the `realvalue' slot may be more up to date, since
// ordinary setq stores just that slot. So use that.
let blv = unsafe { symbol.get_blv() };
let fwd = blv.get_fwd();
if!fwd.is_null() && blv.valcell.eq(blv.defcell) {
unsafe { do_symval_forwarding(fwd) }
} else {
let (_, d) = blv.defcell.into();
d
}
}
symbol_redirect::SYMBOL_FORWARDED => unsafe {
let valcontents = symbol.get_fwd();
// For a built-in buffer-local variable, get the default value
// rather than letting do_symval_forwarding get the current value.
if let Some(buffer_objfwd) = as_buffer_objfwd(valcontents) {
let offset = buffer_objfwd.offset;
if per_buffer_idx_from_field_offset(offset)!= 0 {
return per_buffer_default(offset.get_byte_offset() as i32);
}
}
// For other variables, get the current value.
do_symval_forwarding(valcontents)
},
_ => panic!("Symbol type has no default value"),
}
}
/// Return t if SYMBOL has a non-void default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable.
#[lisp_fn]
pub fn default_boundp(symbol: LispSymbolRef) -> bool {
!default_value(symbol).eq(Qunbound)
}
/// Return SYMBOL's default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable. The default value is meaningful for variables with
/// local bindings in certain buffers.
#[lisp_fn(c_name = "default_value", name = "default-value")]
pub fn default_value_lisp(symbol: LispSymbolRef) -> LispObject {
let value = default_value(symbol);
if value.eq(Qunbound) {
void_variable!(symbol);
}
value
}
/***********************************************************************
Getting and Setting Values of Symbols
***********************************************************************/
/// These are the types of forwarding objects used in the value slot
/// of symbols for special built-in variables whose value is stored in
/// C/Rust static variables.
pub type Lisp_Fwd_Type = u32;
pub const Lisp_Fwd_Int: Lisp_Fwd_Type = 0; // Fwd to a C `int' variable.
pub const Lisp_Fwd_Bool: Lisp_Fwd_Type = 1; // Fwd to a C boolean var.
pub const Lisp_Fwd_Obj: Lisp_Fwd_Type = 2; // Fwd to a C LispObject variable.
pub const Lisp_Fwd_Buffer_Obj: Lisp_Fwd_Type = 3; // Fwd to a LispObject field of buffers.
pub const Lisp_Fwd_Kboard_Obj: Lisp_Fwd_Type = 4; // Fwd to a LispObject field of kboards.
// these structs will still need to be compatible with their C
// counterparts until all the C callers of the DEFVAR macros are
// ported to Rust. However, as do_symval_forwarding and
// store_symval_forwarding have been ported, some Rust-isms have
// started to happen.
#[repr(C)]
pub union Lisp_Fwd {
pub u_intfwd: Lisp_Intfwd,
pub u_boolfwd: Lisp_Boolfwd,
pub u_objfwd: Lisp_Objfwd,
pub u_buffer_objfwd: Lisp_Buffer_Objfwd,
pub u_kboard_objfwd: Lisp_Kboard_Objfwd,
}
/// Forwarding pointer to an int variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified int variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Intfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Int
pub intvar: *mut EmacsInt,
}
/// Boolean forwarding pointer to an int variable.
/// This is like Lisp_Intfwd except that the ostensible
/// "value" of the symbol is t if the bool variable is true,
/// nil if it is false.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Boolfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Bool
pub boolvar: *mut bool,
}
/// Forwarding pointer to a LispObject variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Obj
pub objvar: *mut LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current buffer. Value is byte index of slot within buffer.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Buffer_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Buffer_Obj
pub offset: FieldOffset<remacs_sys::Lisp_Buffer, LispObject>,
// One of Qnil, Qintegerp, Qsymbolp, Qstringp, Qfloatp or Qnumberp.
pub predicate: LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current kboard.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Kboard_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Kboard_Obj
pub offset: FieldOffset<remacs_sys::kboard, LispObject>,
}
/// Given the raw contents of a symbol value cell,
/// return the Lisp value of the symbol.
/// This does not handle buffer-local variables; use
/// swap_in_symval_forwarding for that.
#[no_mangle]
pub unsafe extern "C" fn
|
(valcontents: *const Lisp_Fwd) -> LispObject {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => LispObject::from(*(*valcontents).u_intfwd.intvar),
Lisp_Fwd_Bool => LispObject::from(*(*valcontents).u_boolfwd.boolvar),
Lisp_Fwd_Obj => (*(*valcontents).u_objfwd.objvar),
Lisp_Fwd_Buffer_Obj => *(*valcontents)
.u_buffer_objfwd
.offset
.apply_ptr(ThreadState::current_buffer_unchecked().as_mut()),
Lisp_Fwd_Kboard_Obj => {
// We used to simply use current_kboard here, but from Lisp
// code, its value is often unexpected. It seems nicer to
// allow constructions like this to work as intuitively expected:
//
// (with-selected-frame frame
// (define-key local-function-map "\eOP" [f1]))
//
// On the other hand, this affects the semantics of
// last-command and real-last-command, and people may rely on
// that. I took a quick look at the Lisp codebase, and I
// don't think anything will break. --lorentey
let frame = selected_frame();
if!frame.is_live() {
panic!("Selected frame is not live");
}
let kboard = (*frame.terminal).kboard;
*(*valcontents).u_kboard_objfwd.offset.apply_ptr(kboard)
}
_ => panic!("Unknown intfwd type"),
}
}
/// Store NEWVAL into SYMBOL, where VALCONTENTS is found in the value cell
/// of SYMBOL. If SYMBOL is buffer-local, VALCONTENTS should be the
/// buffer-independent contents of the value cell: forwarded just one
/// step past the buffer-localness.
///
/// BUF non-zero means set the value in buffer BUF instead of the
/// current buffer. This only plays a role for per-buffer variables.
#[no_mangle]
pub unsafe extern "C" fn store_symval_forwarding(
valcontents: *mut Lisp_Fwd,
newval: LispObject,
mut buf: *mut Lisp_Buffer,
) {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => (*(*valcontents).u_intfwd.intvar) = newval.as_fixnum_or_error(),
Lisp_Fwd_Bool => (*(*valcontents).u_boolfwd.boolvar) = newval.is_not_nil(),
Lisp_Fwd_Obj => {
(*(*valcontents).u_objfwd.objvar) = newval;
update_buffer_defaults((*valcontents).u_objfwd.objvar, newval);
}
Lisp_Fwd_Buffer_Obj => {
let predicate = (*valcontents).u_buffer_objfwd.predicate;
if newval.is_not_nil() && predicate.is_symbol() {
let pred_sym: LispSymbolRef = predicate.into();
let mut prop = get(pred_sym, Qchoice);
if prop.is_not_nil() {
if memq(newval, prop).is_nil() {
wrong_choice(prop, newval);
}
} else {
prop = get(pred_sym, Qrange);
if let Some((min, max)) = prop.into() {
let args = [min, newval, max];
if!newval.is_number() || leq(&args) {
wrong_range(min, max, newval);
}
} else if predicate.is_function() && call!(predicate, newval).is_nil() {
wrong_type!(predicate, newval);
}
}
}
if buf.is_null() {
buf = ThreadState::current_buffer_unchecked().as_mut();
}
*(*valcontents).u_buffer_objfwd.offset.apply_ptr_mut(buf) = newval;
}
Lisp_Fwd_Kboard_Obj => {
|
do_symval_forwarding
|
identifier_name
|
data.rs
|
"indirect_function", name = "indirect-function")]
pub fn indirect_function_lisp(object: LispObject, _noerror: LispObject) -> LispObject {
match object.as_symbol() {
None => object,
Some(symbol) => symbol.get_indirect_function(),
}
}
/// Return a symbol representing the type of OBJECT.
/// The symbol returned names the object's basic type;
/// for example, (type-of 1) returns `integer'.
#[lisp_fn]
pub fn type_of(object: LispObject) -> LispObject {
match object.get_type() {
Lisp_Type::Lisp_Cons => Qcons,
Lisp_Type::Lisp_Int0 | Lisp_Type::Lisp_Int1 => Qinteger,
Lisp_Type::Lisp_Symbol => Qsymbol,
Lisp_Type::Lisp_String => Qstring,
Lisp_Type::Lisp_Float => Qfloat,
Lisp_Type::Lisp_Misc => {
let m = object.as_misc().unwrap();
match m.get_type() {
Lisp_Misc_Type::Lisp_Misc_Marker => Qmarker,
Lisp_Misc_Type::Lisp_Misc_Overlay => Qoverlay,
Lisp_Misc_Type::Lisp_Misc_Finalizer => Qfinalizer,
Lisp_Misc_Type::Lisp_Misc_User_Ptr => Quser_ptr,
_ => Qnone,
}
}
Lisp_Type::Lisp_Vectorlike => {
let vec = unsafe { object.as_vectorlike_unchecked() };
match vec.pseudovector_type() {
pvec_type::PVEC_NORMAL_VECTOR => Qvector,
pvec_type::PVEC_WINDOW_CONFIGURATION => Qwindow_configuration,
pvec_type::PVEC_PROCESS => Qprocess,
pvec_type::PVEC_WINDOW => Qwindow,
pvec_type::PVEC_SUBR => Qsubr,
pvec_type::PVEC_COMPILED => Qcompiled_function,
pvec_type::PVEC_BUFFER => Qbuffer,
pvec_type::PVEC_CHAR_TABLE => Qchar_table,
pvec_type::PVEC_BOOL_VECTOR => Qbool_vector,
pvec_type::PVEC_FRAME => Qframe,
pvec_type::PVEC_HASH_TABLE => Qhash_table,
pvec_type::PVEC_THREAD => Qthread,
pvec_type::PVEC_MUTEX => Qmutex,
pvec_type::PVEC_CONDVAR => Qcondition_variable,
pvec_type::PVEC_TERMINAL => Qterminal,
pvec_type::PVEC_MODULE_FUNCTION => Qmodule_function,
pvec_type::PVEC_FONT => {
if object.is_font_spec() {
Qfont_spec
} else if object.is_font_entity() {
Qfont_entity
} else if object.is_font_object() {
Qfont_object
} else {
Qfont
}
}
pvec_type::PVEC_RECORD => unsafe {
let vec = object.as_vector_unchecked();
let t = vec.get_unchecked(0);
if t.is_record() {
let v = t.as_vector_unchecked();
if v.len() > 1 {
return v.get_unchecked(1);
}
}
t
},
_ => Qnone,
}
}
}
}
#[lisp_fn]
pub fn subr_lang(subr: LispSubrRef) -> LispObject {
if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_C {
"C".into()
} else if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_Rust {
"Rust".into()
} else {
unreachable!()
}
}
/// Return the element of ARRAY at index IDX.
/// ARRAY may be a vector, a string, a char-table, a bool-vector, a record,
/// or a byte-code object. IDX starts at 0.
#[lisp_fn]
pub fn aref(array: LispObject, idx: EmacsInt) -> LispObject {
if idx < 0 {
args_out_of_range!(array, idx);
}
let idx_u = idx as usize;
if let Some(s) = array.as_string() {
match s.char_indices().nth(idx_u) {
None => {
args_out_of_range!(array, idx);
}
Some((_, cp)) => EmacsInt::from(cp).into(),
}
} else if let Some(bv) = array.as_bool_vector() {
if idx_u >= bv.len() {
args_out_of_range!(array, idx);
}
unsafe { bv.get_unchecked(idx_u) }
|
} else if let Some(v) = array.as_vector() {
if idx_u >= v.len() {
args_out_of_range!(array, idx);
}
unsafe { v.get_unchecked(idx_u) }
} else if array.is_byte_code_function() || array.is_record() {
let vl = array.as_vectorlike().unwrap();
if idx >= vl.pseudovector_size() {
args_out_of_range!(array, idx);
}
let v = unsafe { vl.as_vector_unchecked() };
unsafe { v.get_unchecked(idx_u) }
} else {
wrong_type!(Qarrayp, array);
}
}
/// Store into the element of ARRAY at index IDX the value NEWELT.
/// Return NEWELT. ARRAY may be a vector, a string, a char-table or a
/// bool-vector. IDX starts at 0.
#[lisp_fn]
pub fn aset(array: LispObject, idx: EmacsInt, newelt: LispObject) -> LispObject {
if let Some(vl) = array.as_vectorlike() {
if let Some(mut v) = vl.as_vector() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
v.set_checked(idx as usize, newelt);
} else if let Some(mut bv) = vl.as_bool_vector() {
bv.set_checked(idx as usize, newelt.is_not_nil());
} else if let Some(_tbl) = vl.as_char_table() {
verify_lisp_type!(idx, Qcharacterp);
unsafe { CHAR_TABLE_SET(array, idx as c_int, newelt) };
} else if let Some(mut record) = vl.as_record() {
record.set_checked(idx as usize, newelt);
} else {
unreachable!();
}
} else if let Some(mut s) = array.as_string() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
if idx < 0 || idx >= s.len_chars() as EmacsInt {
args_out_of_range!(array, idx);
}
let c = newelt.as_character_or_error();
if s.is_multibyte() {
unsafe { aset_multibyte_string(array, idx, c as c_int) };
} else if is_single_byte_char(c) {
s.set_byte(idx as isize, c as u8);
} else {
if s.chars().any(|i|!is_ascii(i)) {
args_out_of_range!(array, newelt);
}
s.mark_as_multibyte();
unsafe { aset_multibyte_string(array, idx, c as c_int) };
}
} else {
wrong_type!(Qarrayp, array);
}
newelt
}
/// Set SYMBOL's function definition to DEFINITION.
/// Associates the function with the current load file, if any.
/// The optional third argument DOCSTRING specifies the documentation string
/// for SYMBOL; if it is omitted or nil, SYMBOL uses the documentation string
/// determined by DEFINITION.
///
/// Internally, this normally uses `fset', but if SYMBOL has a
/// `defalias-fset-function' property, the associated value is used instead.
///
/// The return value is undefined.
#[lisp_fn(min = "2")]
pub fn defalias(
symbol: LispSymbolRef,
mut definition: LispObject,
docstring: LispObject,
) -> LispObject {
let sym = LispObject::from(symbol);
unsafe {
if globals.Vpurify_flag.is_not_nil()
// If `definition' is a keymap, immutable (and copying) is wrong.
&& get_keymap(definition, false, false).is_nil()
{
definition = Fpurecopy(definition);
}
}
let autoload = is_autoload(definition);
if unsafe { globals.Vpurify_flag.is_nil() } ||!autoload {
// Only add autoload entries after dumping, because the ones before are
// not useful and else we get loads of them from the loaddefs.el.
if is_autoload(symbol.get_function()) {
// Remember that the function was already an autoload.
loadhist_attach((true, sym).into());
}
loadhist_attach((if autoload { Qautoload } else { Qdefun }, sym).into());
}
// Handle automatic advice activation.
let hook = get(symbol, Qdefalias_fset_function);
if hook.is_not_nil() {
call!(hook, sym, definition);
} else {
fset(symbol, definition);
}
if docstring.is_not_nil() {
put(symbol, Qfunction_documentation, docstring);
}
// We used to return `definition', but now that `defun' and `defmacro' expand
// to a call to `defalias', we return `symbol' for backward compatibility
// (bug#11686).
sym
}
/// Return minimum and maximum number of args allowed for SUBR.
/// SUBR must be a built-in function.
/// The returned value is a pair (MIN. MAX). MIN is the minimum number
/// of args. MAX is the maximum number or the symbol `many', for a
/// function with `&rest' args, or `unevalled' for a special form.
#[lisp_fn]
pub fn subr_arity(subr: LispSubrRef) -> (EmacsInt, LispObject) {
let minargs = subr.min_args();
let maxargs = if subr.is_many() {
Qmany
} else if subr.is_unevalled() {
Qunevalled
} else {
EmacsInt::from(subr.max_args()).into()
};
(EmacsInt::from(minargs), maxargs)
}
/// Return name of subroutine SUBR.
/// SUBR must be a built-in function.
#[lisp_fn]
pub fn subr_name(subr: LispSubrRef) -> LispObject {
let name = subr.symbol_name();
unsafe { build_string(name) }
}
/// Return the byteorder for the machine.
/// Returns 66 (ASCII uppercase B) for big endian machines or 108
/// (ASCII lowercase l) for small endian machines.
#[lisp_fn]
pub fn byteorder() -> u8 {
if cfg!(endian = "big") {
b'B'
} else {
b'l'
}
}
/// Return the default value of SYMBOL, but don't check for voidness.
/// Return Qunbound if it is void.
fn default_value(mut symbol: LispSymbolRef) -> LispObject {
while symbol.get_redirect() == symbol_redirect::SYMBOL_VARALIAS {
symbol = symbol.get_indirect_variable();
}
match symbol.get_redirect() {
symbol_redirect::SYMBOL_PLAINVAL => unsafe { symbol.get_value() },
symbol_redirect::SYMBOL_LOCALIZED => {
// If var is set up for a buffer that lacks a local value for it,
// the current value is nominally the default value.
// But the `realvalue' slot may be more up to date, since
// ordinary setq stores just that slot. So use that.
let blv = unsafe { symbol.get_blv() };
let fwd = blv.get_fwd();
if!fwd.is_null() && blv.valcell.eq(blv.defcell) {
unsafe { do_symval_forwarding(fwd) }
} else {
let (_, d) = blv.defcell.into();
d
}
}
symbol_redirect::SYMBOL_FORWARDED => unsafe {
let valcontents = symbol.get_fwd();
// For a built-in buffer-local variable, get the default value
// rather than letting do_symval_forwarding get the current value.
if let Some(buffer_objfwd) = as_buffer_objfwd(valcontents) {
let offset = buffer_objfwd.offset;
if per_buffer_idx_from_field_offset(offset)!= 0 {
return per_buffer_default(offset.get_byte_offset() as i32);
}
}
// For other variables, get the current value.
do_symval_forwarding(valcontents)
},
_ => panic!("Symbol type has no default value"),
}
}
/// Return t if SYMBOL has a non-void default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable.
#[lisp_fn]
pub fn default_boundp(symbol: LispSymbolRef) -> bool {
!default_value(symbol).eq(Qunbound)
}
/// Return SYMBOL's default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable. The default value is meaningful for variables with
/// local bindings in certain buffers.
#[lisp_fn(c_name = "default_value", name = "default-value")]
pub fn default_value_lisp(symbol: LispSymbolRef) -> LispObject {
let value = default_value(symbol);
if value.eq(Qunbound) {
void_variable!(symbol);
}
value
}
/***********************************************************************
Getting and Setting Values of Symbols
***********************************************************************/
/// These are the types of forwarding objects used in the value slot
/// of symbols for special built-in variables whose value is stored in
/// C/Rust static variables.
pub type Lisp_Fwd_Type = u32;
pub const Lisp_Fwd_Int: Lisp_Fwd_Type = 0; // Fwd to a C `int' variable.
pub const Lisp_Fwd_Bool: Lisp_Fwd_Type = 1; // Fwd to a C boolean var.
pub const Lisp_Fwd_Obj: Lisp_Fwd_Type = 2; // Fwd to a C LispObject variable.
pub const Lisp_Fwd_Buffer_Obj: Lisp_Fwd_Type = 3; // Fwd to a LispObject field of buffers.
pub const Lisp_Fwd_Kboard_Obj: Lisp_Fwd_Type = 4; // Fwd to a LispObject field of kboards.
// these structs will still need to be compatible with their C
// counterparts until all the C callers of the DEFVAR macros are
// ported to Rust. However, as do_symval_forwarding and
// store_symval_forwarding have been ported, some Rust-isms have
// started to happen.
#[repr(C)]
pub union Lisp_Fwd {
pub u_intfwd: Lisp_Intfwd,
pub u_boolfwd: Lisp_Boolfwd,
pub u_objfwd: Lisp_Objfwd,
pub u_buffer_objfwd: Lisp_Buffer_Objfwd,
pub u_kboard_objfwd: Lisp_Kboard_Objfwd,
}
/// Forwarding pointer to an int variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified int variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Intfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Int
pub intvar: *mut EmacsInt,
}
/// Boolean forwarding pointer to an int variable.
/// This is like Lisp_Intfwd except that the ostensible
/// "value" of the symbol is t if the bool variable is true,
/// nil if it is false.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Boolfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Bool
pub boolvar: *mut bool,
}
/// Forwarding pointer to a LispObject variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Obj
pub objvar: *mut LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current buffer. Value is byte index of slot within buffer.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Buffer_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Buffer_Obj
pub offset: FieldOffset<remacs_sys::Lisp_Buffer, LispObject>,
// One of Qnil, Qintegerp, Qsymbolp, Qstringp, Qfloatp or Qnumberp.
pub predicate: LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current kboard.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Kboard_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Kboard_Obj
pub offset: FieldOffset<remacs_sys::kboard, LispObject>,
}
/// Given the raw contents of a symbol value cell,
/// return the Lisp value of the symbol.
/// This does not handle buffer-local variables; use
/// swap_in_symval_forwarding for that.
#[no_mangle]
pub unsafe extern "C" fn do_symval_forwarding(valcontents: *const Lisp_Fwd) -> LispObject {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => LispObject::from(*(*valcontents).u_intfwd.intvar),
Lisp_Fwd_Bool => LispObject::from(*(*valcontents).u_boolfwd.boolvar),
Lisp_Fwd_Obj => (*(*valcontents).u_objfwd.objvar),
Lisp_Fwd_Buffer_Obj => *(*valcontents)
.u_buffer_objfwd
.offset
.apply_ptr(ThreadState::current_buffer_unchecked().as_mut()),
Lisp_Fwd_Kboard_Obj => {
// We used to simply use current_kboard here, but from Lisp
// code, its value is often unexpected. It seems nicer to
// allow constructions like this to work as intuitively expected:
//
// (with-selected-frame frame
// (define-key local-function-map "\eOP" [f1]))
//
// On the other hand, this affects the semantics of
// last-command and real-last-command, and people may rely on
// that. I took a quick look at the Lisp codebase, and I
// don't think anything will break. --lorentey
let frame = selected_frame();
if!frame.is_live() {
panic!("Selected frame is not live");
}
let kboard = (*frame.terminal).kboard;
*(*valcontents).u_kboard_objfwd.offset.apply_ptr(kboard)
}
_ => panic!("Unknown intfwd type"),
}
}
/// Store NEWVAL into SYMBOL, where VALCONTENTS is found in the value cell
/// of SYMBOL. If SYMBOL is buffer-local, VALCONTENTS should be the
/// buffer-independent contents of the value cell: forwarded just one
/// step past the buffer-localness.
///
/// BUF non-zero means set the value in buffer BUF instead of the
/// current buffer. This only plays a role for per-buffer variables.
#[no_mangle]
pub unsafe extern "C" fn store_symval_forwarding(
valcontents: *mut Lisp_Fwd,
newval: LispObject,
mut buf: *mut Lisp_Buffer,
) {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => (*(*valcontents).u_intfwd.intvar) = newval.as_fixnum_or_error(),
Lisp_Fwd_Bool => (*(*valcontents).u_boolfwd.boolvar) = newval.is_not_nil(),
Lisp_Fwd_Obj => {
(*(*valcontents).u_objfwd.objvar) = newval;
update_buffer_defaults((*valcontents).u_objfwd.objvar, newval);
}
Lisp_Fwd_Buffer_Obj => {
let predicate = (*valcontents).u_buffer_objfwd.predicate;
if newval.is_not_nil() && predicate.is_symbol() {
let pred_sym: LispSymbolRef = predicate.into();
let mut prop = get(pred_sym, Qchoice);
if prop.is_not_nil() {
if memq(newval, prop).is_nil() {
wrong_choice(prop, newval);
}
} else {
prop = get(pred_sym, Qrange);
if let Some((min, max)) = prop.into() {
let args = [min, newval, max];
if!newval.is_number() || leq(&args) {
wrong_range(min, max, newval);
}
} else if predicate.is_function() && call!(predicate, newval).is_nil() {
wrong_type!(predicate, newval);
}
}
}
if buf.is_null() {
buf = ThreadState::current_buffer_unchecked().as_mut();
}
*(*valcontents).u_buffer_objfwd.offset.apply_ptr_mut(buf) = newval;
}
Lisp_Fwd_Kboard_Obj => {
|
} else if let Some(ct) = array.as_char_table() {
ct.get(idx as isize)
|
random_line_split
|
data.rs
|
indirect_function", name = "indirect-function")]
pub fn indirect_function_lisp(object: LispObject, _noerror: LispObject) -> LispObject {
match object.as_symbol() {
None => object,
Some(symbol) => symbol.get_indirect_function(),
}
}
/// Return a symbol representing the type of OBJECT.
/// The symbol returned names the object's basic type;
/// for example, (type-of 1) returns `integer'.
#[lisp_fn]
pub fn type_of(object: LispObject) -> LispObject {
match object.get_type() {
Lisp_Type::Lisp_Cons => Qcons,
Lisp_Type::Lisp_Int0 | Lisp_Type::Lisp_Int1 => Qinteger,
Lisp_Type::Lisp_Symbol => Qsymbol,
Lisp_Type::Lisp_String => Qstring,
Lisp_Type::Lisp_Float => Qfloat,
Lisp_Type::Lisp_Misc => {
let m = object.as_misc().unwrap();
match m.get_type() {
Lisp_Misc_Type::Lisp_Misc_Marker => Qmarker,
Lisp_Misc_Type::Lisp_Misc_Overlay => Qoverlay,
Lisp_Misc_Type::Lisp_Misc_Finalizer => Qfinalizer,
Lisp_Misc_Type::Lisp_Misc_User_Ptr => Quser_ptr,
_ => Qnone,
}
}
Lisp_Type::Lisp_Vectorlike => {
let vec = unsafe { object.as_vectorlike_unchecked() };
match vec.pseudovector_type() {
pvec_type::PVEC_NORMAL_VECTOR => Qvector,
pvec_type::PVEC_WINDOW_CONFIGURATION => Qwindow_configuration,
pvec_type::PVEC_PROCESS => Qprocess,
pvec_type::PVEC_WINDOW => Qwindow,
pvec_type::PVEC_SUBR => Qsubr,
pvec_type::PVEC_COMPILED => Qcompiled_function,
pvec_type::PVEC_BUFFER => Qbuffer,
pvec_type::PVEC_CHAR_TABLE => Qchar_table,
pvec_type::PVEC_BOOL_VECTOR => Qbool_vector,
pvec_type::PVEC_FRAME => Qframe,
pvec_type::PVEC_HASH_TABLE => Qhash_table,
pvec_type::PVEC_THREAD => Qthread,
pvec_type::PVEC_MUTEX => Qmutex,
pvec_type::PVEC_CONDVAR => Qcondition_variable,
pvec_type::PVEC_TERMINAL => Qterminal,
pvec_type::PVEC_MODULE_FUNCTION => Qmodule_function,
pvec_type::PVEC_FONT => {
if object.is_font_spec() {
Qfont_spec
} else if object.is_font_entity() {
Qfont_entity
} else if object.is_font_object() {
Qfont_object
} else {
Qfont
}
}
pvec_type::PVEC_RECORD => unsafe {
let vec = object.as_vector_unchecked();
let t = vec.get_unchecked(0);
if t.is_record() {
let v = t.as_vector_unchecked();
if v.len() > 1 {
return v.get_unchecked(1);
}
}
t
},
_ => Qnone,
}
}
}
}
#[lisp_fn]
pub fn subr_lang(subr: LispSubrRef) -> LispObject {
if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_C {
"C".into()
} else if subr.lang == Lisp_Subr_Lang::Lisp_Subr_Lang_Rust {
"Rust".into()
} else {
unreachable!()
}
}
/// Return the element of ARRAY at index IDX.
/// ARRAY may be a vector, a string, a char-table, a bool-vector, a record,
/// or a byte-code object. IDX starts at 0.
#[lisp_fn]
pub fn aref(array: LispObject, idx: EmacsInt) -> LispObject {
if idx < 0 {
args_out_of_range!(array, idx);
}
let idx_u = idx as usize;
if let Some(s) = array.as_string() {
match s.char_indices().nth(idx_u) {
None => {
args_out_of_range!(array, idx);
}
Some((_, cp)) => EmacsInt::from(cp).into(),
}
} else if let Some(bv) = array.as_bool_vector() {
if idx_u >= bv.len() {
args_out_of_range!(array, idx);
}
unsafe { bv.get_unchecked(idx_u) }
} else if let Some(ct) = array.as_char_table() {
ct.get(idx as isize)
} else if let Some(v) = array.as_vector() {
if idx_u >= v.len() {
args_out_of_range!(array, idx);
}
unsafe { v.get_unchecked(idx_u) }
} else if array.is_byte_code_function() || array.is_record() {
let vl = array.as_vectorlike().unwrap();
if idx >= vl.pseudovector_size() {
args_out_of_range!(array, idx);
}
let v = unsafe { vl.as_vector_unchecked() };
unsafe { v.get_unchecked(idx_u) }
} else {
wrong_type!(Qarrayp, array);
}
}
/// Store into the element of ARRAY at index IDX the value NEWELT.
/// Return NEWELT. ARRAY may be a vector, a string, a char-table or a
/// bool-vector. IDX starts at 0.
#[lisp_fn]
pub fn aset(array: LispObject, idx: EmacsInt, newelt: LispObject) -> LispObject {
if let Some(vl) = array.as_vectorlike() {
if let Some(mut v) = vl.as_vector() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
v.set_checked(idx as usize, newelt);
} else if let Some(mut bv) = vl.as_bool_vector() {
bv.set_checked(idx as usize, newelt.is_not_nil());
} else if let Some(_tbl) = vl.as_char_table() {
verify_lisp_type!(idx, Qcharacterp);
unsafe { CHAR_TABLE_SET(array, idx as c_int, newelt) };
} else if let Some(mut record) = vl.as_record() {
record.set_checked(idx as usize, newelt);
} else {
unreachable!();
}
} else if let Some(mut s) = array.as_string() {
unsafe { CHECK_IMPURE(array, array.get_untaggedptr()) };
if idx < 0 || idx >= s.len_chars() as EmacsInt {
args_out_of_range!(array, idx);
}
let c = newelt.as_character_or_error();
if s.is_multibyte() {
unsafe { aset_multibyte_string(array, idx, c as c_int) };
} else if is_single_byte_char(c) {
s.set_byte(idx as isize, c as u8);
} else {
if s.chars().any(|i|!is_ascii(i)) {
args_out_of_range!(array, newelt);
}
s.mark_as_multibyte();
unsafe { aset_multibyte_string(array, idx, c as c_int) };
}
} else {
wrong_type!(Qarrayp, array);
}
newelt
}
/// Set SYMBOL's function definition to DEFINITION.
/// Associates the function with the current load file, if any.
/// The optional third argument DOCSTRING specifies the documentation string
/// for SYMBOL; if it is omitted or nil, SYMBOL uses the documentation string
/// determined by DEFINITION.
///
/// Internally, this normally uses `fset', but if SYMBOL has a
/// `defalias-fset-function' property, the associated value is used instead.
///
/// The return value is undefined.
#[lisp_fn(min = "2")]
pub fn defalias(
symbol: LispSymbolRef,
mut definition: LispObject,
docstring: LispObject,
) -> LispObject {
let sym = LispObject::from(symbol);
unsafe {
if globals.Vpurify_flag.is_not_nil()
// If `definition' is a keymap, immutable (and copying) is wrong.
&& get_keymap(definition, false, false).is_nil()
{
definition = Fpurecopy(definition);
}
}
let autoload = is_autoload(definition);
if unsafe { globals.Vpurify_flag.is_nil() } ||!autoload {
// Only add autoload entries after dumping, because the ones before are
// not useful and else we get loads of them from the loaddefs.el.
if is_autoload(symbol.get_function()) {
// Remember that the function was already an autoload.
loadhist_attach((true, sym).into());
}
loadhist_attach((if autoload { Qautoload } else { Qdefun }, sym).into());
}
// Handle automatic advice activation.
let hook = get(symbol, Qdefalias_fset_function);
if hook.is_not_nil() {
call!(hook, sym, definition);
} else {
fset(symbol, definition);
}
if docstring.is_not_nil() {
put(symbol, Qfunction_documentation, docstring);
}
// We used to return `definition', but now that `defun' and `defmacro' expand
// to a call to `defalias', we return `symbol' for backward compatibility
// (bug#11686).
sym
}
/// Return minimum and maximum number of args allowed for SUBR.
/// SUBR must be a built-in function.
/// The returned value is a pair (MIN. MAX). MIN is the minimum number
/// of args. MAX is the maximum number or the symbol `many', for a
/// function with `&rest' args, or `unevalled' for a special form.
#[lisp_fn]
pub fn subr_arity(subr: LispSubrRef) -> (EmacsInt, LispObject) {
let minargs = subr.min_args();
let maxargs = if subr.is_many() {
Qmany
} else if subr.is_unevalled() {
Qunevalled
} else {
EmacsInt::from(subr.max_args()).into()
};
(EmacsInt::from(minargs), maxargs)
}
/// Return name of subroutine SUBR.
/// SUBR must be a built-in function.
#[lisp_fn]
pub fn subr_name(subr: LispSubrRef) -> LispObject {
let name = subr.symbol_name();
unsafe { build_string(name) }
}
/// Return the byteorder for the machine.
/// Returns 66 (ASCII uppercase B) for big endian machines or 108
/// (ASCII lowercase l) for small endian machines.
#[lisp_fn]
pub fn byteorder() -> u8 {
if cfg!(endian = "big") {
b'B'
} else {
b'l'
}
}
/// Return the default value of SYMBOL, but don't check for voidness.
/// Return Qunbound if it is void.
fn default_value(mut symbol: LispSymbolRef) -> LispObject {
while symbol.get_redirect() == symbol_redirect::SYMBOL_VARALIAS {
symbol = symbol.get_indirect_variable();
}
match symbol.get_redirect() {
symbol_redirect::SYMBOL_PLAINVAL => unsafe { symbol.get_value() },
symbol_redirect::SYMBOL_LOCALIZED => {
// If var is set up for a buffer that lacks a local value for it,
// the current value is nominally the default value.
// But the `realvalue' slot may be more up to date, since
// ordinary setq stores just that slot. So use that.
let blv = unsafe { symbol.get_blv() };
let fwd = blv.get_fwd();
if!fwd.is_null() && blv.valcell.eq(blv.defcell)
|
else {
let (_, d) = blv.defcell.into();
d
}
}
symbol_redirect::SYMBOL_FORWARDED => unsafe {
let valcontents = symbol.get_fwd();
// For a built-in buffer-local variable, get the default value
// rather than letting do_symval_forwarding get the current value.
if let Some(buffer_objfwd) = as_buffer_objfwd(valcontents) {
let offset = buffer_objfwd.offset;
if per_buffer_idx_from_field_offset(offset)!= 0 {
return per_buffer_default(offset.get_byte_offset() as i32);
}
}
// For other variables, get the current value.
do_symval_forwarding(valcontents)
},
_ => panic!("Symbol type has no default value"),
}
}
/// Return t if SYMBOL has a non-void default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable.
#[lisp_fn]
pub fn default_boundp(symbol: LispSymbolRef) -> bool {
!default_value(symbol).eq(Qunbound)
}
/// Return SYMBOL's default value.
/// This is the value that is seen in buffers that do not have their own values
/// for this variable. The default value is meaningful for variables with
/// local bindings in certain buffers.
#[lisp_fn(c_name = "default_value", name = "default-value")]
pub fn default_value_lisp(symbol: LispSymbolRef) -> LispObject {
let value = default_value(symbol);
if value.eq(Qunbound) {
void_variable!(symbol);
}
value
}
/***********************************************************************
Getting and Setting Values of Symbols
***********************************************************************/
/// These are the types of forwarding objects used in the value slot
/// of symbols for special built-in variables whose value is stored in
/// C/Rust static variables.
pub type Lisp_Fwd_Type = u32;
pub const Lisp_Fwd_Int: Lisp_Fwd_Type = 0; // Fwd to a C `int' variable.
pub const Lisp_Fwd_Bool: Lisp_Fwd_Type = 1; // Fwd to a C boolean var.
pub const Lisp_Fwd_Obj: Lisp_Fwd_Type = 2; // Fwd to a C LispObject variable.
pub const Lisp_Fwd_Buffer_Obj: Lisp_Fwd_Type = 3; // Fwd to a LispObject field of buffers.
pub const Lisp_Fwd_Kboard_Obj: Lisp_Fwd_Type = 4; // Fwd to a LispObject field of kboards.
// these structs will still need to be compatible with their C
// counterparts until all the C callers of the DEFVAR macros are
// ported to Rust. However, as do_symval_forwarding and
// store_symval_forwarding have been ported, some Rust-isms have
// started to happen.
#[repr(C)]
pub union Lisp_Fwd {
pub u_intfwd: Lisp_Intfwd,
pub u_boolfwd: Lisp_Boolfwd,
pub u_objfwd: Lisp_Objfwd,
pub u_buffer_objfwd: Lisp_Buffer_Objfwd,
pub u_kboard_objfwd: Lisp_Kboard_Objfwd,
}
/// Forwarding pointer to an int variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified int variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Intfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Int
pub intvar: *mut EmacsInt,
}
/// Boolean forwarding pointer to an int variable.
/// This is like Lisp_Intfwd except that the ostensible
/// "value" of the symbol is t if the bool variable is true,
/// nil if it is false.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Boolfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Bool
pub boolvar: *mut bool,
}
/// Forwarding pointer to a LispObject variable.
/// This is allowed only in the value cell of a symbol,
/// and it means that the symbol's value really lives in the
/// specified variable.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Obj
pub objvar: *mut LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current buffer. Value is byte index of slot within buffer.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Buffer_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Buffer_Obj
pub offset: FieldOffset<remacs_sys::Lisp_Buffer, LispObject>,
// One of Qnil, Qintegerp, Qsymbolp, Qstringp, Qfloatp or Qnumberp.
pub predicate: LispObject,
}
/// Like Lisp_Objfwd except that value lives in a slot in the
/// current kboard.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct Lisp_Kboard_Objfwd {
pub ty: Lisp_Fwd_Type, // = Lisp_Fwd_Kboard_Obj
pub offset: FieldOffset<remacs_sys::kboard, LispObject>,
}
/// Given the raw contents of a symbol value cell,
/// return the Lisp value of the symbol.
/// This does not handle buffer-local variables; use
/// swap_in_symval_forwarding for that.
#[no_mangle]
pub unsafe extern "C" fn do_symval_forwarding(valcontents: *const Lisp_Fwd) -> LispObject {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => LispObject::from(*(*valcontents).u_intfwd.intvar),
Lisp_Fwd_Bool => LispObject::from(*(*valcontents).u_boolfwd.boolvar),
Lisp_Fwd_Obj => (*(*valcontents).u_objfwd.objvar),
Lisp_Fwd_Buffer_Obj => *(*valcontents)
.u_buffer_objfwd
.offset
.apply_ptr(ThreadState::current_buffer_unchecked().as_mut()),
Lisp_Fwd_Kboard_Obj => {
// We used to simply use current_kboard here, but from Lisp
// code, its value is often unexpected. It seems nicer to
// allow constructions like this to work as intuitively expected:
//
// (with-selected-frame frame
// (define-key local-function-map "\eOP" [f1]))
//
// On the other hand, this affects the semantics of
// last-command and real-last-command, and people may rely on
// that. I took a quick look at the Lisp codebase, and I
// don't think anything will break. --lorentey
let frame = selected_frame();
if!frame.is_live() {
panic!("Selected frame is not live");
}
let kboard = (*frame.terminal).kboard;
*(*valcontents).u_kboard_objfwd.offset.apply_ptr(kboard)
}
_ => panic!("Unknown intfwd type"),
}
}
/// Store NEWVAL into SYMBOL, where VALCONTENTS is found in the value cell
/// of SYMBOL. If SYMBOL is buffer-local, VALCONTENTS should be the
/// buffer-independent contents of the value cell: forwarded just one
/// step past the buffer-localness.
///
/// BUF non-zero means set the value in buffer BUF instead of the
/// current buffer. This only plays a role for per-buffer variables.
#[no_mangle]
pub unsafe extern "C" fn store_symval_forwarding(
valcontents: *mut Lisp_Fwd,
newval: LispObject,
mut buf: *mut Lisp_Buffer,
) {
match (*valcontents).u_intfwd.ty {
Lisp_Fwd_Int => (*(*valcontents).u_intfwd.intvar) = newval.as_fixnum_or_error(),
Lisp_Fwd_Bool => (*(*valcontents).u_boolfwd.boolvar) = newval.is_not_nil(),
Lisp_Fwd_Obj => {
(*(*valcontents).u_objfwd.objvar) = newval;
update_buffer_defaults((*valcontents).u_objfwd.objvar, newval);
}
Lisp_Fwd_Buffer_Obj => {
let predicate = (*valcontents).u_buffer_objfwd.predicate;
if newval.is_not_nil() && predicate.is_symbol() {
let pred_sym: LispSymbolRef = predicate.into();
let mut prop = get(pred_sym, Qchoice);
if prop.is_not_nil() {
if memq(newval, prop).is_nil() {
wrong_choice(prop, newval);
}
} else {
prop = get(pred_sym, Qrange);
if let Some((min, max)) = prop.into() {
let args = [min, newval, max];
if!newval.is_number() || leq(&args) {
wrong_range(min, max, newval);
}
} else if predicate.is_function() && call!(predicate, newval).is_nil() {
wrong_type!(predicate, newval);
}
}
}
if buf.is_null() {
buf = ThreadState::current_buffer_unchecked().as_mut();
}
*(*valcontents).u_buffer_objfwd.offset.apply_ptr_mut(buf) = newval;
}
Lisp_Fwd_Kboard_Obj => {
|
{
unsafe { do_symval_forwarding(fwd) }
}
|
conditional_block
|
mod.rs
|
mod configuration;
mod transition;
pub mod automaton;
pub mod from_str;
#[macro_use]
pub mod coarse_to_fine;
use crate::util::push_down::Pushdown;
use search::agenda::weighted::Weighted;
use std::vec::Vec;
pub use self::configuration::Configuration;
pub use self::transition::Transition;
/// Something we can `apply` to a configuration.
pub trait Instruction {
type Storage;
fn apply(&self, _: Self::Storage) -> Vec<Self::Storage>;
}
/// items of the transition system
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Item<S, I, T, W>(
pub Configuration<S, T, W>,
pub Pushdown<Transition<I, T, W>>,
);
pub type VecItem<S, I, T, W> = (Configuration<S, T, W>, Vec<Transition<I, T, W>>);
|
}
}
/// Something that recognises words and output corresponding parses.
pub trait Recognisable<T, W> {
// TODO rename to Recogniser
type Parse;
fn recognise<'a>(&'a self, word: Vec<T>) -> Box<dyn Iterator<Item = Self::Parse> + 'a>;
fn recognise_beam_search<'a>(
&'a self,
beam: usize,
word: Vec<T>,
) -> Box<dyn Iterator<Item = Self::Parse> + 'a>;
}
|
impl<S, I: Instruction<Storage = S>, T, W: Clone> Weighted for Item<S, I, T, W> {
type Weight = W;
fn get_weight(&self) -> W {
self.0.weight.clone()
|
random_line_split
|
mod.rs
|
mod configuration;
mod transition;
pub mod automaton;
pub mod from_str;
#[macro_use]
pub mod coarse_to_fine;
use crate::util::push_down::Pushdown;
use search::agenda::weighted::Weighted;
use std::vec::Vec;
pub use self::configuration::Configuration;
pub use self::transition::Transition;
/// Something we can `apply` to a configuration.
pub trait Instruction {
type Storage;
fn apply(&self, _: Self::Storage) -> Vec<Self::Storage>;
}
/// items of the transition system
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Item<S, I, T, W>(
pub Configuration<S, T, W>,
pub Pushdown<Transition<I, T, W>>,
);
pub type VecItem<S, I, T, W> = (Configuration<S, T, W>, Vec<Transition<I, T, W>>);
impl<S, I: Instruction<Storage = S>, T, W: Clone> Weighted for Item<S, I, T, W> {
type Weight = W;
fn
|
(&self) -> W {
self.0.weight.clone()
}
}
/// Something that recognises words and output corresponding parses.
pub trait Recognisable<T, W> {
// TODO rename to Recogniser
type Parse;
fn recognise<'a>(&'a self, word: Vec<T>) -> Box<dyn Iterator<Item = Self::Parse> + 'a>;
fn recognise_beam_search<'a>(
&'a self,
beam: usize,
word: Vec<T>,
) -> Box<dyn Iterator<Item = Self::Parse> + 'a>;
}
|
get_weight
|
identifier_name
|
parser.rs
|
extern crate thunderdome;
#[cfg(test)]
mod parser_tests {
use thunderdome::parser::*;
fn
|
(q: &str) -> Option<ParsedGraphQuery> {
let result = pre_parse(q);
assert!(result.is_ok());
result.ok()
}
#[test]
fn global_graph_query() {
validate("g.V()");
}
#[test]
fn vertex_query() {
validate("g.v(1)");
validate("g.v(1,2)");
validate("g.v(1, 2)");
}
#[test]
fn simple_step_test() {
validate("g.v(1).outV()");
let result = validate("g.v(1).outV().inE()").unwrap();
assert_eq!(result.steps.len(), 3);
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
let step2 = result.steps.get(2).unwrap();
assert_eq!(step2.name, "inE".to_string());
}
#[test]
fn test_args() {
let result = validate("g.v(1).outV('edge').has('age', 30)").unwrap();
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
// make sure the arg is edge. should be a string and unquoted
match step1.args.get(0).unwrap() {
&Arg::String(ref x) if *x == "edge".to_string() => {},
&Arg::String(ref x) => panic!("{}", x),
x => { panic!("wrong type") }
}
}
#[test]
fn test_args_numbers() {
// maybe a weird offset command?
validate("g.V().limit(10, 20)");
let result = validate("g.V().limit(10.0)").unwrap();
match result.steps.get(1).unwrap().args.get(0).unwrap() {
&Arg::Float(ref x) if *x == 10.0 => { },
_ => { panic!("OH NOES")}
}
}
}
|
validate
|
identifier_name
|
parser.rs
|
extern crate thunderdome;
#[cfg(test)]
|
mod parser_tests {
use thunderdome::parser::*;
fn validate(q: &str) -> Option<ParsedGraphQuery> {
let result = pre_parse(q);
assert!(result.is_ok());
result.ok()
}
#[test]
fn global_graph_query() {
validate("g.V()");
}
#[test]
fn vertex_query() {
validate("g.v(1)");
validate("g.v(1,2)");
validate("g.v(1, 2)");
}
#[test]
fn simple_step_test() {
validate("g.v(1).outV()");
let result = validate("g.v(1).outV().inE()").unwrap();
assert_eq!(result.steps.len(), 3);
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
let step2 = result.steps.get(2).unwrap();
assert_eq!(step2.name, "inE".to_string());
}
#[test]
fn test_args() {
let result = validate("g.v(1).outV('edge').has('age', 30)").unwrap();
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
// make sure the arg is edge. should be a string and unquoted
match step1.args.get(0).unwrap() {
&Arg::String(ref x) if *x == "edge".to_string() => {},
&Arg::String(ref x) => panic!("{}", x),
x => { panic!("wrong type") }
}
}
#[test]
fn test_args_numbers() {
// maybe a weird offset command?
validate("g.V().limit(10, 20)");
let result = validate("g.V().limit(10.0)").unwrap();
match result.steps.get(1).unwrap().args.get(0).unwrap() {
&Arg::Float(ref x) if *x == 10.0 => { },
_ => { panic!("OH NOES")}
}
}
}
|
random_line_split
|
|
parser.rs
|
extern crate thunderdome;
#[cfg(test)]
mod parser_tests {
use thunderdome::parser::*;
fn validate(q: &str) -> Option<ParsedGraphQuery> {
let result = pre_parse(q);
assert!(result.is_ok());
result.ok()
}
#[test]
fn global_graph_query() {
validate("g.V()");
}
#[test]
fn vertex_query() {
validate("g.v(1)");
validate("g.v(1,2)");
validate("g.v(1, 2)");
}
#[test]
fn simple_step_test() {
validate("g.v(1).outV()");
let result = validate("g.v(1).outV().inE()").unwrap();
assert_eq!(result.steps.len(), 3);
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
let step2 = result.steps.get(2).unwrap();
assert_eq!(step2.name, "inE".to_string());
}
#[test]
fn test_args() {
let result = validate("g.v(1).outV('edge').has('age', 30)").unwrap();
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
// make sure the arg is edge. should be a string and unquoted
match step1.args.get(0).unwrap() {
&Arg::String(ref x) if *x == "edge".to_string() => {},
&Arg::String(ref x) => panic!("{}", x),
x => { panic!("wrong type") }
}
}
#[test]
fn test_args_numbers() {
// maybe a weird offset command?
validate("g.V().limit(10, 20)");
let result = validate("g.V().limit(10.0)").unwrap();
match result.steps.get(1).unwrap().args.get(0).unwrap() {
&Arg::Float(ref x) if *x == 10.0 =>
|
,
_ => { panic!("OH NOES")}
}
}
}
|
{ }
|
conditional_block
|
parser.rs
|
extern crate thunderdome;
#[cfg(test)]
mod parser_tests {
use thunderdome::parser::*;
fn validate(q: &str) -> Option<ParsedGraphQuery> {
let result = pre_parse(q);
assert!(result.is_ok());
result.ok()
}
#[test]
fn global_graph_query() {
validate("g.V()");
}
#[test]
fn vertex_query() {
validate("g.v(1)");
validate("g.v(1,2)");
validate("g.v(1, 2)");
}
#[test]
fn simple_step_test() {
validate("g.v(1).outV()");
let result = validate("g.v(1).outV().inE()").unwrap();
assert_eq!(result.steps.len(), 3);
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
let step2 = result.steps.get(2).unwrap();
assert_eq!(step2.name, "inE".to_string());
}
#[test]
fn test_args() {
let result = validate("g.v(1).outV('edge').has('age', 30)").unwrap();
let step1 = result.steps.get(1).unwrap();
assert_eq!(step1.name, "outV".to_string());
// make sure the arg is edge. should be a string and unquoted
match step1.args.get(0).unwrap() {
&Arg::String(ref x) if *x == "edge".to_string() => {},
&Arg::String(ref x) => panic!("{}", x),
x => { panic!("wrong type") }
}
}
#[test]
fn test_args_numbers()
|
}
|
{
// maybe a weird offset command?
validate("g.V().limit(10, 20)");
let result = validate("g.V().limit(10.0)").unwrap();
match result.steps.get(1).unwrap().args.get(0).unwrap() {
&Arg::Float(ref x) if *x == 10.0 => { },
_ => { panic!("OH NOES")}
}
}
|
identifier_body
|
imports.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use Indent;
use utils;
use syntax::codemap::{self, BytePos, Span};
use codemap::SpanUtils;
use lists::{write_list, itemize_list, ListItem, ListFormatting, SeparatorTactic, definitive_tactic};
use types::rewrite_path;
use rewrite::{Rewrite, RewriteContext};
use visitor::FmtVisitor;
use std::cmp::{self, Ordering};
use syntax::{ast, ptr};
fn path_of(a: &ast::ViewPath_) -> &ast::Path {
match a {
&ast::ViewPath_::ViewPathSimple(_, ref p) => p,
&ast::ViewPath_::ViewPathGlob(ref p) => p,
&ast::ViewPath_::ViewPathList(ref p, _) => p,
}
}
fn compare_path_segments(a: &ast::PathSegment, b: &ast::PathSegment) -> Ordering {
a.identifier.name.as_str().cmp(&b.identifier.name.as_str())
}
fn compare_paths(a: &ast::Path, b: &ast::Path) -> Ordering {
for segment in a.segments.iter().zip(b.segments.iter()) {
let ord = compare_path_segments(segment.0, segment.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.segments.len().cmp(&b.segments.len())
}
fn compare_path_list_items(a: &ast::PathListItem, b: &ast::PathListItem) -> Ordering {
let name_ordering = match a.node.name() {
Some(a_name) => {
match b.node.name() {
Some(b_name) => a_name.name.as_str().cmp(&b_name.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
};
if name_ordering == Ordering::Equal {
match a.node.rename() {
Some(a_rename) => {
match b.node.rename() {
Some(b_rename) => a_rename.name.as_str().cmp(&b_rename.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
}
} else {
name_ordering
}
}
fn compare_path_list_item_lists(a_items: &Vec<ast::PathListItem>,
b_items: &Vec<ast::PathListItem>)
-> Ordering {
let mut a = a_items.clone();
let mut b = b_items.clone();
a.sort_by(|a, b| compare_path_list_items(a, b));
b.sort_by(|a, b| compare_path_list_items(a, b));
for comparison_pair in a.iter().zip(b.iter()) {
let ord = compare_path_list_items(comparison_pair.0, comparison_pair.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.len().cmp(&b.len())
}
fn compare_view_path_types(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
use syntax::ast::ViewPath_::*;
match (a, b) {
(&ViewPathSimple(..), &ViewPathSimple(..)) => Ordering::Equal,
(&ViewPathSimple(..), _) => Ordering::Less,
(&ViewPathGlob(_), &ViewPathSimple(..)) => Ordering::Greater,
(&ViewPathGlob(_), &ViewPathGlob(_)) => Ordering::Equal,
(&ViewPathGlob(_), &ViewPathList(..)) => Ordering::Less,
(&ViewPathList(_, ref a_items), &ViewPathList(_, ref b_items)) => {
compare_path_list_item_lists(a_items, b_items)
}
(&ViewPathList(..), _) => Ordering::Greater,
}
}
fn compare_view_paths(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
match compare_paths(path_of(a), path_of(b)) {
Ordering::Equal => compare_view_path_types(a, b),
cmp => cmp,
}
}
fn compare_use_items(a: &ast::Item, b: &ast::Item) -> Option<Ordering> {
match (&a.node, &b.node) {
(&ast::ItemKind::Use(ref a_vp), &ast::ItemKind::Use(ref b_vp)) => {
Some(compare_view_paths(&a_vp.node, &b_vp.node))
}
_ => None,
}
}
// TODO (some day) remove unused imports, expand globs, compress many single
// imports into a list import.
impl Rewrite for ast::ViewPath {
// Returns an empty string when the ViewPath is empty (like foo::bar::{})
fn rewrite(&self, context: &RewriteContext, width: usize, offset: Indent) -> Option<String> {
match self.node {
ast::ViewPath_::ViewPathList(_, ref path_list) if path_list.is_empty() => {
Some(String::new())
}
ast::ViewPath_::ViewPathList(ref path, ref path_list) => {
rewrite_use_list(width, offset, path, path_list, self.span, context)
}
ast::ViewPath_::ViewPathGlob(_) => {
// FIXME convert to list?
None
}
ast::ViewPath_::ViewPathSimple(ident, ref path) => {
let ident_str = ident.to_string();
// 4 = " as ".len()
let budget = try_opt!(width.checked_sub(ident_str.len() + 4));
let path_str = try_opt!(rewrite_path(context, false, None, path, budget, offset));
Some(if path.segments.last().unwrap().identifier == ident
|
else {
format!("{} as {}", path_str, ident_str)
})
}
}
}
}
impl<'a> FmtVisitor<'a> {
pub fn format_imports(&mut self, use_items: &[ptr::P<ast::Item>]) {
// Find the location immediately before the first use item in the run. This must not lie
// before the current `self.last_pos`
let pos_before_first_use_item = use_items.first()
.map(|p_i| cmp::max(self.last_pos, p_i.span.lo))
.unwrap_or(self.last_pos);
// Construct a list of pairs, each containing a `use` item and the start of span before
// that `use` item.
let mut last_pos_of_prev_use_item = pos_before_first_use_item;
let mut ordered_use_items = use_items.iter()
.map(|p_i| {
let new_item = (&*p_i, last_pos_of_prev_use_item);
last_pos_of_prev_use_item = p_i.span.hi;
new_item
})
.collect::<Vec<_>>();
let pos_after_last_use_item = last_pos_of_prev_use_item;
// Order the imports by view-path & other import path properties
ordered_use_items.sort_by(|a, b| compare_use_items(a.0, b.0).unwrap());
// First, output the span before the first import
let prev_span_str = self.snippet(codemap::mk_sp(self.last_pos, pos_before_first_use_item));
// Look for purely trailing space at the start of the prefix snippet before a linefeed, or
// a prefix that's entirely horizontal whitespace.
let prefix_span_start = match prev_span_str.find('\n') {
Some(offset) if prev_span_str[..offset].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
None if prev_span_str.trim().is_empty() => pos_before_first_use_item,
_ => self.last_pos,
};
// Look for indent (the line part preceding the use is all whitespace) and excise that
// from the prefix
let span_end = match prev_span_str.rfind('\n') {
Some(offset) if prev_span_str[offset..].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
_ => pos_before_first_use_item,
};
self.last_pos = prefix_span_start;
self.format_missing(span_end);
for ordered in ordered_use_items {
// Fake out the formatter by setting `self.last_pos` to the appropriate location before
// each item before visiting it.
self.last_pos = ordered.1;
self.visit_item(&ordered.0);
}
self.last_pos = pos_after_last_use_item;
}
pub fn format_import(&mut self, vis: &ast::Visibility, vp: &ast::ViewPath, span: Span) {
let vis = utils::format_visibility(vis);
let mut offset = self.block_indent;
offset.alignment += vis.len() + "use ".len();
// 1 = ";"
match vp.rewrite(&self.get_context(),
self.config.max_width - offset.width() - 1,
offset) {
Some(ref s) if s.is_empty() => {
// Format up to last newline
let prev_span = codemap::mk_sp(self.last_pos, source!(self, span).lo);
let span_end = match self.snippet(prev_span).rfind('\n') {
Some(offset) => self.last_pos + BytePos(offset as u32),
None => source!(self, span).lo,
};
self.format_missing(span_end);
self.last_pos = source!(self, span).hi;
}
Some(ref s) => {
let s = format!("{}use {};", vis, s);
self.format_missing_with_indent(source!(self, span).lo);
self.buffer.push_str(&s);
self.last_pos = source!(self, span).hi;
}
None => {
self.format_missing_with_indent(source!(self, span).lo);
self.format_missing(source!(self, span).hi);
}
}
}
}
fn rewrite_single_use_list(path_str: Option<String>, vpi: &ast::PathListItem) -> String {
let path_item_str = if let ast::PathListItemKind::Ident { name,.. } = vpi.node {
// A name.
match path_str {
Some(path_str) => format!("{}::{}", path_str, name),
None => name.to_string(),
}
} else {
// `self`.
match path_str {
Some(path_str) => path_str,
// This catches the import: use {self}, which is a compiler error, so we just
// leave it alone.
None => "{self}".to_owned(),
}
};
append_alias(path_item_str, vpi)
}
fn rewrite_path_item(vpi: &&ast::PathListItem) -> Option<String> {
let path_item_str = match vpi.node {
ast::PathListItemKind::Ident { name,.. } => name.to_string(),
ast::PathListItemKind::Mod {.. } => "self".to_owned(),
};
Some(append_alias(path_item_str, vpi))
}
fn append_alias(path_item_str: String, vpi: &ast::PathListItem) -> String {
match vpi.node {
ast::PathListItemKind::Ident { rename: Some(rename),.. } |
ast::PathListItemKind::Mod { rename: Some(rename),.. } => {
format!("{} as {}", path_item_str, rename)
}
_ => path_item_str,
}
}
// Pretty prints a multi-item import.
// Assumes that path_list.len() > 0.
pub fn rewrite_use_list(width: usize,
offset: Indent,
path: &ast::Path,
path_list: &[ast::PathListItem],
span: Span,
context: &RewriteContext)
-> Option<String> {
// Returns a different option to distinguish `::foo` and `foo`
let opt_path_str = if!path.to_string().is_empty() {
Some(path.to_string())
} else if path.global {
// path is absolute, we return an empty String to avoid a double `::`
Some(String::new())
} else {
None
};
match path_list.len() {
0 => unreachable!(),
1 => return Some(rewrite_single_use_list(opt_path_str, &path_list[0])),
_ => (),
}
// 2 = ::
let path_separation_w = if opt_path_str.is_some() { 2 } else { 0 };
// 1 = {
let supp_indent = path.to_string().len() + path_separation_w + 1;
// 1 = }
let remaining_width = width.checked_sub(supp_indent + 1).unwrap_or(0);
let mut items = {
// Dummy value, see explanation below.
let mut items = vec![ListItem::from_str("")];
let iter = itemize_list(context.codemap,
path_list.iter(),
"}",
|vpi| vpi.span.lo,
|vpi| vpi.span.hi,
rewrite_path_item,
context.codemap.span_after(span, "{"),
span.hi);
items.extend(iter);
items
};
// We prefixed the item list with a dummy value so that we can
// potentially move "self" to the front of the vector without touching
// the rest of the items.
let has_self = move_self_to_front(&mut items);
let first_index = if has_self { 0 } else { 1 };
if context.config.reorder_imported_names {
items[1..].sort_by(|a, b| a.item.cmp(&b.item));
}
let tactic = definitive_tactic(&items[first_index..],
::lists::ListTactic::Mixed,
remaining_width);
let fmt = ListFormatting {
tactic: tactic,
separator: ",",
trailing_separator: SeparatorTactic::Never,
indent: offset + supp_indent,
// FIXME This is too conservative, and will not use all width
// available
// (loose 1 column (";"))
width: remaining_width,
ends_with_newline: false,
config: context.config,
};
let list_str = try_opt!(write_list(&items[first_index..], &fmt));
Some(match opt_path_str {
Some(opt_path_str) => format!("{}::{{{}}}", opt_path_str, list_str),
None => format!("{{{}}}", list_str),
})
}
// Returns true when self item was found.
fn move_self_to_front(items: &mut Vec<ListItem>) -> bool {
match items.iter().position(|item| item.item.as_ref().map(|x| &x[..]) == Some("self")) {
Some(pos) => {
items[0] = items.remove(pos);
true
}
None => false,
}
}
|
{
path_str
}
|
conditional_block
|
imports.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use Indent;
use utils;
use syntax::codemap::{self, BytePos, Span};
use codemap::SpanUtils;
use lists::{write_list, itemize_list, ListItem, ListFormatting, SeparatorTactic, definitive_tactic};
use types::rewrite_path;
use rewrite::{Rewrite, RewriteContext};
use visitor::FmtVisitor;
use std::cmp::{self, Ordering};
use syntax::{ast, ptr};
fn path_of(a: &ast::ViewPath_) -> &ast::Path {
match a {
&ast::ViewPath_::ViewPathSimple(_, ref p) => p,
&ast::ViewPath_::ViewPathGlob(ref p) => p,
&ast::ViewPath_::ViewPathList(ref p, _) => p,
}
}
fn compare_path_segments(a: &ast::PathSegment, b: &ast::PathSegment) -> Ordering {
a.identifier.name.as_str().cmp(&b.identifier.name.as_str())
}
fn compare_paths(a: &ast::Path, b: &ast::Path) -> Ordering {
for segment in a.segments.iter().zip(b.segments.iter()) {
let ord = compare_path_segments(segment.0, segment.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.segments.len().cmp(&b.segments.len())
}
fn compare_path_list_items(a: &ast::PathListItem, b: &ast::PathListItem) -> Ordering {
let name_ordering = match a.node.name() {
Some(a_name) => {
match b.node.name() {
Some(b_name) => a_name.name.as_str().cmp(&b_name.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
};
if name_ordering == Ordering::Equal {
match a.node.rename() {
Some(a_rename) => {
match b.node.rename() {
Some(b_rename) => a_rename.name.as_str().cmp(&b_rename.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
}
} else {
name_ordering
}
}
fn
|
(a_items: &Vec<ast::PathListItem>,
b_items: &Vec<ast::PathListItem>)
-> Ordering {
let mut a = a_items.clone();
let mut b = b_items.clone();
a.sort_by(|a, b| compare_path_list_items(a, b));
b.sort_by(|a, b| compare_path_list_items(a, b));
for comparison_pair in a.iter().zip(b.iter()) {
let ord = compare_path_list_items(comparison_pair.0, comparison_pair.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.len().cmp(&b.len())
}
fn compare_view_path_types(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
use syntax::ast::ViewPath_::*;
match (a, b) {
(&ViewPathSimple(..), &ViewPathSimple(..)) => Ordering::Equal,
(&ViewPathSimple(..), _) => Ordering::Less,
(&ViewPathGlob(_), &ViewPathSimple(..)) => Ordering::Greater,
(&ViewPathGlob(_), &ViewPathGlob(_)) => Ordering::Equal,
(&ViewPathGlob(_), &ViewPathList(..)) => Ordering::Less,
(&ViewPathList(_, ref a_items), &ViewPathList(_, ref b_items)) => {
compare_path_list_item_lists(a_items, b_items)
}
(&ViewPathList(..), _) => Ordering::Greater,
}
}
fn compare_view_paths(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
match compare_paths(path_of(a), path_of(b)) {
Ordering::Equal => compare_view_path_types(a, b),
cmp => cmp,
}
}
fn compare_use_items(a: &ast::Item, b: &ast::Item) -> Option<Ordering> {
match (&a.node, &b.node) {
(&ast::ItemKind::Use(ref a_vp), &ast::ItemKind::Use(ref b_vp)) => {
Some(compare_view_paths(&a_vp.node, &b_vp.node))
}
_ => None,
}
}
// TODO (some day) remove unused imports, expand globs, compress many single
// imports into a list import.
impl Rewrite for ast::ViewPath {
// Returns an empty string when the ViewPath is empty (like foo::bar::{})
fn rewrite(&self, context: &RewriteContext, width: usize, offset: Indent) -> Option<String> {
match self.node {
ast::ViewPath_::ViewPathList(_, ref path_list) if path_list.is_empty() => {
Some(String::new())
}
ast::ViewPath_::ViewPathList(ref path, ref path_list) => {
rewrite_use_list(width, offset, path, path_list, self.span, context)
}
ast::ViewPath_::ViewPathGlob(_) => {
// FIXME convert to list?
None
}
ast::ViewPath_::ViewPathSimple(ident, ref path) => {
let ident_str = ident.to_string();
// 4 = " as ".len()
let budget = try_opt!(width.checked_sub(ident_str.len() + 4));
let path_str = try_opt!(rewrite_path(context, false, None, path, budget, offset));
Some(if path.segments.last().unwrap().identifier == ident {
path_str
} else {
format!("{} as {}", path_str, ident_str)
})
}
}
}
}
impl<'a> FmtVisitor<'a> {
pub fn format_imports(&mut self, use_items: &[ptr::P<ast::Item>]) {
// Find the location immediately before the first use item in the run. This must not lie
// before the current `self.last_pos`
let pos_before_first_use_item = use_items.first()
.map(|p_i| cmp::max(self.last_pos, p_i.span.lo))
.unwrap_or(self.last_pos);
// Construct a list of pairs, each containing a `use` item and the start of span before
// that `use` item.
let mut last_pos_of_prev_use_item = pos_before_first_use_item;
let mut ordered_use_items = use_items.iter()
.map(|p_i| {
let new_item = (&*p_i, last_pos_of_prev_use_item);
last_pos_of_prev_use_item = p_i.span.hi;
new_item
})
.collect::<Vec<_>>();
let pos_after_last_use_item = last_pos_of_prev_use_item;
// Order the imports by view-path & other import path properties
ordered_use_items.sort_by(|a, b| compare_use_items(a.0, b.0).unwrap());
// First, output the span before the first import
let prev_span_str = self.snippet(codemap::mk_sp(self.last_pos, pos_before_first_use_item));
// Look for purely trailing space at the start of the prefix snippet before a linefeed, or
// a prefix that's entirely horizontal whitespace.
let prefix_span_start = match prev_span_str.find('\n') {
Some(offset) if prev_span_str[..offset].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
None if prev_span_str.trim().is_empty() => pos_before_first_use_item,
_ => self.last_pos,
};
// Look for indent (the line part preceding the use is all whitespace) and excise that
// from the prefix
let span_end = match prev_span_str.rfind('\n') {
Some(offset) if prev_span_str[offset..].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
_ => pos_before_first_use_item,
};
self.last_pos = prefix_span_start;
self.format_missing(span_end);
for ordered in ordered_use_items {
// Fake out the formatter by setting `self.last_pos` to the appropriate location before
// each item before visiting it.
self.last_pos = ordered.1;
self.visit_item(&ordered.0);
}
self.last_pos = pos_after_last_use_item;
}
pub fn format_import(&mut self, vis: &ast::Visibility, vp: &ast::ViewPath, span: Span) {
let vis = utils::format_visibility(vis);
let mut offset = self.block_indent;
offset.alignment += vis.len() + "use ".len();
// 1 = ";"
match vp.rewrite(&self.get_context(),
self.config.max_width - offset.width() - 1,
offset) {
Some(ref s) if s.is_empty() => {
// Format up to last newline
let prev_span = codemap::mk_sp(self.last_pos, source!(self, span).lo);
let span_end = match self.snippet(prev_span).rfind('\n') {
Some(offset) => self.last_pos + BytePos(offset as u32),
None => source!(self, span).lo,
};
self.format_missing(span_end);
self.last_pos = source!(self, span).hi;
}
Some(ref s) => {
let s = format!("{}use {};", vis, s);
self.format_missing_with_indent(source!(self, span).lo);
self.buffer.push_str(&s);
self.last_pos = source!(self, span).hi;
}
None => {
self.format_missing_with_indent(source!(self, span).lo);
self.format_missing(source!(self, span).hi);
}
}
}
}
fn rewrite_single_use_list(path_str: Option<String>, vpi: &ast::PathListItem) -> String {
let path_item_str = if let ast::PathListItemKind::Ident { name,.. } = vpi.node {
// A name.
match path_str {
Some(path_str) => format!("{}::{}", path_str, name),
None => name.to_string(),
}
} else {
// `self`.
match path_str {
Some(path_str) => path_str,
// This catches the import: use {self}, which is a compiler error, so we just
// leave it alone.
None => "{self}".to_owned(),
}
};
append_alias(path_item_str, vpi)
}
fn rewrite_path_item(vpi: &&ast::PathListItem) -> Option<String> {
let path_item_str = match vpi.node {
ast::PathListItemKind::Ident { name,.. } => name.to_string(),
ast::PathListItemKind::Mod {.. } => "self".to_owned(),
};
Some(append_alias(path_item_str, vpi))
}
fn append_alias(path_item_str: String, vpi: &ast::PathListItem) -> String {
match vpi.node {
ast::PathListItemKind::Ident { rename: Some(rename),.. } |
ast::PathListItemKind::Mod { rename: Some(rename),.. } => {
format!("{} as {}", path_item_str, rename)
}
_ => path_item_str,
}
}
// Pretty prints a multi-item import.
// Assumes that path_list.len() > 0.
pub fn rewrite_use_list(width: usize,
offset: Indent,
path: &ast::Path,
path_list: &[ast::PathListItem],
span: Span,
context: &RewriteContext)
-> Option<String> {
// Returns a different option to distinguish `::foo` and `foo`
let opt_path_str = if!path.to_string().is_empty() {
Some(path.to_string())
} else if path.global {
// path is absolute, we return an empty String to avoid a double `::`
Some(String::new())
} else {
None
};
match path_list.len() {
0 => unreachable!(),
1 => return Some(rewrite_single_use_list(opt_path_str, &path_list[0])),
_ => (),
}
// 2 = ::
let path_separation_w = if opt_path_str.is_some() { 2 } else { 0 };
// 1 = {
let supp_indent = path.to_string().len() + path_separation_w + 1;
// 1 = }
let remaining_width = width.checked_sub(supp_indent + 1).unwrap_or(0);
let mut items = {
// Dummy value, see explanation below.
let mut items = vec![ListItem::from_str("")];
let iter = itemize_list(context.codemap,
path_list.iter(),
"}",
|vpi| vpi.span.lo,
|vpi| vpi.span.hi,
rewrite_path_item,
context.codemap.span_after(span, "{"),
span.hi);
items.extend(iter);
items
};
// We prefixed the item list with a dummy value so that we can
// potentially move "self" to the front of the vector without touching
// the rest of the items.
let has_self = move_self_to_front(&mut items);
let first_index = if has_self { 0 } else { 1 };
if context.config.reorder_imported_names {
items[1..].sort_by(|a, b| a.item.cmp(&b.item));
}
let tactic = definitive_tactic(&items[first_index..],
::lists::ListTactic::Mixed,
remaining_width);
let fmt = ListFormatting {
tactic: tactic,
separator: ",",
trailing_separator: SeparatorTactic::Never,
indent: offset + supp_indent,
// FIXME This is too conservative, and will not use all width
// available
// (loose 1 column (";"))
width: remaining_width,
ends_with_newline: false,
config: context.config,
};
let list_str = try_opt!(write_list(&items[first_index..], &fmt));
Some(match opt_path_str {
Some(opt_path_str) => format!("{}::{{{}}}", opt_path_str, list_str),
None => format!("{{{}}}", list_str),
})
}
// Returns true when self item was found.
fn move_self_to_front(items: &mut Vec<ListItem>) -> bool {
match items.iter().position(|item| item.item.as_ref().map(|x| &x[..]) == Some("self")) {
Some(pos) => {
items[0] = items.remove(pos);
true
}
None => false,
}
}
|
compare_path_list_item_lists
|
identifier_name
|
imports.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use Indent;
use utils;
use syntax::codemap::{self, BytePos, Span};
use codemap::SpanUtils;
use lists::{write_list, itemize_list, ListItem, ListFormatting, SeparatorTactic, definitive_tactic};
use types::rewrite_path;
use rewrite::{Rewrite, RewriteContext};
use visitor::FmtVisitor;
use std::cmp::{self, Ordering};
use syntax::{ast, ptr};
fn path_of(a: &ast::ViewPath_) -> &ast::Path {
match a {
&ast::ViewPath_::ViewPathSimple(_, ref p) => p,
&ast::ViewPath_::ViewPathGlob(ref p) => p,
&ast::ViewPath_::ViewPathList(ref p, _) => p,
}
}
fn compare_path_segments(a: &ast::PathSegment, b: &ast::PathSegment) -> Ordering {
a.identifier.name.as_str().cmp(&b.identifier.name.as_str())
}
fn compare_paths(a: &ast::Path, b: &ast::Path) -> Ordering {
for segment in a.segments.iter().zip(b.segments.iter()) {
let ord = compare_path_segments(segment.0, segment.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.segments.len().cmp(&b.segments.len())
}
fn compare_path_list_items(a: &ast::PathListItem, b: &ast::PathListItem) -> Ordering {
let name_ordering = match a.node.name() {
Some(a_name) => {
match b.node.name() {
Some(b_name) => a_name.name.as_str().cmp(&b_name.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
};
if name_ordering == Ordering::Equal {
match a.node.rename() {
Some(a_rename) => {
match b.node.rename() {
Some(b_rename) => a_rename.name.as_str().cmp(&b_rename.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
}
} else {
name_ordering
}
}
fn compare_path_list_item_lists(a_items: &Vec<ast::PathListItem>,
b_items: &Vec<ast::PathListItem>)
-> Ordering {
let mut a = a_items.clone();
let mut b = b_items.clone();
a.sort_by(|a, b| compare_path_list_items(a, b));
b.sort_by(|a, b| compare_path_list_items(a, b));
for comparison_pair in a.iter().zip(b.iter()) {
let ord = compare_path_list_items(comparison_pair.0, comparison_pair.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.len().cmp(&b.len())
}
fn compare_view_path_types(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
use syntax::ast::ViewPath_::*;
match (a, b) {
(&ViewPathSimple(..), &ViewPathSimple(..)) => Ordering::Equal,
(&ViewPathSimple(..), _) => Ordering::Less,
(&ViewPathGlob(_), &ViewPathSimple(..)) => Ordering::Greater,
(&ViewPathGlob(_), &ViewPathGlob(_)) => Ordering::Equal,
(&ViewPathGlob(_), &ViewPathList(..)) => Ordering::Less,
(&ViewPathList(_, ref a_items), &ViewPathList(_, ref b_items)) => {
compare_path_list_item_lists(a_items, b_items)
}
(&ViewPathList(..), _) => Ordering::Greater,
}
}
fn compare_view_paths(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
match compare_paths(path_of(a), path_of(b)) {
Ordering::Equal => compare_view_path_types(a, b),
cmp => cmp,
}
}
fn compare_use_items(a: &ast::Item, b: &ast::Item) -> Option<Ordering> {
match (&a.node, &b.node) {
(&ast::ItemKind::Use(ref a_vp), &ast::ItemKind::Use(ref b_vp)) => {
Some(compare_view_paths(&a_vp.node, &b_vp.node))
}
_ => None,
}
}
// TODO (some day) remove unused imports, expand globs, compress many single
// imports into a list import.
impl Rewrite for ast::ViewPath {
// Returns an empty string when the ViewPath is empty (like foo::bar::{})
fn rewrite(&self, context: &RewriteContext, width: usize, offset: Indent) -> Option<String> {
match self.node {
ast::ViewPath_::ViewPathList(_, ref path_list) if path_list.is_empty() => {
Some(String::new())
}
ast::ViewPath_::ViewPathList(ref path, ref path_list) => {
rewrite_use_list(width, offset, path, path_list, self.span, context)
}
ast::ViewPath_::ViewPathGlob(_) => {
// FIXME convert to list?
None
}
ast::ViewPath_::ViewPathSimple(ident, ref path) => {
let ident_str = ident.to_string();
// 4 = " as ".len()
let budget = try_opt!(width.checked_sub(ident_str.len() + 4));
let path_str = try_opt!(rewrite_path(context, false, None, path, budget, offset));
Some(if path.segments.last().unwrap().identifier == ident {
path_str
} else {
format!("{} as {}", path_str, ident_str)
})
}
}
}
}
impl<'a> FmtVisitor<'a> {
pub fn format_imports(&mut self, use_items: &[ptr::P<ast::Item>]) {
// Find the location immediately before the first use item in the run. This must not lie
// before the current `self.last_pos`
let pos_before_first_use_item = use_items.first()
.map(|p_i| cmp::max(self.last_pos, p_i.span.lo))
.unwrap_or(self.last_pos);
// Construct a list of pairs, each containing a `use` item and the start of span before
// that `use` item.
let mut last_pos_of_prev_use_item = pos_before_first_use_item;
let mut ordered_use_items = use_items.iter()
.map(|p_i| {
let new_item = (&*p_i, last_pos_of_prev_use_item);
last_pos_of_prev_use_item = p_i.span.hi;
new_item
})
.collect::<Vec<_>>();
let pos_after_last_use_item = last_pos_of_prev_use_item;
// Order the imports by view-path & other import path properties
ordered_use_items.sort_by(|a, b| compare_use_items(a.0, b.0).unwrap());
// First, output the span before the first import
let prev_span_str = self.snippet(codemap::mk_sp(self.last_pos, pos_before_first_use_item));
// Look for purely trailing space at the start of the prefix snippet before a linefeed, or
// a prefix that's entirely horizontal whitespace.
let prefix_span_start = match prev_span_str.find('\n') {
Some(offset) if prev_span_str[..offset].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
None if prev_span_str.trim().is_empty() => pos_before_first_use_item,
_ => self.last_pos,
};
// Look for indent (the line part preceding the use is all whitespace) and excise that
// from the prefix
let span_end = match prev_span_str.rfind('\n') {
Some(offset) if prev_span_str[offset..].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
_ => pos_before_first_use_item,
};
self.last_pos = prefix_span_start;
self.format_missing(span_end);
for ordered in ordered_use_items {
// Fake out the formatter by setting `self.last_pos` to the appropriate location before
// each item before visiting it.
self.last_pos = ordered.1;
self.visit_item(&ordered.0);
}
self.last_pos = pos_after_last_use_item;
}
pub fn format_import(&mut self, vis: &ast::Visibility, vp: &ast::ViewPath, span: Span) {
let vis = utils::format_visibility(vis);
let mut offset = self.block_indent;
offset.alignment += vis.len() + "use ".len();
// 1 = ";"
match vp.rewrite(&self.get_context(),
self.config.max_width - offset.width() - 1,
offset) {
Some(ref s) if s.is_empty() => {
// Format up to last newline
let prev_span = codemap::mk_sp(self.last_pos, source!(self, span).lo);
let span_end = match self.snippet(prev_span).rfind('\n') {
Some(offset) => self.last_pos + BytePos(offset as u32),
None => source!(self, span).lo,
};
self.format_missing(span_end);
self.last_pos = source!(self, span).hi;
}
Some(ref s) => {
let s = format!("{}use {};", vis, s);
self.format_missing_with_indent(source!(self, span).lo);
self.buffer.push_str(&s);
self.last_pos = source!(self, span).hi;
}
None => {
self.format_missing_with_indent(source!(self, span).lo);
self.format_missing(source!(self, span).hi);
}
}
}
}
fn rewrite_single_use_list(path_str: Option<String>, vpi: &ast::PathListItem) -> String {
let path_item_str = if let ast::PathListItemKind::Ident { name,.. } = vpi.node {
// A name.
match path_str {
Some(path_str) => format!("{}::{}", path_str, name),
None => name.to_string(),
}
} else {
// `self`.
match path_str {
Some(path_str) => path_str,
// This catches the import: use {self}, which is a compiler error, so we just
// leave it alone.
None => "{self}".to_owned(),
}
};
append_alias(path_item_str, vpi)
}
fn rewrite_path_item(vpi: &&ast::PathListItem) -> Option<String>
|
fn append_alias(path_item_str: String, vpi: &ast::PathListItem) -> String {
match vpi.node {
ast::PathListItemKind::Ident { rename: Some(rename),.. } |
ast::PathListItemKind::Mod { rename: Some(rename),.. } => {
format!("{} as {}", path_item_str, rename)
}
_ => path_item_str,
}
}
// Pretty prints a multi-item import.
// Assumes that path_list.len() > 0.
pub fn rewrite_use_list(width: usize,
offset: Indent,
path: &ast::Path,
path_list: &[ast::PathListItem],
span: Span,
context: &RewriteContext)
-> Option<String> {
// Returns a different option to distinguish `::foo` and `foo`
let opt_path_str = if!path.to_string().is_empty() {
Some(path.to_string())
} else if path.global {
// path is absolute, we return an empty String to avoid a double `::`
Some(String::new())
} else {
None
};
match path_list.len() {
0 => unreachable!(),
1 => return Some(rewrite_single_use_list(opt_path_str, &path_list[0])),
_ => (),
}
// 2 = ::
let path_separation_w = if opt_path_str.is_some() { 2 } else { 0 };
// 1 = {
let supp_indent = path.to_string().len() + path_separation_w + 1;
// 1 = }
let remaining_width = width.checked_sub(supp_indent + 1).unwrap_or(0);
let mut items = {
// Dummy value, see explanation below.
let mut items = vec![ListItem::from_str("")];
let iter = itemize_list(context.codemap,
path_list.iter(),
"}",
|vpi| vpi.span.lo,
|vpi| vpi.span.hi,
rewrite_path_item,
context.codemap.span_after(span, "{"),
span.hi);
items.extend(iter);
items
};
// We prefixed the item list with a dummy value so that we can
// potentially move "self" to the front of the vector without touching
// the rest of the items.
let has_self = move_self_to_front(&mut items);
let first_index = if has_self { 0 } else { 1 };
if context.config.reorder_imported_names {
items[1..].sort_by(|a, b| a.item.cmp(&b.item));
}
let tactic = definitive_tactic(&items[first_index..],
::lists::ListTactic::Mixed,
remaining_width);
let fmt = ListFormatting {
tactic: tactic,
separator: ",",
trailing_separator: SeparatorTactic::Never,
indent: offset + supp_indent,
// FIXME This is too conservative, and will not use all width
// available
// (loose 1 column (";"))
width: remaining_width,
ends_with_newline: false,
config: context.config,
};
let list_str = try_opt!(write_list(&items[first_index..], &fmt));
Some(match opt_path_str {
Some(opt_path_str) => format!("{}::{{{}}}", opt_path_str, list_str),
None => format!("{{{}}}", list_str),
})
}
// Returns true when self item was found.
fn move_self_to_front(items: &mut Vec<ListItem>) -> bool {
match items.iter().position(|item| item.item.as_ref().map(|x| &x[..]) == Some("self")) {
Some(pos) => {
items[0] = items.remove(pos);
true
}
None => false,
}
}
|
{
let path_item_str = match vpi.node {
ast::PathListItemKind::Ident { name, .. } => name.to_string(),
ast::PathListItemKind::Mod { .. } => "self".to_owned(),
};
Some(append_alias(path_item_str, vpi))
}
|
identifier_body
|
imports.rs
|
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use Indent;
use utils;
use syntax::codemap::{self, BytePos, Span};
use codemap::SpanUtils;
use lists::{write_list, itemize_list, ListItem, ListFormatting, SeparatorTactic, definitive_tactic};
use types::rewrite_path;
use rewrite::{Rewrite, RewriteContext};
use visitor::FmtVisitor;
use std::cmp::{self, Ordering};
use syntax::{ast, ptr};
fn path_of(a: &ast::ViewPath_) -> &ast::Path {
match a {
&ast::ViewPath_::ViewPathSimple(_, ref p) => p,
&ast::ViewPath_::ViewPathGlob(ref p) => p,
&ast::ViewPath_::ViewPathList(ref p, _) => p,
}
}
fn compare_path_segments(a: &ast::PathSegment, b: &ast::PathSegment) -> Ordering {
a.identifier.name.as_str().cmp(&b.identifier.name.as_str())
}
fn compare_paths(a: &ast::Path, b: &ast::Path) -> Ordering {
for segment in a.segments.iter().zip(b.segments.iter()) {
let ord = compare_path_segments(segment.0, segment.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.segments.len().cmp(&b.segments.len())
}
fn compare_path_list_items(a: &ast::PathListItem, b: &ast::PathListItem) -> Ordering {
let name_ordering = match a.node.name() {
Some(a_name) => {
match b.node.name() {
Some(b_name) => a_name.name.as_str().cmp(&b_name.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
};
if name_ordering == Ordering::Equal {
match a.node.rename() {
Some(a_rename) => {
match b.node.rename() {
Some(b_rename) => a_rename.name.as_str().cmp(&b_rename.name.as_str()),
None => Ordering::Greater,
}
}
None => {
match b.node.name() {
Some(_) => Ordering::Less,
None => Ordering::Equal,
}
}
}
} else {
name_ordering
}
}
fn compare_path_list_item_lists(a_items: &Vec<ast::PathListItem>,
b_items: &Vec<ast::PathListItem>)
-> Ordering {
let mut a = a_items.clone();
let mut b = b_items.clone();
a.sort_by(|a, b| compare_path_list_items(a, b));
b.sort_by(|a, b| compare_path_list_items(a, b));
for comparison_pair in a.iter().zip(b.iter()) {
let ord = compare_path_list_items(comparison_pair.0, comparison_pair.1);
if ord!= Ordering::Equal {
return ord;
}
}
a.len().cmp(&b.len())
}
fn compare_view_path_types(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
use syntax::ast::ViewPath_::*;
match (a, b) {
(&ViewPathSimple(..), &ViewPathSimple(..)) => Ordering::Equal,
(&ViewPathSimple(..), _) => Ordering::Less,
(&ViewPathGlob(_), &ViewPathSimple(..)) => Ordering::Greater,
(&ViewPathGlob(_), &ViewPathGlob(_)) => Ordering::Equal,
(&ViewPathGlob(_), &ViewPathList(..)) => Ordering::Less,
(&ViewPathList(_, ref a_items), &ViewPathList(_, ref b_items)) => {
compare_path_list_item_lists(a_items, b_items)
}
(&ViewPathList(..), _) => Ordering::Greater,
}
}
fn compare_view_paths(a: &ast::ViewPath_, b: &ast::ViewPath_) -> Ordering {
match compare_paths(path_of(a), path_of(b)) {
Ordering::Equal => compare_view_path_types(a, b),
cmp => cmp,
}
}
fn compare_use_items(a: &ast::Item, b: &ast::Item) -> Option<Ordering> {
match (&a.node, &b.node) {
(&ast::ItemKind::Use(ref a_vp), &ast::ItemKind::Use(ref b_vp)) => {
Some(compare_view_paths(&a_vp.node, &b_vp.node))
}
_ => None,
}
}
// TODO (some day) remove unused imports, expand globs, compress many single
// imports into a list import.
impl Rewrite for ast::ViewPath {
// Returns an empty string when the ViewPath is empty (like foo::bar::{})
fn rewrite(&self, context: &RewriteContext, width: usize, offset: Indent) -> Option<String> {
match self.node {
ast::ViewPath_::ViewPathList(_, ref path_list) if path_list.is_empty() => {
Some(String::new())
}
ast::ViewPath_::ViewPathList(ref path, ref path_list) => {
rewrite_use_list(width, offset, path, path_list, self.span, context)
}
ast::ViewPath_::ViewPathGlob(_) => {
// FIXME convert to list?
None
}
ast::ViewPath_::ViewPathSimple(ident, ref path) => {
let ident_str = ident.to_string();
// 4 = " as ".len()
let budget = try_opt!(width.checked_sub(ident_str.len() + 4));
let path_str = try_opt!(rewrite_path(context, false, None, path, budget, offset));
Some(if path.segments.last().unwrap().identifier == ident {
path_str
} else {
format!("{} as {}", path_str, ident_str)
})
}
}
}
}
impl<'a> FmtVisitor<'a> {
pub fn format_imports(&mut self, use_items: &[ptr::P<ast::Item>]) {
// Find the location immediately before the first use item in the run. This must not lie
// before the current `self.last_pos`
let pos_before_first_use_item = use_items.first()
.map(|p_i| cmp::max(self.last_pos, p_i.span.lo))
.unwrap_or(self.last_pos);
// Construct a list of pairs, each containing a `use` item and the start of span before
// that `use` item.
let mut last_pos_of_prev_use_item = pos_before_first_use_item;
let mut ordered_use_items = use_items.iter()
.map(|p_i| {
let new_item = (&*p_i, last_pos_of_prev_use_item);
last_pos_of_prev_use_item = p_i.span.hi;
new_item
})
.collect::<Vec<_>>();
let pos_after_last_use_item = last_pos_of_prev_use_item;
// Order the imports by view-path & other import path properties
ordered_use_items.sort_by(|a, b| compare_use_items(a.0, b.0).unwrap());
// First, output the span before the first import
let prev_span_str = self.snippet(codemap::mk_sp(self.last_pos, pos_before_first_use_item));
// Look for purely trailing space at the start of the prefix snippet before a linefeed, or
// a prefix that's entirely horizontal whitespace.
let prefix_span_start = match prev_span_str.find('\n') {
Some(offset) if prev_span_str[..offset].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
None if prev_span_str.trim().is_empty() => pos_before_first_use_item,
_ => self.last_pos,
};
// Look for indent (the line part preceding the use is all whitespace) and excise that
// from the prefix
let span_end = match prev_span_str.rfind('\n') {
Some(offset) if prev_span_str[offset..].trim().is_empty() => {
self.last_pos + BytePos(offset as u32)
}
_ => pos_before_first_use_item,
};
self.last_pos = prefix_span_start;
self.format_missing(span_end);
for ordered in ordered_use_items {
// Fake out the formatter by setting `self.last_pos` to the appropriate location before
// each item before visiting it.
self.last_pos = ordered.1;
self.visit_item(&ordered.0);
}
self.last_pos = pos_after_last_use_item;
}
pub fn format_import(&mut self, vis: &ast::Visibility, vp: &ast::ViewPath, span: Span) {
let vis = utils::format_visibility(vis);
let mut offset = self.block_indent;
offset.alignment += vis.len() + "use ".len();
// 1 = ";"
match vp.rewrite(&self.get_context(),
self.config.max_width - offset.width() - 1,
offset) {
Some(ref s) if s.is_empty() => {
// Format up to last newline
let prev_span = codemap::mk_sp(self.last_pos, source!(self, span).lo);
let span_end = match self.snippet(prev_span).rfind('\n') {
Some(offset) => self.last_pos + BytePos(offset as u32),
None => source!(self, span).lo,
};
self.format_missing(span_end);
self.last_pos = source!(self, span).hi;
}
Some(ref s) => {
let s = format!("{}use {};", vis, s);
self.format_missing_with_indent(source!(self, span).lo);
self.buffer.push_str(&s);
self.last_pos = source!(self, span).hi;
}
None => {
self.format_missing_with_indent(source!(self, span).lo);
self.format_missing(source!(self, span).hi);
}
}
}
}
fn rewrite_single_use_list(path_str: Option<String>, vpi: &ast::PathListItem) -> String {
let path_item_str = if let ast::PathListItemKind::Ident { name,.. } = vpi.node {
// A name.
match path_str {
Some(path_str) => format!("{}::{}", path_str, name),
None => name.to_string(),
}
} else {
// `self`.
match path_str {
Some(path_str) => path_str,
// This catches the import: use {self}, which is a compiler error, so we just
// leave it alone.
None => "{self}".to_owned(),
}
};
append_alias(path_item_str, vpi)
}
fn rewrite_path_item(vpi: &&ast::PathListItem) -> Option<String> {
let path_item_str = match vpi.node {
ast::PathListItemKind::Ident { name,.. } => name.to_string(),
ast::PathListItemKind::Mod {.. } => "self".to_owned(),
};
Some(append_alias(path_item_str, vpi))
}
fn append_alias(path_item_str: String, vpi: &ast::PathListItem) -> String {
match vpi.node {
ast::PathListItemKind::Ident { rename: Some(rename),.. } |
ast::PathListItemKind::Mod { rename: Some(rename),.. } => {
format!("{} as {}", path_item_str, rename)
}
_ => path_item_str,
}
}
// Pretty prints a multi-item import.
// Assumes that path_list.len() > 0.
pub fn rewrite_use_list(width: usize,
offset: Indent,
path: &ast::Path,
path_list: &[ast::PathListItem],
span: Span,
context: &RewriteContext)
-> Option<String> {
// Returns a different option to distinguish `::foo` and `foo`
let opt_path_str = if!path.to_string().is_empty() {
Some(path.to_string())
} else if path.global {
// path is absolute, we return an empty String to avoid a double `::`
Some(String::new())
} else {
None
};
match path_list.len() {
0 => unreachable!(),
1 => return Some(rewrite_single_use_list(opt_path_str, &path_list[0])),
_ => (),
}
// 2 = ::
let path_separation_w = if opt_path_str.is_some() { 2 } else { 0 };
// 1 = {
let supp_indent = path.to_string().len() + path_separation_w + 1;
// 1 = }
let remaining_width = width.checked_sub(supp_indent + 1).unwrap_or(0);
let mut items = {
// Dummy value, see explanation below.
let mut items = vec![ListItem::from_str("")];
let iter = itemize_list(context.codemap,
path_list.iter(),
"}",
|vpi| vpi.span.lo,
|vpi| vpi.span.hi,
rewrite_path_item,
context.codemap.span_after(span, "{"),
span.hi);
items.extend(iter);
items
};
// We prefixed the item list with a dummy value so that we can
// potentially move "self" to the front of the vector without touching
// the rest of the items.
let has_self = move_self_to_front(&mut items);
let first_index = if has_self { 0 } else { 1 };
if context.config.reorder_imported_names {
items[1..].sort_by(|a, b| a.item.cmp(&b.item));
}
let tactic = definitive_tactic(&items[first_index..],
::lists::ListTactic::Mixed,
remaining_width);
let fmt = ListFormatting {
tactic: tactic,
separator: ",",
trailing_separator: SeparatorTactic::Never,
indent: offset + supp_indent,
// FIXME This is too conservative, and will not use all width
// available
// (loose 1 column (";"))
width: remaining_width,
ends_with_newline: false,
config: context.config,
};
let list_str = try_opt!(write_list(&items[first_index..], &fmt));
Some(match opt_path_str {
Some(opt_path_str) => format!("{}::{{{}}}", opt_path_str, list_str),
None => format!("{{{}}}", list_str),
})
}
// Returns true when self item was found.
fn move_self_to_front(items: &mut Vec<ListItem>) -> bool {
match items.iter().position(|item| item.item.as_ref().map(|x| &x[..]) == Some("self")) {
Some(pos) => {
items[0] = items.remove(pos);
true
}
None => false,
}
|
}
|
random_line_split
|
|
mat33.rs
|
use super::Vec2;
use super::Vec3;
/// A 3-by-3 matrix. Stored in column-major order.
pub struct Mat33 {
pub ex: Vec3,
pub ey: Vec3,
pub ez: Vec3
}
impl Mat33 {
pub fn new() -> Mat33 {
Mat33 {
ex: Vec3::new(0.0, 0.0, 0.0),
ey: Vec3::new(0.0, 0.0, 0.0),
ez: Vec3::new(0.0, 0.0, 0.0)
}
}
pub fn
|
(c1: Vec3, c2: Vec3, c3: Vec3) -> Mat33 {
Mat33 {
ex: c1,
ey: c2,
ez: c3
}
}
/// Set this matrix to all zeros.
pub fn set_zero(&mut self) {
self.ex.set_zero();
self.ey.set_zero();
self.ez.set_zero();
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve33(&self, b: Vec3) -> Vec3 {
let mut det = Vec3::dot(self.ex, Vec3::cross(self.ey, self.ez));
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * Vec3::dot(b, Vec3::cross(self.ey, self.ez));
let y = det * Vec3::dot(self.ex, Vec3::cross(b, self.ez));
let z = det * Vec3::dot(self.ex, Vec3::cross(self.ey, b));
Vec3::new(x, y, z)
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve22(&self, b: Vec2) -> Vec2 {
let a11 = self.ex.x;
let a12 = self.ey.x;
let a21 = self.ex.y;
let a22 = self.ey.y;
let mut det = a11 * a22 - a12 * a21;
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * (a22 * b.x - a12 * b.y);
let y = det * (a11 * b.y - a21 * b.x);
Vec2::new(x, y)
}
///
pub fn get_inverse22(&self) -> Mat33 {
let a = self.ex.x;
let b = self.ey.x;
let c = self.ex.y;
let d = self.eu.y;
let mut det = a * d - b * c;
if det!= 0.0 {
det = 1.0 / det;
}
let m = Mat33::new();
m.ex.x = det * d; m.ey.x = -det * b; m.ex.z = 0.0;
m.ex.y = -det * c; m.ey.y = det * a; m.ey.z = 0.0;
m.ez.x = 0.0; m.ez.y = 0.0; m.ez.z = 0.0;
return m
}
}
|
new_columns
|
identifier_name
|
mat33.rs
|
use super::Vec2;
use super::Vec3;
/// A 3-by-3 matrix. Stored in column-major order.
pub struct Mat33 {
pub ex: Vec3,
pub ey: Vec3,
pub ez: Vec3
}
impl Mat33 {
pub fn new() -> Mat33 {
Mat33 {
ex: Vec3::new(0.0, 0.0, 0.0),
ey: Vec3::new(0.0, 0.0, 0.0),
ez: Vec3::new(0.0, 0.0, 0.0)
}
}
pub fn new_columns(c1: Vec3, c2: Vec3, c3: Vec3) -> Mat33 {
Mat33 {
ex: c1,
ey: c2,
ez: c3
}
}
/// Set this matrix to all zeros.
pub fn set_zero(&mut self) {
self.ex.set_zero();
self.ey.set_zero();
self.ez.set_zero();
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve33(&self, b: Vec3) -> Vec3 {
let mut det = Vec3::dot(self.ex, Vec3::cross(self.ey, self.ez));
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * Vec3::dot(b, Vec3::cross(self.ey, self.ez));
let y = det * Vec3::dot(self.ex, Vec3::cross(b, self.ez));
let z = det * Vec3::dot(self.ex, Vec3::cross(self.ey, b));
Vec3::new(x, y, z)
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve22(&self, b: Vec2) -> Vec2 {
let a11 = self.ex.x;
let a12 = self.ey.x;
let a21 = self.ex.y;
let a22 = self.ey.y;
let mut det = a11 * a22 - a12 * a21;
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * (a22 * b.x - a12 * b.y);
let y = det * (a11 * b.y - a21 * b.x);
Vec2::new(x, y)
}
|
let b = self.ey.x;
let c = self.ex.y;
let d = self.eu.y;
let mut det = a * d - b * c;
if det!= 0.0 {
det = 1.0 / det;
}
let m = Mat33::new();
m.ex.x = det * d; m.ey.x = -det * b; m.ex.z = 0.0;
m.ex.y = -det * c; m.ey.y = det * a; m.ey.z = 0.0;
m.ez.x = 0.0; m.ez.y = 0.0; m.ez.z = 0.0;
return m
}
}
|
///
pub fn get_inverse22(&self) -> Mat33 {
let a = self.ex.x;
|
random_line_split
|
mat33.rs
|
use super::Vec2;
use super::Vec3;
/// A 3-by-3 matrix. Stored in column-major order.
pub struct Mat33 {
pub ex: Vec3,
pub ey: Vec3,
pub ez: Vec3
}
impl Mat33 {
pub fn new() -> Mat33 {
Mat33 {
ex: Vec3::new(0.0, 0.0, 0.0),
ey: Vec3::new(0.0, 0.0, 0.0),
ez: Vec3::new(0.0, 0.0, 0.0)
}
}
pub fn new_columns(c1: Vec3, c2: Vec3, c3: Vec3) -> Mat33 {
Mat33 {
ex: c1,
ey: c2,
ez: c3
}
}
/// Set this matrix to all zeros.
pub fn set_zero(&mut self)
|
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve33(&self, b: Vec3) -> Vec3 {
let mut det = Vec3::dot(self.ex, Vec3::cross(self.ey, self.ez));
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * Vec3::dot(b, Vec3::cross(self.ey, self.ez));
let y = det * Vec3::dot(self.ex, Vec3::cross(b, self.ez));
let z = det * Vec3::dot(self.ex, Vec3::cross(self.ey, b));
Vec3::new(x, y, z)
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve22(&self, b: Vec2) -> Vec2 {
let a11 = self.ex.x;
let a12 = self.ey.x;
let a21 = self.ex.y;
let a22 = self.ey.y;
let mut det = a11 * a22 - a12 * a21;
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * (a22 * b.x - a12 * b.y);
let y = det * (a11 * b.y - a21 * b.x);
Vec2::new(x, y)
}
///
pub fn get_inverse22(&self) -> Mat33 {
let a = self.ex.x;
let b = self.ey.x;
let c = self.ex.y;
let d = self.eu.y;
let mut det = a * d - b * c;
if det!= 0.0 {
det = 1.0 / det;
}
let m = Mat33::new();
m.ex.x = det * d; m.ey.x = -det * b; m.ex.z = 0.0;
m.ex.y = -det * c; m.ey.y = det * a; m.ey.z = 0.0;
m.ez.x = 0.0; m.ez.y = 0.0; m.ez.z = 0.0;
return m
}
}
|
{
self.ex.set_zero();
self.ey.set_zero();
self.ez.set_zero();
}
|
identifier_body
|
mat33.rs
|
use super::Vec2;
use super::Vec3;
/// A 3-by-3 matrix. Stored in column-major order.
pub struct Mat33 {
pub ex: Vec3,
pub ey: Vec3,
pub ez: Vec3
}
impl Mat33 {
pub fn new() -> Mat33 {
Mat33 {
ex: Vec3::new(0.0, 0.0, 0.0),
ey: Vec3::new(0.0, 0.0, 0.0),
ez: Vec3::new(0.0, 0.0, 0.0)
}
}
pub fn new_columns(c1: Vec3, c2: Vec3, c3: Vec3) -> Mat33 {
Mat33 {
ex: c1,
ey: c2,
ez: c3
}
}
/// Set this matrix to all zeros.
pub fn set_zero(&mut self) {
self.ex.set_zero();
self.ey.set_zero();
self.ez.set_zero();
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve33(&self, b: Vec3) -> Vec3 {
let mut det = Vec3::dot(self.ex, Vec3::cross(self.ey, self.ez));
if det!= 0.0 {
det = 1.0 / det;
}
let x = det * Vec3::dot(b, Vec3::cross(self.ey, self.ez));
let y = det * Vec3::dot(self.ex, Vec3::cross(b, self.ez));
let z = det * Vec3::dot(self.ex, Vec3::cross(self.ey, b));
Vec3::new(x, y, z)
}
/// Solve A * x = b, where b is a column vector. This is more efficient
/// than computing the inverse in one-shot cases.
pub fn solve22(&self, b: Vec2) -> Vec2 {
let a11 = self.ex.x;
let a12 = self.ey.x;
let a21 = self.ex.y;
let a22 = self.ey.y;
let mut det = a11 * a22 - a12 * a21;
if det!= 0.0
|
let x = det * (a22 * b.x - a12 * b.y);
let y = det * (a11 * b.y - a21 * b.x);
Vec2::new(x, y)
}
///
pub fn get_inverse22(&self) -> Mat33 {
let a = self.ex.x;
let b = self.ey.x;
let c = self.ex.y;
let d = self.eu.y;
let mut det = a * d - b * c;
if det!= 0.0 {
det = 1.0 / det;
}
let m = Mat33::new();
m.ex.x = det * d; m.ey.x = -det * b; m.ex.z = 0.0;
m.ex.y = -det * c; m.ey.y = det * a; m.ey.z = 0.0;
m.ez.x = 0.0; m.ez.y = 0.0; m.ez.z = 0.0;
return m
}
}
|
{
det = 1.0 / det;
}
|
conditional_block
|
reader.rs
|
//! Read points from an.sdc file.
use std::fs::File;
use std::iter::IntoIterator;
use std::io::{BufReader, Read};
use std::path::Path;
use std::str;
use byteorder;
use byteorder::{LittleEndian, ReadBytesExt};
use error::Error;
use point::{Point, TargetType};
use result::Result;
/// An object for readings.sdc points.
///
/// We don't just read them all into memory right away since.sdc files can be quite big.
#[derive(Debug)]
pub struct Reader<R: Read> {
reader: R,
version: Version,
header_information: Vec<u8>,
}
/// The sdc file version.
#[derive(Clone, Copy, Debug)]
pub struct Version {
/// The sdc major version.
pub major: u16,
/// The sdc minor version.
pub minor: u16,
}
impl Reader<BufReader<File>> {
/// Creates a new reader for a path.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// ```
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Reader<BufReader<File>>> {
let reader = BufReader::new(try!(File::open(path)));
Reader::new(reader)
}
}
impl<R: Read> Reader<R> {
/// Creates a new reader, consuimg a `Read`.
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use sdc::reader::Reader;
/// let file = File::open("data/4-points-5.0.sdc").unwrap();
/// let reader = Reader::new(file);
/// ```
pub fn new(mut reader: R) -> Result<Reader<R>> {
let header_size = try!(reader.read_u32::<LittleEndian>());
let major = try!(reader.read_u16::<LittleEndian>());
if major!= 5 {
return Err(Error::InvalidMajorVersion(major));
}
let minor = try!(reader.read_u16::<LittleEndian>());
let header_information_size = header_size - 8;
let mut header_information = Vec::with_capacity(header_information_size as usize);
if try!(reader.by_ref()
.take(header_information_size as u64)
.read_to_end(&mut header_information))!=
header_information_size as usize
|
Ok(Reader {
reader: reader,
version: Version { major: major, minor: minor, },
header_information: header_information,
})
}
/// Reads the next point from the underlying `Read`.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let mut reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let point = reader.next_point();
/// ```
pub fn next_point(&mut self) -> Result<Option<Point>> {
// Technically we should just check the first byte instead of the first four, but the work
// required to do that doesn't seem worth it at the moment.
let time = match self.reader.read_f64::<LittleEndian>() {
Ok(time) => time,
Err(byteorder::Error::UnexpectedEOF) => return Ok(None),
Err(err) => return Err(Error::from(err)),
};
let range = try!(self.reader.read_f32::<LittleEndian>());
let theta = try!(self.reader.read_f32::<LittleEndian>());
let x = try!(self.reader.read_f32::<LittleEndian>());
let y = try!(self.reader.read_f32::<LittleEndian>());
let z = try!(self.reader.read_f32::<LittleEndian>());
let amplitude = try!(self.reader.read_u16::<LittleEndian>());
let width = try!(self.reader.read_u16::<LittleEndian>());
let target_type = try!(TargetType::from_u8(try!(self.reader.read_u8())));
let target = try!(self.reader.read_u8());
let num_target = try!(self.reader.read_u8());
let rg_index = try!(self.reader.read_u16::<LittleEndian>());
let channel_desc_byte = try!(self.reader.read_u8());
let mut class_id = None;
let mut rho = None;
let mut reflectance = None;
if self.version.major >= 5 && self.version.minor >= 2 {
class_id = Some(try!(self.reader.read_u8()));
}
// These 5.3 and 5.4 reads are untested, since I don't have a real-world sample file yet.
if self.version.major >= 5 && self.version.minor >= 3 {
rho = Some(try!(self.reader.read_f32::<LittleEndian>()));
}
if self.version.major >= 5 && self.version.minor >= 4 {
reflectance = Some(try!(self.reader.read_i16::<LittleEndian>()));
}
Ok(Some(Point {
time: time,
range: range,
theta: theta,
x: x,
y: y,
z: z,
amplitude: amplitude,
width: width,
target_type: target_type,
target: target,
num_target: num_target,
rg_index: rg_index,
facet_number: channel_desc_byte & 0x3,
high_channel: (channel_desc_byte & 0b01000000) == 0b01000000,
class_id: class_id,
rho: rho,
reflectance: reflectance
}))
}
/// Returns this file's version as a `(u16, u16)`.
///
/// # Examples
///
/// ```
/// use sdc::reader::{Reader, Version};
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let Version { major, minor } = reader.version();
/// ```
pub fn version(&self) -> Version {
self.version
}
/// Returns this file's header information, or an error if it is not valid ASCII.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let header_information = reader.header_information_as_str();
/// ```
pub fn header_information_as_str(&self) -> Result<&str> {
str::from_utf8(&self.header_information[..]).map_err(|e| Error::from(e))
}
}
impl<R: Read> IntoIterator for Reader<R> {
type Item = Point;
type IntoIter = PointIterator<R>;
fn into_iter(self) -> Self::IntoIter {
PointIterator { reader: self }
}
}
/// An iterator over a reader's points.
#[derive(Debug)]
pub struct PointIterator<R: Read> {
reader: Reader<R>,
}
impl<R: Read> Iterator for PointIterator<R> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
self.reader.next_point().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn read_points() {
let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
}
#[test]
fn read_52() {
let reader = Reader::from_path("data/4-points-5.2.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
assert_eq!(4, points[0].class_id.unwrap());
}
}
|
{
return Err(Error::InvalidHeaderInformation);
}
|
conditional_block
|
reader.rs
|
//! Read points from an .sdc file.
use std::fs::File;
use std::iter::IntoIterator;
use std::io::{BufReader, Read};
use std::path::Path;
use std::str;
use byteorder;
use byteorder::{LittleEndian, ReadBytesExt};
use error::Error;
use point::{Point, TargetType};
use result::Result;
/// An object for reading .sdc points.
///
/// We don't just read them all into memory right away since.sdc files can be quite big.
#[derive(Debug)]
pub struct Reader<R: Read> {
reader: R,
version: Version,
header_information: Vec<u8>,
}
/// The sdc file version.
#[derive(Clone, Copy, Debug)]
pub struct Version {
/// The sdc major version.
pub major: u16,
/// The sdc minor version.
pub minor: u16,
}
impl Reader<BufReader<File>> {
/// Creates a new reader for a path.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// ```
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Reader<BufReader<File>>> {
let reader = BufReader::new(try!(File::open(path)));
Reader::new(reader)
}
}
impl<R: Read> Reader<R> {
/// Creates a new reader, consuming a `Read`.
///
/// Reads and validates the sdc file header: a little-endian `u32` total
/// header size, a `u16` major version (must be 5), a `u16` minor version,
/// and then `header_size - 8` bytes of free-form header information.
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use sdc::reader::Reader;
/// let file = File::open("data/4-points-5.0.sdc").unwrap();
/// let reader = Reader::new(file);
/// ```
pub fn new(mut reader: R) -> Result<Reader<R>> {
    // Total header size in bytes, including the 8 bytes of size + version
    // read here.
    let header_size = try!(reader.read_u32::<LittleEndian>());
    let major = try!(reader.read_u16::<LittleEndian>());
    // Only sdc major version 5 is supported.
    if major!= 5 {
        return Err(Error::InvalidMajorVersion(major));
    }
    let minor = try!(reader.read_u16::<LittleEndian>());
    // The remainder of the header is opaque "header information".
    // NOTE(review): this subtraction underflows (panics in debug builds) if
    // header_size < 8 — confirm whether malformed files should error instead.
    let header_information_size = header_size - 8;
    let mut header_information = Vec::with_capacity(header_information_size as usize);
    // A short read here means the file was truncated mid-header.
    if try!(reader.by_ref()
        .take(header_information_size as u64)
        .read_to_end(&mut header_information))!=
        header_information_size as usize {
        return Err(Error::InvalidHeaderInformation);
    }
    Ok(Reader {
        reader: reader,
        version: Version { major: major, minor: minor, },
        header_information: header_information,
    })
}
/// Reads the next point from the underlying `Read`.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let mut reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let point = reader.next_point();
/// ```
pub fn
|
(&mut self) -> Result<Option<Point>> {
// Technically we should just check the first byte instead of the first four, but the work
// required to do that doesn't seem worth it at the moment.
let time = match self.reader.read_f64::<LittleEndian>() {
Ok(time) => time,
Err(byteorder::Error::UnexpectedEOF) => return Ok(None),
Err(err) => return Err(Error::from(err)),
};
let range = try!(self.reader.read_f32::<LittleEndian>());
let theta = try!(self.reader.read_f32::<LittleEndian>());
let x = try!(self.reader.read_f32::<LittleEndian>());
let y = try!(self.reader.read_f32::<LittleEndian>());
let z = try!(self.reader.read_f32::<LittleEndian>());
let amplitude = try!(self.reader.read_u16::<LittleEndian>());
let width = try!(self.reader.read_u16::<LittleEndian>());
let target_type = try!(TargetType::from_u8(try!(self.reader.read_u8())));
let target = try!(self.reader.read_u8());
let num_target = try!(self.reader.read_u8());
let rg_index = try!(self.reader.read_u16::<LittleEndian>());
let channel_desc_byte = try!(self.reader.read_u8());
let mut class_id = None;
let mut rho = None;
let mut reflectance = None;
if self.version.major >= 5 && self.version.minor >= 2 {
class_id = Some(try!(self.reader.read_u8()));
}
// These 5.3 and 5.4 reads are untested, since I don't have a real-world sample file yet.
if self.version.major >= 5 && self.version.minor >= 3 {
rho = Some(try!(self.reader.read_f32::<LittleEndian>()));
}
if self.version.major >= 5 && self.version.minor >= 4 {
reflectance = Some(try!(self.reader.read_i16::<LittleEndian>()));
}
Ok(Some(Point {
time: time,
range: range,
theta: theta,
x: x,
y: y,
z: z,
amplitude: amplitude,
width: width,
target_type: target_type,
target: target,
num_target: num_target,
rg_index: rg_index,
facet_number: channel_desc_byte & 0x3,
high_channel: (channel_desc_byte & 0b01000000) == 0b01000000,
class_id: class_id,
rho: rho,
reflectance: reflectance
}))
}
/// Returns this file's version as a `(u16, u16)`.
///
/// # Examples
///
/// ```
/// use sdc::reader::{Reader, Version};
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let Version { major, minor } = reader.version();
/// ```
pub fn version(&self) -> Version {
self.version
}
/// Returns this file's header information, or an error if it is not valid ASCII.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let header_information = reader.header_information_as_str();
/// ```
pub fn header_information_as_str(&self) -> Result<&str> {
str::from_utf8(&self.header_information[..]).map_err(|e| Error::from(e))
}
}
impl<R: Read> IntoIterator for Reader<R> {
type Item = Point;
type IntoIter = PointIterator<R>;
fn into_iter(self) -> Self::IntoIter {
PointIterator { reader: self }
}
}
/// An iterator over a reader's points.
#[derive(Debug)]
pub struct PointIterator<R: Read> {
reader: Reader<R>,
}
impl<R: Read> Iterator for PointIterator<R> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
self.reader.next_point().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn read_points() {
let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
}
#[test]
fn read_52() {
let reader = Reader::from_path("data/4-points-5.2.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
assert_eq!(4, points[0].class_id.unwrap());
}
}
|
next_point
|
identifier_name
|
reader.rs
|
//! Read points from an.sdc file.
use std::fs::File;
use std::iter::IntoIterator;
use std::io::{BufReader, Read};
use std::path::Path;
use std::str;
use byteorder;
use byteorder::{LittleEndian, ReadBytesExt};
use error::Error;
use point::{Point, TargetType};
use result::Result;
/// An object for readings.sdc points.
///
/// We don't just read them all into memory right away since.sdc files can be quite big.
#[derive(Debug)]
pub struct Reader<R: Read> {
reader: R,
version: Version,
header_information: Vec<u8>,
}
/// The sdc file version.
#[derive(Clone, Copy, Debug)]
pub struct Version {
/// The sdc major version.
pub major: u16,
/// The sdc minor version.
pub minor: u16,
}
impl Reader<BufReader<File>> {
/// Creates a new reader for a path.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// ```
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Reader<BufReader<File>>> {
let reader = BufReader::new(try!(File::open(path)));
Reader::new(reader)
}
}
impl<R: Read> Reader<R> {
/// Creates a new reader, consuimg a `Read`.
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use sdc::reader::Reader;
/// let file = File::open("data/4-points-5.0.sdc").unwrap();
/// let reader = Reader::new(file);
/// ```
pub fn new(mut reader: R) -> Result<Reader<R>> {
let header_size = try!(reader.read_u32::<LittleEndian>());
let major = try!(reader.read_u16::<LittleEndian>());
if major!= 5 {
return Err(Error::InvalidMajorVersion(major));
}
let minor = try!(reader.read_u16::<LittleEndian>());
let header_information_size = header_size - 8;
let mut header_information = Vec::with_capacity(header_information_size as usize);
if try!(reader.by_ref()
.take(header_information_size as u64)
.read_to_end(&mut header_information))!=
header_information_size as usize {
return Err(Error::InvalidHeaderInformation);
}
Ok(Reader {
reader: reader,
version: Version { major: major, minor: minor, },
header_information: header_information,
})
|
/// Reads the next point from the underlying `Read`.
///
/// Returns `Ok(None)` once the reader is cleanly exhausted; any other
/// read failure (including EOF in the middle of a record) is an error.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let mut reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let point = reader.next_point();
/// ```
pub fn next_point(&mut self) -> Result<Option<Point>> {
    // Technically we should just check the first byte instead of the first four, but the work
    // required to do that doesn't seem worth it at the moment.
    // EOF on this very first field means we ran out of points (Ok(None));
    // any other error is propagated.
    let time = match self.reader.read_f64::<LittleEndian>() {
        Ok(time) => time,
        Err(byteorder::Error::UnexpectedEOF) => return Ok(None),
        Err(err) => return Err(Error::from(err)),
    };
    // Fixed-size fields, read in the exact on-disk (little-endian) order.
    let range = try!(self.reader.read_f32::<LittleEndian>());
    let theta = try!(self.reader.read_f32::<LittleEndian>());
    let x = try!(self.reader.read_f32::<LittleEndian>());
    let y = try!(self.reader.read_f32::<LittleEndian>());
    let z = try!(self.reader.read_f32::<LittleEndian>());
    let amplitude = try!(self.reader.read_u16::<LittleEndian>());
    let width = try!(self.reader.read_u16::<LittleEndian>());
    let target_type = try!(TargetType::from_u8(try!(self.reader.read_u8())));
    let target = try!(self.reader.read_u8());
    let num_target = try!(self.reader.read_u8());
    let rg_index = try!(self.reader.read_u16::<LittleEndian>());
    let channel_desc_byte = try!(self.reader.read_u8());
    // Trailing fields are optional; their presence depends on the file's
    // minor version (major is always 5, checked in `new`).
    let mut class_id = None;
    let mut rho = None;
    let mut reflectance = None;
    if self.version.major >= 5 && self.version.minor >= 2 {
        class_id = Some(try!(self.reader.read_u8()));
    }
    // These 5.3 and 5.4 reads are untested, since I don't have a real-world sample file yet.
    if self.version.major >= 5 && self.version.minor >= 3 {
        rho = Some(try!(self.reader.read_f32::<LittleEndian>()));
    }
    if self.version.major >= 5 && self.version.minor >= 4 {
        reflectance = Some(try!(self.reader.read_i16::<LittleEndian>()));
    }
    Ok(Some(Point {
        time: time,
        range: range,
        theta: theta,
        x: x,
        y: y,
        z: z,
        amplitude: amplitude,
        width: width,
        target_type: target_type,
        target: target,
        num_target: num_target,
        rg_index: rg_index,
        // Low two bits of the channel descriptor byte.
        facet_number: channel_desc_byte & 0x3,
        // Bit 6 of the channel descriptor byte.
        high_channel: (channel_desc_byte & 0b01000000) == 0b01000000,
        class_id: class_id,
        rho: rho,
        reflectance: reflectance
    }))
}
/// Returns this file's version.
///
/// # Examples
///
/// ```
/// use sdc::reader::{Reader, Version};
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let Version { major, minor } = reader.version();
/// ```
pub fn version(&self) -> Version {
    self.version
}
/// Returns this file's header information, or an error if it is not valid ASCII.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let header_information = reader.header_information_as_str();
/// ```
pub fn header_information_as_str(&self) -> Result<&str> {
str::from_utf8(&self.header_information[..]).map_err(|e| Error::from(e))
}
}
impl<R: Read> IntoIterator for Reader<R> {
type Item = Point;
type IntoIter = PointIterator<R>;
fn into_iter(self) -> Self::IntoIter {
PointIterator { reader: self }
}
}
/// An iterator over a reader's points.
#[derive(Debug)]
pub struct PointIterator<R: Read> {
reader: Reader<R>,
}
impl<R: Read> Iterator for PointIterator<R> {
    type Item = Point;

    /// Yields the next point, ending iteration at end-of-file.
    ///
    /// # Panics
    ///
    /// Panics (via `unwrap`) if the underlying reader returns an I/O or
    /// parse error, since `Iterator::next` has no way to report it.
    fn next(&mut self) -> Option<Self::Item> {
        self.reader.next_point().unwrap()
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn read_points() {
let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
}
#[test]
fn read_52() {
let reader = Reader::from_path("data/4-points-5.2.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
assert_eq!(4, points[0].class_id.unwrap());
}
}
|
}
|
random_line_split
|
reader.rs
|
//! Read points from an.sdc file.
use std::fs::File;
use std::iter::IntoIterator;
use std::io::{BufReader, Read};
use std::path::Path;
use std::str;
use byteorder;
use byteorder::{LittleEndian, ReadBytesExt};
use error::Error;
use point::{Point, TargetType};
use result::Result;
/// An object for readings.sdc points.
///
/// We don't just read them all into memory right away since.sdc files can be quite big.
#[derive(Debug)]
pub struct Reader<R: Read> {
reader: R,
version: Version,
header_information: Vec<u8>,
}
/// The sdc file version.
#[derive(Clone, Copy, Debug)]
pub struct Version {
/// The sdc major version.
pub major: u16,
/// The sdc minor version.
pub minor: u16,
}
impl Reader<BufReader<File>> {
/// Creates a new reader for a path.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// ```
pub fn from_path<P: AsRef<Path>>(path: P) -> Result<Reader<BufReader<File>>> {
let reader = BufReader::new(try!(File::open(path)));
Reader::new(reader)
}
}
impl<R: Read> Reader<R> {
/// Creates a new reader, consuming a `Read`.
///
/// Reads and validates the sdc file header: a little-endian `u32` total
/// header size, a `u16` major version (must be 5), a `u16` minor version,
/// and then `header_size - 8` bytes of free-form header information.
///
/// # Examples
///
/// ```
/// use std::fs::File;
/// use sdc::reader::Reader;
/// let file = File::open("data/4-points-5.0.sdc").unwrap();
/// let reader = Reader::new(file);
/// ```
pub fn new(mut reader: R) -> Result<Reader<R>> {
    // Total header size in bytes, including the 8 bytes of size + version
    // read here.
    let header_size = try!(reader.read_u32::<LittleEndian>());
    let major = try!(reader.read_u16::<LittleEndian>());
    // Only sdc major version 5 is supported.
    if major != 5 {
        return Err(Error::InvalidMajorVersion(major));
    }
    let minor = try!(reader.read_u16::<LittleEndian>());
    // The header must at least contain the 8 bytes already consumed;
    // anything smaller would make the subtraction below underflow (a panic
    // in debug builds), so reject the file as malformed instead.
    if header_size < 8 {
        return Err(Error::InvalidHeaderInformation);
    }
    let header_information_size = header_size - 8;
    let mut header_information = Vec::with_capacity(header_information_size as usize);
    // A short read here means the file was truncated mid-header.
    if try!(reader.by_ref()
                .take(header_information_size as u64)
                .read_to_end(&mut header_information)) !=
        header_information_size as usize {
        return Err(Error::InvalidHeaderInformation);
    }
    Ok(Reader {
        reader: reader,
        version: Version { major: major, minor: minor, },
        header_information: header_information,
    })
}
/// Reads the next point from the underlying `Read`.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let mut reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let point = reader.next_point();
/// ```
pub fn next_point(&mut self) -> Result<Option<Point>>
|
let mut class_id = None;
let mut rho = None;
let mut reflectance = None;
if self.version.major >= 5 && self.version.minor >= 2 {
class_id = Some(try!(self.reader.read_u8()));
}
// These 5.3 and 5.4 reads are untested, since I don't have a real-world sample file yet.
if self.version.major >= 5 && self.version.minor >= 3 {
rho = Some(try!(self.reader.read_f32::<LittleEndian>()));
}
if self.version.major >= 5 && self.version.minor >= 4 {
reflectance = Some(try!(self.reader.read_i16::<LittleEndian>()));
}
Ok(Some(Point {
time: time,
range: range,
theta: theta,
x: x,
y: y,
z: z,
amplitude: amplitude,
width: width,
target_type: target_type,
target: target,
num_target: num_target,
rg_index: rg_index,
facet_number: channel_desc_byte & 0x3,
high_channel: (channel_desc_byte & 0b01000000) == 0b01000000,
class_id: class_id,
rho: rho,
reflectance: reflectance
}))
}
/// Returns this file's version as a `(u16, u16)`.
///
/// # Examples
///
/// ```
/// use sdc::reader::{Reader, Version};
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let Version { major, minor } = reader.version();
/// ```
pub fn version(&self) -> Version {
self.version
}
/// Returns this file's header information, or an error if it is not valid ASCII.
///
/// # Examples
///
/// ```
/// use sdc::reader::Reader;
/// let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
/// let header_information = reader.header_information_as_str();
/// ```
pub fn header_information_as_str(&self) -> Result<&str> {
str::from_utf8(&self.header_information[..]).map_err(|e| Error::from(e))
}
}
impl<R: Read> IntoIterator for Reader<R> {
type Item = Point;
type IntoIter = PointIterator<R>;
fn into_iter(self) -> Self::IntoIter {
PointIterator { reader: self }
}
}
/// An iterator over a reader's points.
#[derive(Debug)]
pub struct PointIterator<R: Read> {
reader: Reader<R>,
}
impl<R: Read> Iterator for PointIterator<R> {
type Item = Point;
fn next(&mut self) -> Option<Self::Item> {
self.reader.next_point().unwrap()
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn read_points() {
let reader = Reader::from_path("data/4-points-5.0.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
}
#[test]
fn read_52() {
let reader = Reader::from_path("data/4-points-5.2.sdc").unwrap();
let points: Vec<_> = reader.into_iter().collect();
assert_eq!(4, points.len());
assert_eq!(4, points[0].class_id.unwrap());
}
}
|
{
// Technically we should just check the first byte instead of the first four, but the work
// required to do that doesn't seem worth it at the moment.
let time = match self.reader.read_f64::<LittleEndian>() {
Ok(time) => time,
Err(byteorder::Error::UnexpectedEOF) => return Ok(None),
Err(err) => return Err(Error::from(err)),
};
let range = try!(self.reader.read_f32::<LittleEndian>());
let theta = try!(self.reader.read_f32::<LittleEndian>());
let x = try!(self.reader.read_f32::<LittleEndian>());
let y = try!(self.reader.read_f32::<LittleEndian>());
let z = try!(self.reader.read_f32::<LittleEndian>());
let amplitude = try!(self.reader.read_u16::<LittleEndian>());
let width = try!(self.reader.read_u16::<LittleEndian>());
let target_type = try!(TargetType::from_u8(try!(self.reader.read_u8())));
let target = try!(self.reader.read_u8());
let num_target = try!(self.reader.read_u8());
let rg_index = try!(self.reader.read_u16::<LittleEndian>());
let channel_desc_byte = try!(self.reader.read_u8());
|
identifier_body
|
moves-based-on-type-block-bad.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
#![feature(box_syntax)]
struct S {
x: Box<E>
}
enum
|
{
Foo(Box<S>),
Bar(Box<isize>),
Baz
}
fn f<G>(s: &S, g: G) where G: FnOnce(&S) {
g(s)
}
fn main() {
let s = S { x: box E::Bar(box 42) };
loop {
f(&s, |hellothere| {
match hellothere.x { //~ ERROR cannot move out
box E::Foo(_) => {}
box E::Bar(x) => println!("{}", x.to_string()), //~ NOTE attempting to move value to here
box E::Baz => {}
}
})
}
}
|
E
|
identifier_name
|
moves-based-on-type-block-bad.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
#![feature(box_syntax)]
struct S {
x: Box<E>
}
enum E {
Foo(Box<S>),
Bar(Box<isize>),
Baz
}
fn f<G>(s: &S, g: G) where G: FnOnce(&S) {
g(s)
}
fn main() {
let s = S { x: box E::Bar(box 42) };
loop {
f(&s, |hellothere| {
match hellothere.x { //~ ERROR cannot move out
box E::Foo(_) => {}
box E::Bar(x) => println!("{}", x.to_string()), //~ NOTE attempting to move value to here
box E::Baz =>
|
}
})
}
}
|
{}
|
conditional_block
|
moves-based-on-type-block-bad.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
#![feature(box_syntax)]
struct S {
x: Box<E>
}
enum E {
Foo(Box<S>),
Bar(Box<isize>),
Baz
}
fn f<G>(s: &S, g: G) where G: FnOnce(&S) {
g(s)
}
fn main()
|
{
let s = S { x: box E::Bar(box 42) };
loop {
f(&s, |hellothere| {
match hellothere.x { //~ ERROR cannot move out
box E::Foo(_) => {}
box E::Bar(x) => println!("{}", x.to_string()), //~ NOTE attempting to move value to here
box E::Baz => {}
}
})
}
}
|
identifier_body
|
|
moves-based-on-type-block-bad.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-tidy-linelength
#![feature(box_syntax)]
struct S {
x: Box<E>
}
enum E {
Foo(Box<S>),
Bar(Box<isize>),
Baz
}
fn f<G>(s: &S, g: G) where G: FnOnce(&S) {
g(s)
}
fn main() {
let s = S { x: box E::Bar(box 42) };
loop {
f(&s, |hellothere| {
match hellothere.x { //~ ERROR cannot move out
box E::Foo(_) => {}
box E::Bar(x) => println!("{}", x.to_string()), //~ NOTE attempting to move value to here
box E::Baz => {}
}
})
}
}
|
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
random_line_split
|
frontend.rs
|
use std::io::{Read, Write, Stdin, stdin, Stdout, stdout};
use std::ops::Drop;
use termion;
use termion::{clear, color};
use termion::raw::IntoRawMode;
use left_pad::leftpad;
use cursor::Cursor;
// The Frontend is responsible for rendering the state of the editor
// to the screen and interacting with the user.
pub struct Frontend {
stdin: Stdin,
stdout: termion::raw::RawTerminal<Stdout>,
}
impl Frontend {
/// Creates a new Frontend
pub fn new() -> Frontend {
let in_ = stdin();
let out = stdout().into_raw_mode().unwrap();
Frontend {
stdin: in_,
stdout: out,
}
}
/// Clears the screen
pub fn clear_screen(&mut self) {
write!(self.stdout, "{}", clear::All).unwrap();
}
/// Draws the state of the editor to the screen.
///
/// Renders a centered, highlighted title bar showing the buffer's filename
/// (or a placeholder), then the visible window of `lines` with cyan line
/// numbers; rows past the end of the buffer are drawn as `~`.
pub fn draw(&mut self, cursor: &Cursor, filename: &Option<String>, lines: &[String]) {
    let (width, height) = self.terminal_size();
    let num_lines = lines.len();
    // The index of the first line of text that is rendered, chosen so the
    // cursor's line stays on screen.
    let start = if cursor.line > height { cursor.line - height } else { 0 };
    // The filename of the current buffer or a no filename message.
    let name = filename.clone().unwrap_or_else(|| String::from("**no filename**"));
    // saturating_sub prevents an underflow panic when the name is wider
    // than the terminal; the title is simply left unpadded in that case.
    let padding = width.saturating_sub(name.len()) / 2;
    // Account for the odd leftover column when padding doesn't split evenly.
    let need_extra = padding * 2 + name.len() != width;
    self.goto_term(0, 0);
    // Draw the title bar.
    write!(&mut self.stdout, "{}{}{}{}{}{}{}{}",
        color::Bg(color::White),
        color::Fg(color::Black),
        leftpad(" ", padding),
        name,
        leftpad(" ", padding),
        if need_extra { " " } else { "" },
        color::Fg(color::Reset),
        color::Bg(color::Reset),
    ).unwrap();
    // Draw the lines of text.
    for (y, line_number) in (start..start + height - 1).enumerate() {
        self.goto_term(0, (y + 1) as u16);
        if line_number < num_lines {
            // Draw the line of text with a right-aligned line number.
            write!(self.stdout, "{}{}{} {}",
                color::Fg(color::Cyan),
                leftpad(format!("{}", line_number + 1), 3),
                color::Fg(color::Reset),
                lines[line_number],
            ).unwrap();
        } else {
            // Draw a ~ to show that there is no line.
            write!(self.stdout, "{} ~{}",
                color::Fg(color::Cyan),
                color::Fg(color::Reset),
            ).unwrap();
        }
    }
}
/// Flushes stdout to make the changes show
pub fn flush(&mut self) {
self.stdout.flush().unwrap();
}
/// Hides the cursor
pub fn hide_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Hide{}).unwrap();
}
/// Shows the cursor
pub fn show_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Show{}).unwrap();
}
/// Moves the cursor to x, y, which are both 0 based in terminal cordinates
pub fn
|
(&mut self, x: u16, y: u16) {
write!(self.stdout, "{}", termion::cursor::Goto(x+1, y+1)).unwrap();
}
/// Moves the terminal cursor to the screen position corresponding to the
/// given buffer `Cursor`.
pub fn move_cursor(&mut self, cursor: &Cursor) {
    let (_, height) = self.terminal_size();
    // Shift right past the line-number gutter (3 digits plus a space —
    // see `draw`).
    let column = (cursor.column + 4) as u16;
    // When the buffer is scrolled, the cursor line maps directly to a
    // screen row; otherwise row 0 is the title bar, so shift down by one.
    let row = if cursor.line > height {
        cursor.line as u16
    } else {
        (cursor.line + 1) as u16
    };
    self.goto_term(column, row)
}
/// Returns the size of the terminal as `(width, height)`.
pub fn terminal_size(&self) -> (usize, usize) {
    let (width, height) = termion::terminal_size().unwrap();
    (width as usize, height as usize)
}
/// Prompts for a line of text
///
/// Draws a highlighted prompt on the bottom row of the terminal, reads a
/// line of input, and returns it; returns `None` if the user cancels.
pub fn prompt_for_text(&mut self, prompt: &str) -> Option<String> {
    let (width, height) = termion::terminal_size().unwrap();
    self.goto_term(0, height - 1);
    // Draw the background: clear the bottom row and fill it with the
    // inverted (white-on-black) color scheme.
    write!(&mut self.stdout, "{}{}{}{}",
        termion::clear::CurrentLine,
        termion::color::Bg(color::White),
        termion::color::Fg(color::Black),
        leftpad("", width as usize)).unwrap();
    self.goto_term(0, height - 1);
    // Draw the prompt.
    write!(&mut self.stdout, "{}: ", prompt).unwrap();
    // Show it.
    self.flush();
    // Get the input from the user,
    let input = self.read_line();
    // Reset the foreground and background.
    write!(self.stdout, "{}{}", color::Fg(color::Reset), color::Bg(color::Reset)).unwrap();
    input
}
/// Prompts the user with a "(y/n)" question and returns their answer.
///
/// Returns `true` only when the response begins with a `y`; an empty or
/// cancelled response counts as `false`.
pub fn prompt_for_bool(&mut self, prompt: &str) -> bool {
    match self.prompt_for_text(&format!("{} (y/n)", prompt)) {
        Some(answer) => answer.starts_with('y'),
        None => false,
    }
}
/// Reads a line of text from the user, echoing typed characters and
/// handling backspace; returns `None` if the input is cancelled.
/// TODO: Fix for Unicode. I think that the actual user input is handled
/// correctly, but echoing the typed characters may not be.
fn read_line(&mut self) -> Option<String> {
    // Start with a buffer of capacity 30 so that small inputs don't require
    // reallocating the buffer.
    let mut buf = Vec::with_capacity(30);
    loop {
        // Get one byte of input
        let mut b = [0; 1];
        self.stdin.read_exact(&mut b[..]).unwrap();
        match b[0] {
            // NUL, ETX (Ctrl-C), and EOT (Ctrl-D) cancel the prompt.
            0 | 3 | 4 => return None,
            // 0x7f is backspace
            0x7f if!buf.is_empty() => {
                // Delete the last character typed
                buf.pop();
                // Clear the last character from the screen
                write!(&mut self.stdout, "{}{}",
                    termion::cursor::Left(1),
                    termion::clear::UntilNewline).unwrap();
                self.flush();
            },
            // Backspace on an empty buffer is a no-op.
            0x7f => {},
            // Newline or CR ends the input
            b'\n' | b'\r' => break,
            c => {
                // Add the typed character to the input
                buf.push(c);
                // Draw it to the screen
                write!(&mut self.stdout, "{}", char::from(c)).unwrap();
                self.flush();
            },
        };
    }
    // Convert the buffer to a String.
    // NOTE(review): this `unwrap` panics on non-UTF-8 input — see the
    // Unicode TODO above.
    Some(String::from_utf8(buf).unwrap())
}
}
impl Drop for Frontend {
/// Clean up the terminal after the we go out of scope.
fn drop(&mut self) {
self.clear_screen();
self.goto_term(0, 0);
self.show_cursor();
self.flush();
}
}
|
goto_term
|
identifier_name
|
frontend.rs
|
use std::io::{Read, Write, Stdin, stdin, Stdout, stdout};
use std::ops::Drop;
use termion;
use termion::{clear, color};
use termion::raw::IntoRawMode;
use left_pad::leftpad;
use cursor::Cursor;
// The Frontend is responsible for rendering the state of the editor
// to the screen and interacting with the user.
pub struct Frontend {
stdin: Stdin,
stdout: termion::raw::RawTerminal<Stdout>,
}
impl Frontend {
/// Creates a new Frontend
pub fn new() -> Frontend {
let in_ = stdin();
let out = stdout().into_raw_mode().unwrap();
Frontend {
stdin: in_,
stdout: out,
}
}
/// Clears the screen
pub fn clear_screen(&mut self) {
write!(self.stdout, "{}", clear::All).unwrap();
}
/// Draws the state of the editor to the screen.
pub fn draw(&mut self, cursor: &Cursor, filename: &Option<String>, lines: &[String]) {
let (width, height) = self.terminal_size();
let num_lines = lines.len();
// The index of the first line of text that is rendered.
let start = if cursor.line > height { cursor.line - height } else { 0 };
// The filename of the current buffer or a no filename message.
let name = filename.clone().unwrap_or_else(|| String::from("**no filename**"));
let padding = (width - name.len()) / 2;
let need_extra = padding*2+name.len()!= width;
self.goto_term(0, 0);
// Draw the title bar.
write!(&mut self.stdout, "{}{}{}{}{}{}{}{}",
color::Bg(color::White),
color::Fg(color::Black),
leftpad(" ", padding),
name,
leftpad(" ", padding),
if need_extra { " " } else { "" },
color::Fg(color::Reset),
color::Bg(color::Reset),
).unwrap();
// Draw the lines of text.
for (y, line_number) in (start..start + height - 1).enumerate() {
self.goto_term(0, (y + 1) as u16);
if line_number < num_lines {
// Draw the line of text
write!(self.stdout, "{}{}{} {}",
color::Fg(color::Cyan),
leftpad(format!("{}", line_number + 1), 3),
color::Fg(color::Reset),
lines[line_number],
).unwrap();
} else
|
}
}
/// Flushes stdout to make the changes show
pub fn flush(&mut self) {
self.stdout.flush().unwrap();
}
/// Hides the cursor
pub fn hide_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Hide{}).unwrap();
}
/// Shows the cursor
pub fn show_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Show{}).unwrap();
}
/// Moves the cursor to x, y, which are both 0 based in terminal cordinates
pub fn goto_term(&mut self, x: u16, y: u16) {
write!(self.stdout, "{}", termion::cursor::Goto(x+1, y+1)).unwrap();
}
/// Moves the cursor to the position specified by the Cursor
pub fn move_cursor(&mut self, cursor: &Cursor) {
let (_, height) = self.terminal_size();
let x = (cursor.column + 4) as u16;
let y = if cursor.line > height {
cursor.line as u16
} else {
(cursor.line + 1) as u16
};
self.goto_term(x, y)
}
/// Returns the size of the terminal as (width, height)
pub fn terminal_size(&self) -> (usize, usize) {
let (w, h) = termion::terminal_size().unwrap();
(w as usize, h as usize)
}
/// Prompts for a line of text
pub fn prompt_for_text(&mut self, prompt: &str) -> Option<String> {
let (width, height) = termion::terminal_size().unwrap();
self.goto_term(0, height - 1);
// Draw the background.
write!(&mut self.stdout, "{}{}{}{}",
termion::clear::CurrentLine,
termion::color::Bg(color::White),
termion::color::Fg(color::Black),
leftpad("", width as usize)).unwrap();
self.goto_term(0, height - 1);
// Draw the prompt.
write!(&mut self.stdout, "{}: ", prompt).unwrap();
// Show it.
self.flush();
// Get the input from the user,
let input = self.read_line();
// Reset the forground and background.
write!(self.stdout, "{}{}", color::Fg(color::Reset), color::Bg(color::Reset)).unwrap();
input
}
/// Prompts for a yes/no response from the user
pub fn prompt_for_bool(&mut self, prompt: &str) -> bool {
let response = self.prompt_for_text(&format!("{} (y/n)", prompt));
if let Some(r) = response {
!r.is_empty() && r.chars().nth(0).unwrap() == 'y'
} else {
false
}
}
/// Reads a line of text from the user.
/// TODO: Fix for Unicode. I think that the actual user input is handled
/// correctly, but echoing the typed characters may not be.
fn read_line(&mut self) -> Option<String> {
// Start with a buffer of size 40 so that small inputs don't require
// reallocating the buffer.
let mut buf = Vec::with_capacity(30);
loop {
// Get one byte of input
let mut b = [0; 1];
self.stdin.read_exact(&mut b[..]).unwrap();
match b[0] {
0 | 3 | 4 => return None,
// 0x7f is backspace
0x7f if!buf.is_empty() => {
// Delete the last character typed
buf.pop();
// Clear the last character from the screen
write!(&mut self.stdout, "{}{}",
termion::cursor::Left(1),
termion::clear::UntilNewline).unwrap();
self.flush();
},
0x7f => {},
// Newline or CR ends the input
b'\n' | b'\r' => break,
c => {
// Add the typed character to the input
buf.push(c);
// Draw it to the screen
write!(&mut self.stdout, "{}", char::from(c)).unwrap();
self.flush();
},
};
}
// Convert the buffer to a String.
Some(String::from_utf8(buf).unwrap())
}
}
impl Drop for Frontend {
/// Clean up the terminal after the we go out of scope.
fn drop(&mut self) {
self.clear_screen();
self.goto_term(0, 0);
self.show_cursor();
self.flush();
}
}
|
{
// Draw a ~ to show that there is no line.
write!(self.stdout, "{} ~{}",
color::Fg(color::Cyan),
color::Fg(color::Reset),
).unwrap();
}
|
conditional_block
|
frontend.rs
|
use std::io::{Read, Write, Stdin, stdin, Stdout, stdout};
use std::ops::Drop;
use termion;
use termion::{clear, color};
use termion::raw::IntoRawMode;
use left_pad::leftpad;
use cursor::Cursor;
// The Frontend is responsible for rendering the state of the editor
// to the screen and interacting with the user.
pub struct Frontend {
stdin: Stdin,
stdout: termion::raw::RawTerminal<Stdout>,
}
impl Frontend {
/// Creates a new Frontend
pub fn new() -> Frontend {
let in_ = stdin();
let out = stdout().into_raw_mode().unwrap();
Frontend {
stdin: in_,
stdout: out,
}
}
/// Clears the screen
pub fn clear_screen(&mut self) {
write!(self.stdout, "{}", clear::All).unwrap();
}
/// Draws the state of the editor to the screen.
pub fn draw(&mut self, cursor: &Cursor, filename: &Option<String>, lines: &[String]) {
let (width, height) = self.terminal_size();
let num_lines = lines.len();
// The index of the first line of text that is rendered.
let start = if cursor.line > height { cursor.line - height } else { 0 };
// The filename of the current buffer or a no filename message.
let name = filename.clone().unwrap_or_else(|| String::from("**no filename**"));
let padding = (width - name.len()) / 2;
let need_extra = padding*2+name.len()!= width;
self.goto_term(0, 0);
// Draw the title bar.
write!(&mut self.stdout, "{}{}{}{}{}{}{}{}",
color::Bg(color::White),
color::Fg(color::Black),
leftpad(" ", padding),
name,
leftpad(" ", padding),
if need_extra { " " } else { "" },
color::Fg(color::Reset),
color::Bg(color::Reset),
).unwrap();
// Draw the lines of text.
for (y, line_number) in (start..start + height - 1).enumerate() {
self.goto_term(0, (y + 1) as u16);
if line_number < num_lines {
// Draw the line of text
write!(self.stdout, "{}{}{} {}",
color::Fg(color::Cyan),
leftpad(format!("{}", line_number + 1), 3),
color::Fg(color::Reset),
lines[line_number],
).unwrap();
} else {
// Draw a ~ to show that there is no line.
write!(self.stdout, "{} ~{}",
color::Fg(color::Cyan),
color::Fg(color::Reset),
).unwrap();
}
}
}
/// Flushes stdout to make the changes show
pub fn flush(&mut self) {
self.stdout.flush().unwrap();
}
/// Hides the cursor
pub fn hide_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Hide{}).unwrap();
}
/// Shows the cursor
pub fn show_cursor(&mut self) {
write!(self.stdout, "{}", termion::cursor::Show{}).unwrap();
}
/// Moves the cursor to x, y, which are both 0 based in terminal cordinates
pub fn goto_term(&mut self, x: u16, y: u16) {
write!(self.stdout, "{}", termion::cursor::Goto(x+1, y+1)).unwrap();
}
/// Moves the cursor to the position specified by the Cursor
pub fn move_cursor(&mut self, cursor: &Cursor) {
let (_, height) = self.terminal_size();
let x = (cursor.column + 4) as u16;
let y = if cursor.line > height {
cursor.line as u16
} else {
(cursor.line + 1) as u16
};
self.goto_term(x, y)
}
/// Returns the size of the terminal as (width, height)
pub fn terminal_size(&self) -> (usize, usize) {
let (w, h) = termion::terminal_size().unwrap();
(w as usize, h as usize)
}
/// Prompts for a line of text
pub fn prompt_for_text(&mut self, prompt: &str) -> Option<String> {
let (width, height) = termion::terminal_size().unwrap();
self.goto_term(0, height - 1);
// Draw the background.
write!(&mut self.stdout, "{}{}{}{}",
termion::clear::CurrentLine,
termion::color::Bg(color::White),
termion::color::Fg(color::Black),
leftpad("", width as usize)).unwrap();
self.goto_term(0, height - 1);
// Draw the prompt.
write!(&mut self.stdout, "{}: ", prompt).unwrap();
// Show it.
self.flush();
// Get the input from the user,
let input = self.read_line();
// Reset the forground and background.
write!(self.stdout, "{}{}", color::Fg(color::Reset), color::Bg(color::Reset)).unwrap();
input
}
/// Prompts for a yes/no response from the user
pub fn prompt_for_bool(&mut self, prompt: &str) -> bool {
let response = self.prompt_for_text(&format!("{} (y/n)", prompt));
if let Some(r) = response {
!r.is_empty() && r.chars().nth(0).unwrap() == 'y'
} else {
false
}
}
/// Reads a line of text from the user.
/// TODO: Fix for Unicode. I think that the actual user input is handled
/// correctly, but echoing the typed characters may not be.
|
// Get one byte of input
let mut b = [0; 1];
self.stdin.read_exact(&mut b[..]).unwrap();
match b[0] {
0 | 3 | 4 => return None,
// 0x7f is backspace
0x7f if!buf.is_empty() => {
// Delete the last character typed
buf.pop();
// Clear the last character from the screen
write!(&mut self.stdout, "{}{}",
termion::cursor::Left(1),
termion::clear::UntilNewline).unwrap();
self.flush();
},
0x7f => {},
// Newline or CR ends the input
b'\n' | b'\r' => break,
c => {
// Add the typed character to the input
buf.push(c);
// Draw it to the screen
write!(&mut self.stdout, "{}", char::from(c)).unwrap();
self.flush();
},
};
}
// Convert the buffer to a String.
Some(String::from_utf8(buf).unwrap())
}
}
impl Drop for Frontend {
/// Clean up the terminal after the we go out of scope.
fn drop(&mut self) {
self.clear_screen();
self.goto_term(0, 0);
self.show_cursor();
self.flush();
}
}
|
fn read_line(&mut self) -> Option<String> {
// Start with a buffer of size 40 so that small inputs don't require
// reallocating the buffer.
let mut buf = Vec::with_capacity(30);
loop {
|
random_line_split
|
error.rs
|
//! rzw specific error types
//!
//! These error type is compatible with the rust standard io `ErrorKind`.
pub type Result<T> = std::result::Result<T, Error>;
/// Categories of errors that can occur when interacting with z-Wave.
///
/// This list is intended to grow over time and it is not recommended to exhaustively match against it.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorKind {
/// The controller is not available.
///
/// This could indicate that the controller is in use by another process or was disconnected while
/// performing I/O.
NoController,
/// A parameter was incorrect.
InvalidInput,
/// A unknown Z-Wave syntax was sent.
UnknownZWave,
/// This functionallity is not implemented.
NotImplemented,
/// An I/O error occured.
///
/// The type of I/O error is determined by the inner `io::ErrorKind`.
Io(std::io::ErrorKind),
}
/// An error type for Z-Wave operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Error {
kind: ErrorKind,
description: String,
}
impl Error {
/// Create a new error with a given type and description
pub fn new<T: Into<String>>(kind: ErrorKind, description: T) -> Self {
Error {
kind: kind,
description: description.into(),
}
}
/// Returns the corresponding `ErrorKind` for this error.
pub fn kind(&self) -> ErrorKind
|
}
impl std::fmt::Display for Error {
/// How to print the error
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
fmt.write_str(&self.description)
}
}
impl std::error::Error for Error {
/// Get the error description
fn description(&self) -> &str {
&self.description
}
}
impl From<std::io::Error> for Error {
/// Transform std io errors to this crate error
fn from(io_error: std::io::Error) -> Error {
Error::new(ErrorKind::Io(io_error.kind()), format!("{}", io_error))
}
}
impl From<Error> for std::io::Error {
/// Transform this error to a std io error
fn from(error: Error) -> std::io::Error {
let kind = match error.kind {
ErrorKind::NoController => std::io::ErrorKind::NotFound,
ErrorKind::InvalidInput => std::io::ErrorKind::InvalidInput,
ErrorKind::UnknownZWave => std::io::ErrorKind::InvalidData,
ErrorKind::NotImplemented => std::io::ErrorKind::Other,
ErrorKind::Io(kind) => kind,
};
std::io::Error::new(kind, error.description)
}
}
impl From<serial::Error> for Error {
/// Transform from a serial error
fn from(ser_error: serial::Error) -> Error {
use std::error::Error;
let kind = match ser_error.kind() {
serial::ErrorKind::NoDevice => ErrorKind::NoController,
serial::ErrorKind::InvalidInput => ErrorKind::InvalidInput,
serial::ErrorKind::Io(kind) => ErrorKind::Io(kind),
};
crate::error::Error::new(kind, ser_error.description())
}
}
|
{
self.kind
}
|
identifier_body
|
error.rs
|
//! rzw specific error types
//!
//! These error type is compatible with the rust standard io `ErrorKind`.
pub type Result<T> = std::result::Result<T, Error>;
/// Categories of errors that can occur when interacting with z-Wave.
///
/// This list is intended to grow over time and it is not recommended to exhaustively match against it.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorKind {
/// The controller is not available.
///
/// This could indicate that the controller is in use by another process or was disconnected while
/// performing I/O.
NoController,
/// A parameter was incorrect.
InvalidInput,
/// A unknown Z-Wave syntax was sent.
UnknownZWave,
/// This functionallity is not implemented.
NotImplemented,
/// An I/O error occured.
///
/// The type of I/O error is determined by the inner `io::ErrorKind`.
Io(std::io::ErrorKind),
}
/// An error type for Z-Wave operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct
|
{
kind: ErrorKind,
description: String,
}
impl Error {
/// Create a new error with a given type and description
pub fn new<T: Into<String>>(kind: ErrorKind, description: T) -> Self {
Error {
kind: kind,
description: description.into(),
}
}
/// Returns the corresponding `ErrorKind` for this error.
pub fn kind(&self) -> ErrorKind {
self.kind
}
}
impl std::fmt::Display for Error {
/// How to print the error
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
fmt.write_str(&self.description)
}
}
impl std::error::Error for Error {
/// Get the error description
fn description(&self) -> &str {
&self.description
}
}
impl From<std::io::Error> for Error {
/// Transform std io errors to this crate error
fn from(io_error: std::io::Error) -> Error {
Error::new(ErrorKind::Io(io_error.kind()), format!("{}", io_error))
}
}
impl From<Error> for std::io::Error {
/// Transform this error to a std io error
fn from(error: Error) -> std::io::Error {
let kind = match error.kind {
ErrorKind::NoController => std::io::ErrorKind::NotFound,
ErrorKind::InvalidInput => std::io::ErrorKind::InvalidInput,
ErrorKind::UnknownZWave => std::io::ErrorKind::InvalidData,
ErrorKind::NotImplemented => std::io::ErrorKind::Other,
ErrorKind::Io(kind) => kind,
};
std::io::Error::new(kind, error.description)
}
}
impl From<serial::Error> for Error {
/// Transform from a serial error
fn from(ser_error: serial::Error) -> Error {
use std::error::Error;
let kind = match ser_error.kind() {
serial::ErrorKind::NoDevice => ErrorKind::NoController,
serial::ErrorKind::InvalidInput => ErrorKind::InvalidInput,
serial::ErrorKind::Io(kind) => ErrorKind::Io(kind),
};
crate::error::Error::new(kind, ser_error.description())
}
}
|
Error
|
identifier_name
|
error.rs
|
//! rzw specific error types
//!
//! These error type is compatible with the rust standard io `ErrorKind`.
pub type Result<T> = std::result::Result<T, Error>;
/// Categories of errors that can occur when interacting with z-Wave.
///
/// This list is intended to grow over time and it is not recommended to exhaustively match against it.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorKind {
/// The controller is not available.
///
/// This could indicate that the controller is in use by another process or was disconnected while
/// performing I/O.
NoController,
/// A parameter was incorrect.
InvalidInput,
/// A unknown Z-Wave syntax was sent.
UnknownZWave,
/// This functionallity is not implemented.
NotImplemented,
/// An I/O error occured.
///
/// The type of I/O error is determined by the inner `io::ErrorKind`.
Io(std::io::ErrorKind),
}
/// An error type for Z-Wave operations.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Error {
kind: ErrorKind,
description: String,
}
impl Error {
/// Create a new error with a given type and description
pub fn new<T: Into<String>>(kind: ErrorKind, description: T) -> Self {
Error {
kind: kind,
description: description.into(),
}
}
|
}
impl std::fmt::Display for Error {
/// How to print the error
fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::result::Result<(), std::fmt::Error> {
fmt.write_str(&self.description)
}
}
impl std::error::Error for Error {
/// Get the error description
fn description(&self) -> &str {
&self.description
}
}
impl From<std::io::Error> for Error {
/// Transform std io errors to this crate error
fn from(io_error: std::io::Error) -> Error {
Error::new(ErrorKind::Io(io_error.kind()), format!("{}", io_error))
}
}
impl From<Error> for std::io::Error {
/// Transform this error to a std io error
fn from(error: Error) -> std::io::Error {
let kind = match error.kind {
ErrorKind::NoController => std::io::ErrorKind::NotFound,
ErrorKind::InvalidInput => std::io::ErrorKind::InvalidInput,
ErrorKind::UnknownZWave => std::io::ErrorKind::InvalidData,
ErrorKind::NotImplemented => std::io::ErrorKind::Other,
ErrorKind::Io(kind) => kind,
};
std::io::Error::new(kind, error.description)
}
}
impl From<serial::Error> for Error {
/// Transform from a serial error
fn from(ser_error: serial::Error) -> Error {
use std::error::Error;
let kind = match ser_error.kind() {
serial::ErrorKind::NoDevice => ErrorKind::NoController,
serial::ErrorKind::InvalidInput => ErrorKind::InvalidInput,
serial::ErrorKind::Io(kind) => ErrorKind::Io(kind),
};
crate::error::Error::new(kind, ser_error.description())
}
}
|
/// Returns the corresponding `ErrorKind` for this error.
pub fn kind(&self) -> ErrorKind {
self.kind
}
|
random_line_split
|
encryption.rs
|
use std::fs::{File, remove_file};
use std::io::{ErrorKind, Read, Write};
use hyper::Client as HyperClient;
use rusoto_core::{
ChainProvider,
DispatchSignedRequest,
ProvideAwsCredentials,
Region,
default_tls_client,
};
use rusoto_kms::{
DecryptError,
DecryptRequest,
DecryptResponse,
EncryptError,
EncryptRequest,
EncryptResponse,
Kms,
KmsClient,
};
use rustc_serialize::base64::{FromBase64, STANDARD, ToBase64};
use error::{KawsError, KawsResult};
pub struct Encryptor<'a, P, D> where P: ProvideAwsCredentials, D: DispatchSignedRequest {
client: KmsClient<P, D>,
decrypted_files: Vec<String>,
kms_master_key_id: Option<&'a str>,
}
impl<'a> Encryptor<'a, ChainProvider, HyperClient> {
pub fn
|
(
provider: ChainProvider,
region: Region,
kms_master_key_id: Option<&'a str>,
) -> Encryptor<'a, ChainProvider, HyperClient> {
Encryptor {
client: KmsClient::new(
default_tls_client().expect("failed to create HTTP client with TLS"),
provider,
region,
),
decrypted_files: vec![],
kms_master_key_id: kms_master_key_id,
}
}
pub fn decrypt_file(&mut self, source: &str) -> Result<Vec<u8>, KawsError> {
let mut src = File::open(source)?;
let mut encoded_data = String::new();
src.read_to_string(&mut encoded_data)?;
let encrypted_data = encoded_data.from_base64()?;
let decrypted_data = self.decrypt(encrypted_data)?;
match decrypted_data.plaintext {
Some(plaintext) => return Ok(plaintext),
None => return Err(KawsError::new("No plaintext was returned from KMS".to_owned())),
}
}
pub fn encrypt_and_write_file(&mut self, data: &[u8], file_path: &str) -> KawsResult {
let encrypted_data = self.encrypt(data.to_owned())?;
let mut file = File::create(file_path)?;
match encrypted_data.ciphertext_blob {
Some(ref ciphertext_blob) => {
let encoded_data = ciphertext_blob.to_base64(STANDARD);
file.write_all(encoded_data.as_bytes())?;
}
None => return Err(KawsError::new("No ciphertext was returned from KMS".to_owned())),
}
Ok(None)
}
// Private
fn decrypt<'b>(&mut self, encrypted_data: Vec<u8>) -> Result<DecryptResponse, DecryptError> {
let request = DecryptRequest {
encryption_context: None,
grant_tokens: None,
ciphertext_blob: encrypted_data,
};
self.client.decrypt(&request)
}
fn encrypt<'b>(&mut self, decrypted_data: Vec<u8>) -> Result<EncryptResponse, EncryptError> {
let request = EncryptRequest {
plaintext: decrypted_data,
encryption_context: None,
key_id: self.kms_master_key_id.expect("KMS key must be supplied to encrypt").to_owned(),
grant_tokens: None,
};
self.client.encrypt(&request)
}
}
impl<'a, P, D> Drop for Encryptor<'a, P, D>
where P: ProvideAwsCredentials, D: DispatchSignedRequest {
fn drop(&mut self) {
let mut failures = vec![];
for file in self.decrypted_files.iter() {
log_wrap!(&format!("Removing unencrypted file {:?}", file), {
if let Err(error) = remove_file(file) {
match error.kind() {
ErrorKind::NotFound => {},
_ => failures.push(error),
}
}
});
}
if!failures.is_empty() {
panic!(
"Failed to remove one or more encrypted files! You should remove these files \
manually if they are present: {:?}",
failures,
);
}
}
}
|
new
|
identifier_name
|
encryption.rs
|
use std::fs::{File, remove_file};
use std::io::{ErrorKind, Read, Write};
use hyper::Client as HyperClient;
use rusoto_core::{
ChainProvider,
DispatchSignedRequest,
ProvideAwsCredentials,
Region,
default_tls_client,
};
use rusoto_kms::{
DecryptError,
DecryptRequest,
DecryptResponse,
EncryptError,
EncryptRequest,
EncryptResponse,
Kms,
KmsClient,
};
use rustc_serialize::base64::{FromBase64, STANDARD, ToBase64};
use error::{KawsError, KawsResult};
pub struct Encryptor<'a, P, D> where P: ProvideAwsCredentials, D: DispatchSignedRequest {
client: KmsClient<P, D>,
decrypted_files: Vec<String>,
kms_master_key_id: Option<&'a str>,
}
impl<'a> Encryptor<'a, ChainProvider, HyperClient> {
pub fn new(
provider: ChainProvider,
region: Region,
kms_master_key_id: Option<&'a str>,
) -> Encryptor<'a, ChainProvider, HyperClient>
|
pub fn decrypt_file(&mut self, source: &str) -> Result<Vec<u8>, KawsError> {
let mut src = File::open(source)?;
let mut encoded_data = String::new();
src.read_to_string(&mut encoded_data)?;
let encrypted_data = encoded_data.from_base64()?;
let decrypted_data = self.decrypt(encrypted_data)?;
match decrypted_data.plaintext {
Some(plaintext) => return Ok(plaintext),
None => return Err(KawsError::new("No plaintext was returned from KMS".to_owned())),
}
}
pub fn encrypt_and_write_file(&mut self, data: &[u8], file_path: &str) -> KawsResult {
let encrypted_data = self.encrypt(data.to_owned())?;
let mut file = File::create(file_path)?;
match encrypted_data.ciphertext_blob {
Some(ref ciphertext_blob) => {
let encoded_data = ciphertext_blob.to_base64(STANDARD);
file.write_all(encoded_data.as_bytes())?;
}
None => return Err(KawsError::new("No ciphertext was returned from KMS".to_owned())),
}
Ok(None)
}
// Private
fn decrypt<'b>(&mut self, encrypted_data: Vec<u8>) -> Result<DecryptResponse, DecryptError> {
let request = DecryptRequest {
encryption_context: None,
grant_tokens: None,
ciphertext_blob: encrypted_data,
};
self.client.decrypt(&request)
}
fn encrypt<'b>(&mut self, decrypted_data: Vec<u8>) -> Result<EncryptResponse, EncryptError> {
let request = EncryptRequest {
plaintext: decrypted_data,
encryption_context: None,
key_id: self.kms_master_key_id.expect("KMS key must be supplied to encrypt").to_owned(),
grant_tokens: None,
};
self.client.encrypt(&request)
}
}
impl<'a, P, D> Drop for Encryptor<'a, P, D>
where P: ProvideAwsCredentials, D: DispatchSignedRequest {
fn drop(&mut self) {
let mut failures = vec![];
for file in self.decrypted_files.iter() {
log_wrap!(&format!("Removing unencrypted file {:?}", file), {
if let Err(error) = remove_file(file) {
match error.kind() {
ErrorKind::NotFound => {},
_ => failures.push(error),
}
}
});
}
if!failures.is_empty() {
panic!(
"Failed to remove one or more encrypted files! You should remove these files \
manually if they are present: {:?}",
failures,
);
}
}
}
|
{
Encryptor {
client: KmsClient::new(
default_tls_client().expect("failed to create HTTP client with TLS"),
provider,
region,
),
decrypted_files: vec![],
kms_master_key_id: kms_master_key_id,
}
}
|
identifier_body
|
encryption.rs
|
use std::fs::{File, remove_file};
use std::io::{ErrorKind, Read, Write};
use hyper::Client as HyperClient;
use rusoto_core::{
ChainProvider,
DispatchSignedRequest,
ProvideAwsCredentials,
Region,
default_tls_client,
};
use rusoto_kms::{
DecryptError,
DecryptRequest,
DecryptResponse,
EncryptError,
EncryptRequest,
EncryptResponse,
Kms,
KmsClient,
};
use rustc_serialize::base64::{FromBase64, STANDARD, ToBase64};
use error::{KawsError, KawsResult};
pub struct Encryptor<'a, P, D> where P: ProvideAwsCredentials, D: DispatchSignedRequest {
client: KmsClient<P, D>,
decrypted_files: Vec<String>,
kms_master_key_id: Option<&'a str>,
}
impl<'a> Encryptor<'a, ChainProvider, HyperClient> {
pub fn new(
provider: ChainProvider,
region: Region,
kms_master_key_id: Option<&'a str>,
) -> Encryptor<'a, ChainProvider, HyperClient> {
Encryptor {
client: KmsClient::new(
default_tls_client().expect("failed to create HTTP client with TLS"),
provider,
region,
),
decrypted_files: vec![],
kms_master_key_id: kms_master_key_id,
}
}
pub fn decrypt_file(&mut self, source: &str) -> Result<Vec<u8>, KawsError> {
let mut src = File::open(source)?;
let mut encoded_data = String::new();
src.read_to_string(&mut encoded_data)?;
let encrypted_data = encoded_data.from_base64()?;
let decrypted_data = self.decrypt(encrypted_data)?;
match decrypted_data.plaintext {
Some(plaintext) => return Ok(plaintext),
None => return Err(KawsError::new("No plaintext was returned from KMS".to_owned())),
}
}
pub fn encrypt_and_write_file(&mut self, data: &[u8], file_path: &str) -> KawsResult {
let encrypted_data = self.encrypt(data.to_owned())?;
let mut file = File::create(file_path)?;
match encrypted_data.ciphertext_blob {
Some(ref ciphertext_blob) => {
let encoded_data = ciphertext_blob.to_base64(STANDARD);
file.write_all(encoded_data.as_bytes())?;
}
None => return Err(KawsError::new("No ciphertext was returned from KMS".to_owned())),
}
Ok(None)
}
// Private
fn decrypt<'b>(&mut self, encrypted_data: Vec<u8>) -> Result<DecryptResponse, DecryptError> {
let request = DecryptRequest {
encryption_context: None,
grant_tokens: None,
ciphertext_blob: encrypted_data,
};
self.client.decrypt(&request)
}
fn encrypt<'b>(&mut self, decrypted_data: Vec<u8>) -> Result<EncryptResponse, EncryptError> {
let request = EncryptRequest {
plaintext: decrypted_data,
encryption_context: None,
key_id: self.kms_master_key_id.expect("KMS key must be supplied to encrypt").to_owned(),
grant_tokens: None,
};
self.client.encrypt(&request)
|
impl<'a, P, D> Drop for Encryptor<'a, P, D>
where P: ProvideAwsCredentials, D: DispatchSignedRequest {
fn drop(&mut self) {
let mut failures = vec![];
for file in self.decrypted_files.iter() {
log_wrap!(&format!("Removing unencrypted file {:?}", file), {
if let Err(error) = remove_file(file) {
match error.kind() {
ErrorKind::NotFound => {},
_ => failures.push(error),
}
}
});
}
if!failures.is_empty() {
panic!(
"Failed to remove one or more encrypted files! You should remove these files \
manually if they are present: {:?}",
failures,
);
}
}
}
|
}
}
|
random_line_split
|
digest_ext_table_test.rs
|
extern crate veye_checker;
use veye_checker::digest_ext_table::{DigestExtTable, DigestAlgo};
#[test]
fn test_digest_ext_table_init_with_default_values(){
let ext_tbl = DigestExtTable::default();
assert!(ext_tbl.is_md5("whl".to_string()));
assert!(ext_tbl.is_sha1("jar".to_string()));
assert!(ext_tbl.is_sha512("nupkg".to_string()));
}
|
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tjar".to_string();
assert!(ext_tbl.add(DigestAlgo::Md5, file_ext.clone()));
assert!(ext_tbl.is_md5(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha1(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "twar".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha1, file_ext.clone()));
assert!(ext_tbl.is_sha1(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha512(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tnupkg".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha512, file_ext.clone()));
assert!(ext_tbl.is_sha512(file_ext))
}
#[test]
fn test_digest_ext_table_swipes_default_table(){
let mut ext_tbl = DigestExtTable::default();
assert!(ext_tbl.swipe())
}
|
#[test]
fn test_digest_ext_table_adding_new_extension_into_md5(){
|
random_line_split
|
digest_ext_table_test.rs
|
extern crate veye_checker;
use veye_checker::digest_ext_table::{DigestExtTable, DigestAlgo};
#[test]
fn test_digest_ext_table_init_with_default_values(){
let ext_tbl = DigestExtTable::default();
assert!(ext_tbl.is_md5("whl".to_string()));
assert!(ext_tbl.is_sha1("jar".to_string()));
assert!(ext_tbl.is_sha512("nupkg".to_string()));
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_md5(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tjar".to_string();
assert!(ext_tbl.add(DigestAlgo::Md5, file_ext.clone()));
assert!(ext_tbl.is_md5(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha1(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "twar".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha1, file_ext.clone()));
assert!(ext_tbl.is_sha1(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha512()
|
#[test]
fn test_digest_ext_table_swipes_default_table(){
let mut ext_tbl = DigestExtTable::default();
assert!(ext_tbl.swipe())
}
|
{
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tnupkg".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha512, file_ext.clone()));
assert!(ext_tbl.is_sha512(file_ext))
}
|
identifier_body
|
digest_ext_table_test.rs
|
extern crate veye_checker;
use veye_checker::digest_ext_table::{DigestExtTable, DigestAlgo};
#[test]
fn test_digest_ext_table_init_with_default_values(){
let ext_tbl = DigestExtTable::default();
assert!(ext_tbl.is_md5("whl".to_string()));
assert!(ext_tbl.is_sha1("jar".to_string()));
assert!(ext_tbl.is_sha512("nupkg".to_string()));
}
#[test]
fn
|
(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tjar".to_string();
assert!(ext_tbl.add(DigestAlgo::Md5, file_ext.clone()));
assert!(ext_tbl.is_md5(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha1(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "twar".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha1, file_ext.clone()));
assert!(ext_tbl.is_sha1(file_ext))
}
#[test]
fn test_digest_ext_table_adding_new_extension_into_sha512(){
let mut ext_tbl = DigestExtTable::default();
let file_ext = "tnupkg".to_string();
assert!(ext_tbl.add(DigestAlgo::Sha512, file_ext.clone()));
assert!(ext_tbl.is_sha512(file_ext))
}
#[test]
fn test_digest_ext_table_swipes_default_table(){
let mut ext_tbl = DigestExtTable::default();
assert!(ext_tbl.swipe())
}
|
test_digest_ext_table_adding_new_extension_into_md5
|
identifier_name
|
material_query_resolvers.rs
|
// use eyre::{
// // eyre,
// Result,
// // Context as _,
// };
use async_graphql::{
ID,
Context,
FieldResult,
};
use printspool_json_store::Record as _;
use crate::{
Material,
};
#[derive(async_graphql::InputObject, Default)]
pub struct MaterialsInput {
#[graphql(name = "materialID")]
material_id: Option<ID>,
}
#[derive(Default)]
pub struct MaterialQuery;
#[async_graphql::Object]
impl MaterialQuery {
async fn materials<'ctx>(
&self,
ctx: &'ctx Context<'_>,
#[graphql(default)]
input: MaterialsInput,
) -> FieldResult<Vec<Material>> {
let db: &crate::Db = ctx.data()?;
let materials = if let Some(id) = input.material_id
|
else {
Material::get_all(db, false).await?
};
Ok(materials)
}
}
|
{
let material = Material::get(db, &id, false).await?;
vec![material]
}
|
conditional_block
|
material_query_resolvers.rs
|
// // Context as _,
// };
use async_graphql::{
ID,
Context,
FieldResult,
};
use printspool_json_store::Record as _;
use crate::{
Material,
};
#[derive(async_graphql::InputObject, Default)]
pub struct MaterialsInput {
#[graphql(name = "materialID")]
material_id: Option<ID>,
}
#[derive(Default)]
pub struct MaterialQuery;
#[async_graphql::Object]
impl MaterialQuery {
async fn materials<'ctx>(
&self,
ctx: &'ctx Context<'_>,
#[graphql(default)]
input: MaterialsInput,
) -> FieldResult<Vec<Material>> {
let db: &crate::Db = ctx.data()?;
let materials = if let Some(id) = input.material_id {
let material = Material::get(db, &id, false).await?;
vec![material]
} else {
Material::get_all(db, false).await?
};
Ok(materials)
}
}
|
// use eyre::{
// // eyre,
// Result,
|
random_line_split
|
|
material_query_resolvers.rs
|
// use eyre::{
// // eyre,
// Result,
// // Context as _,
// };
use async_graphql::{
ID,
Context,
FieldResult,
};
use printspool_json_store::Record as _;
use crate::{
Material,
};
#[derive(async_graphql::InputObject, Default)]
pub struct
|
{
#[graphql(name = "materialID")]
material_id: Option<ID>,
}
#[derive(Default)]
pub struct MaterialQuery;
#[async_graphql::Object]
impl MaterialQuery {
async fn materials<'ctx>(
&self,
ctx: &'ctx Context<'_>,
#[graphql(default)]
input: MaterialsInput,
) -> FieldResult<Vec<Material>> {
let db: &crate::Db = ctx.data()?;
let materials = if let Some(id) = input.material_id {
let material = Material::get(db, &id, false).await?;
vec![material]
} else {
Material::get_all(db, false).await?
};
Ok(materials)
}
}
|
MaterialsInput
|
identifier_name
|
lib.rs
|
//! `primal` puts raw power into prime numbers.
//!
//! This crates includes
//!
//! - optimised prime sieves
//! - checking for primality
//! - enumerating primes
//! - factorising numbers
//! - estimating upper and lower bounds for π(*n*) (the number of primes
//! below *n*) and *p<sub>k</sub>* (the <i>k</i>th prime)
//!
//! This uses a state-of-the-art cache-friendly Sieve of Eratosthenes
//! to enumerate the primes up to some fixed bound (in a memory
//! efficient manner), and then allows this cached information to be
//! used for things like enumerating and counting primes.
//!
//! `primal` takes around 2.8 seconds and less than 3MB of RAM to
//! count the exact number of primes below 10<sup>10</sup> (455052511)
//! on my laptop (i7-3517U).
//!
//! [*Source*](http://github.com/huonw/primal)
//!
//! # Using this library
//!
//! Just add the following to your [`Cargo.toml`](http://crates.io/):
//!
//! ```toml
//! [dependencies]
//! primal = "0.2"
//! ```
//!
//! # Examples
//!
//! ## "Indexing" Primes
//!
//! Let's find the 10001st prime. The easiest way is to enumerate the
//! primes, and find the 10001st:
//!
//! ```rust
//! // (.nth is zero indexed.)
//! let p = primal::Primes::all().nth(10001 - 1).unwrap();
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This takes around 400 microseconds on my computer, which seems
//! nice and quick, but, `Primes` is flexible at the cost of
//! performance: we can make it faster. The `StreamingSieve` type
//! offers a specialised `nth_prime` function:
//!
//! ```rust
//! let p = primal::StreamingSieve::nth_prime(10001);
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This runs in only 10 microseconds! `StreamingSieve` is extremely
//! efficient and uses very little memory. It is the best way to solve
//! this task with `primal`.
//!
//! Since that was so easy, let's now make the problem bigger and
//! harder: find the sum of the 100,000th, 200,000th, 300,000th,...,
//! 10,000,000th primes (100 in total).
//!
//! We could call `StreamingSieve::nth_prime` repeatedly:
//!
//! ```rust,no_run
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // search and sum them up
//! let sum = ns.iter()
//! .map(|n| primal::StreamingSieve::nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 1.6s seconds to print `the sum is 8795091674`;
//! not so speedy. Each call to `nth_prime` is individually fast (400
//! microseconds for 100,000 to 40 milliseconds for 10,000,000) but
//! they add up to something bad. Every one is starting from the start
//! and redoing work that previous calls have done... wouldn't it be
//! nice if we could just do the computation for 10,000,000 and reuse
//! that for the smaller ones?
//!
//! The `Sieve` type is a wrapper around `StreamingSieve` that
//! caches information, allowing repeated queries to be answered
//! efficiently.
//!
//! There's one hitch: `Sieve` requires a limit to know how far to
//! sieve: we need some way to find an upper bound to be guaranteed to
//! be at least as large as all our primes. We could guess that, say,
//! 10<sup>10</sup> will be large enough and use that, but that's a
//! huge overestimate (spoilers: the 10,000,000th prime is around
//! 2×10<sup>8</sup>). We could also try filtering with
//! exponentially larger upper bounds until we find one that works
//! (e.g. doubling each time), or, we could just take a shortcut and
//! use deeper mathematics via
//! [`estimate_nth_prime`](fn.estimate_nth_prime.html).
//!
//! ```rust
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // find our upper bound
//! let (_lo, hi) = primal::estimate_nth_prime(10_000_000);
//!
//! // find the primes up to this upper bound
//! let sieve = primal::Sieve::new(hi as usize);
//!
//! // now we can efficiently sum them up
//! let sum = ns.iter()
//! .map(|n| sieve.nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 40 milliseconds, and gives the same output: much
//! better!
//!
//! (By the way, the version using 10<sup>10</sup> as the bound
//! instead of the more accurate estimate still only takes ~3
//! seconds.)
//!
//! ## Counting Primes
//!
//! Another problem: count the number of primes below 1 million. This
//! is evaluating the [prime-counting function
//! π](https://en.wikipedia.org/wiki/Prime-counting_function),
//! i.e. π(10<sup>6</sup>).
//!
//! As above, there's a few ways to attack this: the iterator, and the
//! sieves.
//!
//! ```rust
//! const LIMIT: usize = 1_000_000;
//!
//! // iterator
//! let count = primal::Primes::all().take_while(|p| *p < LIMIT).count();
//! println!("there are {} primes below 1 million", count); // 78498
//!
//! // sieves
//! let sieve = primal::Sieve::new(LIMIT);
//! let count = sieve.prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//!
//! let count = primal::StreamingSieve::prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//! ```
//!
//! `StreamingSieve` is fastest (380 microseconds) followed by `Sieve`
//! (400) with `Primes` bringing up the rear at 1300 microseconds. Of
//! course, repeated queries will be faster with `Sieve` than with
//! `StreamingSieve`, but that flexibility comes at the cost of extra
//! memory use.
//!
//! If an approximation is all that is required, `estimate_prime_pi`
//! provides close upper and lower bounds:
//!
//! ```rust
//! let (lo, hi) = primal::estimate_prime_pi(1_000_000);
//! println!("there are between {} and {} primes below 1 million", lo, hi);
//! // 78304, 78573
//! ```
//!
//! ## Searching Primes
//!
//! Now for something where `Primes` might be useful: find the first
//! prime where the binary expansion (not including trailing zeros)
//! ends like `00..001` with at least 27 zeros. This condition is
//! checked by:
//!
//! ```rust
//! fn check(p: usize) -> bool {
//! p > 1 && (p / 2).trailing_zeros() >= 27
//! }
//! ```
//!
//! I have no idea how large the prime might be: I know it's
//! guaranteed to be at *least* 2<sup>27 + 1</sup> + 1, but not an
//! upper limit.
//!
//! The `Primes` iterator works perfectly for this:
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p = primal::Primes::all().find(|p| check(*p)).unwrap();
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! It takes about 3.1 seconds for my computer to spit out 3,221,225,473.
//!
//! Using a sieve is a little trickier: one approach is to start with
//! some estimated upper bound (like double the absolute lower bound),
//! look for a valid prime. If one isn't found, double the upper bound
//! and start again. The `primes_from` method allows for saving a
//! little bit of work: we can start iterating from an arbitrary point
//! in the sequence, such as the lower bound.
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p;
//! let mut lower_bound = 1 << (27 + 1);
//! # let mut lower_bound = 1 << (5 + 1);
//! loop {
//! // our upper bound is double the lower bound
//! let sieve = primal::Sieve::new(lower_bound * 2);
//! if let Some(p_) = sieve.primes_from(lower_bound).find(|p| check(*p)) {
//! p = p_;
//! break
//! }
//! lower_bound *= 2;
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! This takes around 3.5 seconds to print the same number. Slower
//! than the iterator!
//!
//! I was just using this silly condition as an example of something
//! that doesn't have an obvious upper bound, rather than a problem
//! that is hard to do fast. There's a much faster way to tackle it,
//! by inverting the problem: construct numbers that satisfy `check`,
//! and check the primality of those.
//!
//! The numbers that satisfy `check` are `k * (1 << (27 + 1)) + 1` for
//! `k >= 1`, so the only hard bit is testing primality. Fortunately,
//! `primal` offers the `is_prime` function which is an efficient way
//! to do primality tests, even of very large numbers.
//!
//! ```rust
//! let mut p = 0;
//! for k in 1.. {
//! p = k * (1 << (27 + 1)) + 1;
//! if primal::is_prime(p) { break }
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 3_221_225_473);
//! ```
//!
//! This takes 6 <em>micro</em>seconds: more than 500,000×
//! faster than the iterator!
#![cfg_attr(all(test, feature = "unstable"), feature(test, step_by))]
extern crate primal_estimate;
extern crate primal_check;
extern crate primal_sieve;
#[cfg(all(test, feature = "unstable"))] extern crate test;
pub use primal_estimate::prime_pi as estimate_prime_pi;
pub use primal_estimate::nth_prime as estimate_nth_prime;
pub use primal_check::miller_rabin as is_prime;
pub use primal_check::{as_perfect_power, as_prime_power};
pub use primal_sieve::{StreamingSieve, Sieve, SievePrimes, Primes};
#[cfg(all(test, feature = "unstable"))]
mod benches {
extern crate test;
use super::{Sieve, is_prime};
use self::test::Bencher;
const N: usize = 1_000_000;
const STEP: usize = 101;
#[bench]
fn bench_miller_rabin_tests(b: &mut Bencher) {
b.iter(|| {
(1..N).step_by(STEP)
.filter(|&n| is_prime(n as u64)).count()
})
}
#[bench]
fn bench_sieve_tests(b: &mut Bencher) {
|
b.iter(|| {
let sieve = Sieve::new(1_000_000);
(1..N).step_by(STEP)
.filter(|&n| sieve.is_prime(n)).count()
})
}
}
|
identifier_body
|
|
lib.rs
|
//! `primal` puts raw power into prime numbers.
//!
//! This crates includes
//!
//! - optimised prime sieves
//! - checking for primality
//! - enumerating primes
//! - factorising numbers
//! - estimating upper and lower bounds for π(*n*) (the number of primes
//! below *n*) and *p<sub>k</sub>* (the <i>k</i>th prime)
//!
//! This uses a state-of-the-art cache-friendly Sieve of Eratosthenes
//! to enumerate the primes up to some fixed bound (in a memory
//! efficient manner), and then allows this cached information to be
//! used for things like enumerating and counting primes.
//!
//! `primal` takes around 2.8 seconds and less than 3MB of RAM to
//! count the exact number of primes below 10<sup>10</sup> (455052511)
//! on my laptop (i7-3517U).
//!
//! [*Source*](http://github.com/huonw/primal)
//!
//! # Using this library
//!
//! Just add the following to your [`Cargo.toml`](http://crates.io/):
//!
//! ```toml
//! [dependencies]
//! primal = "0.2"
//! ```
//!
//! # Examples
//!
//! ## "Indexing" Primes
//!
//! Let's find the 10001st prime. The easiest way is to enumerate the
//! primes, and find the 10001st:
//!
//! ```rust
//! // (.nth is zero indexed.)
//! let p = primal::Primes::all().nth(10001 - 1).unwrap();
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This takes around 400 microseconds on my computer, which seems
//! nice and quick, but, `Primes` is flexible at the cost of
//! performance: we can make it faster. The `StreamingSieve` type
//! offers a specialised `nth_prime` function:
//!
//! ```rust
//! let p = primal::StreamingSieve::nth_prime(10001);
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This runs in only 10 microseconds! `StreamingSieve` is extremely
//! efficient and uses very little memory. It is the best way to solve
//! this task with `primal`.
//!
//! Since that was so easy, let's now make the problem bigger and
//! harder: find the sum of the 100,000th, 200,000th, 300,000th,...,
//! 10,000,000th primes (100 in total).
//!
//! We could call `StreamingSieve::nth_prime` repeatedly:
//!
//! ```rust,no_run
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // search and sum them up
//! let sum = ns.iter()
//! .map(|n| primal::StreamingSieve::nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 1.6s seconds to print `the sum is 8795091674`;
//! not so speedy. Each call to `nth_prime` is individually fast (400
//! microseconds for 100,000 to 40 milliseconds for 10,000,000) but
//! they add up to something bad. Every one is starting from the start
//! and redoing work that previous calls have done... wouldn't it be
//! nice if we could just do the computation for 10,000,000 and reuse
//! that for the smaller ones?
//!
//! The `Sieve` type is a wrapper around `StreamingSieve` that
//! caches information, allowing repeated queries to be answered
//! efficiently.
//!
//! There's one hitch: `Sieve` requires a limit to know how far to
//! sieve: we need some way to find an upper bound to be guaranteed to
//! be at least as large as all our primes. We could guess that, say,
//! 10<sup>10</sup> will be large enough and use that, but that's a
//! huge overestimate (spoilers: the 10,000,000th prime is around
//! 2×10<sup>8</sup>). We could also try filtering with
//! exponentially larger upper bounds until we find one that works
//! (e.g. doubling each time), or, we could just take a shortcut and
//! use deeper mathematics via
//! [`estimate_nth_prime`](fn.estimate_nth_prime.html).
//!
//! ```rust
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // find our upper bound
//! let (_lo, hi) = primal::estimate_nth_prime(10_000_000);
//!
//! // find the primes up to this upper bound
//! let sieve = primal::Sieve::new(hi as usize);
//!
//! // now we can efficiently sum them up
//! let sum = ns.iter()
//! .map(|n| sieve.nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 40 milliseconds, and gives the same output: much
//! better!
//!
//! (By the way, the version using 10<sup>10</sup> as the bound
//! instead of the more accurate estimate still only takes ~3
//! seconds.)
//!
//! ## Counting Primes
//!
//! Another problem: count the number of primes below 1 million. This
//! is evaluating the [prime-counting function
//! π](https://en.wikipedia.org/wiki/Prime-counting_function),
//! i.e. π(10<sup>6</sup>).
//!
//! As above, there's a few ways to attack this: the iterator, and the
//! sieves.
//!
//! ```rust
//! const LIMIT: usize = 1_000_000;
//!
//! // iterator
//! let count = primal::Primes::all().take_while(|p| *p < LIMIT).count();
//! println!("there are {} primes below 1 million", count); // 78498
//!
//! // sieves
//! let sieve = primal::Sieve::new(LIMIT);
//! let count = sieve.prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//!
//! let count = primal::StreamingSieve::prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//! ```
//!
//! `StreamingSieve` is fastest (380 microseconds) followed by `Sieve`
//! (400) with `Primes` bringing up the rear at 1300 microseconds. Of
//! course, repeated queries will be faster with `Sieve` than with
//! `StreamingSieve`, but that flexibility comes at the cost of extra
//! memory use.
//!
//! If an approximation is all that is required, `estimate_prime_pi`
//! provides close upper and lower bounds:
//!
//! ```rust
//! let (lo, hi) = primal::estimate_prime_pi(1_000_000);
//! println!("there are between {} and {} primes below 1 million", lo, hi);
//! // 78304, 78573
//! ```
//!
//! ## Searching Primes
//!
//! Now for something where `Primes` might be useful: find the first
//! prime where the binary expansion (not including trailing zeros)
//! ends like `00..001` with at least 27 zeros. This condition is
//! checked by:
//!
//! ```rust
//! fn check(p: usize) -> bool {
//! p > 1 && (p / 2).trailing_zeros() >= 27
//! }
//! ```
//!
//! I have no idea how large the prime might be: I know it's
//! guaranteed to be at *least* 2<sup>27 + 1</sup> + 1, but not an
//! upper limit.
//!
//! The `Primes` iterator works perfectly for this:
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p = primal::Primes::all().find(|p| check(*p)).unwrap();
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! It takes about 3.1 seconds for my computer to spit out 3,221,225,473.
//!
//! Using a sieve is a little trickier: one approach is to start with
//! some estimated upper bound (like double the absolute lower bound),
//! look for a valid prime. If one isn't found, double the upper bound
//! and start again. The `primes_from` method allows for saving a
//! little bit of work: we can start iterating from an arbitrary point
//! in the sequence, such as the lower bound.
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p;
//! let mut lower_bound = 1 << (27 + 1);
//! # let mut lower_bound = 1 << (5 + 1);
//! loop {
//! // our upper bound is double the lower bound
//! let sieve = primal::Sieve::new(lower_bound * 2);
//! if let Some(p_) = sieve.primes_from(lower_bound).find(|p| check(*p)) {
//! p = p_;
//! break
//! }
//! lower_bound *= 2;
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! This takes around 3.5 seconds to print the same number. Slower
//! than the iterator!
//!
//! I was just using this silly condition as an example of something
//! that doesn't have an obvious upper bound, rather than a problem
//! that is hard to do fast. There's a much faster way to tackle it,
//! by inverting the problem: construct numbers that satisfy `check`,
//! and check the primality of those.
//!
//! The numbers that satisfy `check` are `k * (1 << (27 + 1)) + 1` for
//! `k >= 1`, so the only hard bit is testing primality. Fortunately,
//! `primal` offers the `is_prime` function which is an efficient way
//! to do primality tests, even of very large numbers.
//!
//! ```rust
//! let mut p = 0;
//! for k in 1.. {
//! p = k * (1 << (27 + 1)) + 1;
//! if primal::is_prime(p) { break }
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 3_221_225_473);
//! ```
//!
//! This takes 6 <em>micro</em>seconds: more than 500,000×
//! faster than the iterator!
#![cfg_attr(all(test, feature = "unstable"), feature(test, step_by))]
extern crate primal_estimate;
extern crate primal_check;
extern crate primal_sieve;
#[cfg(all(test, feature = "unstable"))] extern crate test;
pub use primal_estimate::prime_pi as estimate_prime_pi;
pub use primal_estimate::nth_prime as estimate_nth_prime;
pub use primal_check::miller_rabin as is_prime;
pub use primal_check::{as_perfect_power, as_prime_power};
pub use primal_sieve::{StreamingSieve, Sieve, SievePrimes, Primes};
#[cfg(all(test, feature = "unstable"))]
mod benches {
extern crate test;
use super::{Sieve, is_prime};
use self::test::Bencher;
const N: usize = 1_000_000;
const STEP: usize = 101;
#[bench]
fn bench_miller_rabin_tests(b: &mut Bencher) {
b.iter(|| {
(1..N).step_by(STEP)
.filter(|&n| is_prime(n as u64)).count()
})
}
#[bench]
fn ben
|
&mut Bencher) {
b.iter(|| {
let sieve = Sieve::new(1_000_000);
(1..N).step_by(STEP)
.filter(|&n| sieve.is_prime(n)).count()
})
}
}
|
ch_sieve_tests(b:
|
identifier_name
|
lib.rs
|
//! `primal` puts raw power into prime numbers.
//!
//! This crates includes
//!
//! - optimised prime sieves
//! - checking for primality
//! - enumerating primes
//! - factorising numbers
//! - estimating upper and lower bounds for π(*n*) (the number of primes
//! below *n*) and *p<sub>k</sub>* (the <i>k</i>th prime)
//!
//! This uses a state-of-the-art cache-friendly Sieve of Eratosthenes
//! to enumerate the primes up to some fixed bound (in a memory
//! efficient manner), and then allows this cached information to be
//! used for things like enumerating and counting primes.
//!
//! `primal` takes around 2.8 seconds and less than 3MB of RAM to
//! count the exact number of primes below 10<sup>10</sup> (455052511)
//! on my laptop (i7-3517U).
//!
//! [*Source*](http://github.com/huonw/primal)
//!
//! # Using this library
//!
//! Just add the following to your [`Cargo.toml`](http://crates.io/):
//!
//! ```toml
//! [dependencies]
//! primal = "0.2"
//! ```
//!
//! # Examples
//!
//! ## "Indexing" Primes
//!
//! Let's find the 10001st prime. The easiest way is to enumerate the
//! primes, and find the 10001st:
//!
//! ```rust
//! // (.nth is zero indexed.)
//! let p = primal::Primes::all().nth(10001 - 1).unwrap();
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This takes around 400 microseconds on my computer, which seems
//! nice and quick, but, `Primes` is flexible at the cost of
//! performance: we can make it faster. The `StreamingSieve` type
//! offers a specialised `nth_prime` function:
//!
//! ```rust
//! let p = primal::StreamingSieve::nth_prime(10001);
//! println!("The 10001st prime is {}", p); // 104743
//! ```
//!
//! This runs in only 10 microseconds! `StreamingSieve` is extremely
//! efficient and uses very little memory. It is the best way to solve
//! this task with `primal`.
//!
//! Since that was so easy, let's now make the problem bigger and
//! harder: find the sum of the 100,000th, 200,000th, 300,000th,...,
//! 10,000,000th primes (100 in total).
//!
//! We could call `StreamingSieve::nth_prime` repeatedly:
//!
//! ```rust,no_run
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // search and sum them up
//! let sum = ns.iter()
//! .map(|n| primal::StreamingSieve::nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 1.6s seconds to print `the sum is 8795091674`;
//! not so speedy. Each call to `nth_prime` is individually fast (400
//! microseconds for 100,000 to 40 milliseconds for 10,000,000) but
//! they add up to something bad. Every one is starting from the start
//! and redoing work that previous calls have done... wouldn't it be
//! nice if we could just do the computation for 10,000,000 and reuse
//! that for the smaller ones?
//!
//! The `Sieve` type is a wrapper around `StreamingSieve` that
//! caches information, allowing repeated queries to be answered
//! efficiently.
//!
//! There's one hitch: `Sieve` requires a limit to know how far to
//! sieve: we need some way to find an upper bound to be guaranteed to
//! be at least as large as all our primes. We could guess that, say,
//! 10<sup>10</sup> will be large enough and use that, but that's a
//! huge overestimate (spoilers: the 10,000,000th prime is around
//! 2×10<sup>8</sup>). We could also try filtering with
//! exponentially larger upper bounds until we find one that works
//! (e.g. doubling each time), or, we could just take a shortcut and
//! use deeper mathematics via
//! [`estimate_nth_prime`](fn.estimate_nth_prime.html).
//!
//! ```rust
//! // the primes we want to find
//! let ns = (1..100 + 1).map(|x| x * 100_000).collect::<Vec<_>>();
//!
//! // find our upper bound
//! let (_lo, hi) = primal::estimate_nth_prime(10_000_000);
//!
//! // find the primes up to this upper bound
//! let sieve = primal::Sieve::new(hi as usize);
//!
//! // now we can efficiently sum them up
//! let sum = ns.iter()
//! .map(|n| sieve.nth_prime(*n))
//! .fold(0, |a, b| a + b);
//! println!("the sum is {}", sum);
//! ```
//!
//! This takes around 40 milliseconds, and gives the same output: much
//! better!
//!
//! (By the way, the version using 10<sup>10</sup> as the bound
//! instead of the more accurate estimate still only takes ~3
//! seconds.)
//!
//! ## Counting Primes
//!
//! Another problem: count the number of primes below 1 million. This
//! is evaluating the [prime-counting function
//! π](https://en.wikipedia.org/wiki/Prime-counting_function),
//! i.e. π(10<sup>6</sup>).
//!
//! As above, there's a few ways to attack this: the iterator, and the
//! sieves.
//!
//! ```rust
//! const LIMIT: usize = 1_000_000;
//!
//! // iterator
//! let count = primal::Primes::all().take_while(|p| *p < LIMIT).count();
//! println!("there are {} primes below 1 million", count); // 78498
//!
//! // sieves
//! let sieve = primal::Sieve::new(LIMIT);
//! let count = sieve.prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//!
//! let count = primal::StreamingSieve::prime_pi(LIMIT);
//! println!("there are {} primes below 1 million", count);
//! ```
//!
//! `StreamingSieve` is fastest (380 microseconds) followed by `Sieve`
//! (400) with `Primes` bringing up the rear at 1300 microseconds. Of
//! course, repeated queries will be faster with `Sieve` than with
//! `StreamingSieve`, but that flexibility comes at the cost of extra
//! memory use.
//!
//! If an approximation is all that is required, `estimate_prime_pi`
//! provides close upper and lower bounds:
//!
//! ```rust
//! let (lo, hi) = primal::estimate_prime_pi(1_000_000);
//! println!("there are between {} and {} primes below 1 million", lo, hi);
//! // 78304, 78573
//! ```
//!
//! ## Searching Primes
//!
//! Now for something where `Primes` might be useful: find the first
//! prime where the binary expansion (not including trailing zeros)
//! ends like `00..001` with at least 27 zeros. This condition is
//! checked by:
//!
//! ```rust
//! fn check(p: usize) -> bool {
//! p > 1 && (p / 2).trailing_zeros() >= 27
//! }
//! ```
//!
//! I have no idea how large the prime might be: I know it's
//! guaranteed to be at *least* 2<sup>27 + 1</sup> + 1, but not an
//! upper limit.
//!
//! The `Primes` iterator works perfectly for this:
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p = primal::Primes::all().find(|p| check(*p)).unwrap();
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! It takes about 3.1 seconds for my computer to spit out 3,221,225,473.
//!
//! Using a sieve is a little trickier: one approach is to start with
//! some estimated upper bound (like double the absolute lower bound),
//! look for a valid prime. If one isn't found, double the upper bound
//! and start again. The `primes_from` method allows for saving a
//! little bit of work: we can start iterating from an arbitrary point
//! in the sequence, such as the lower bound.
//!
//! ```rust
//! # fn check(p: usize) -> bool { p > 1 && (p / 2).trailing_zeros() >= 5 } // 27 is too slow
//! let p;
//! let mut lower_bound = 1 << (27 + 1);
//! # let mut lower_bound = 1 << (5 + 1);
//! loop {
//! // our upper bound is double the lower bound
//! let sieve = primal::Sieve::new(lower_bound * 2);
//! if let Some(p_) = sieve.primes_from(lower_bound).find(|p| check(*p)) {
//! p = p_;
//! break
//! }
//! lower_bound *= 2;
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 193);
//! ```
//!
//! This takes around 3.5 seconds to print the same number. Slower
//! than the iterator!
//!
//! I was just using this silly condition as an example of something
//! that doesn't have an obvious upper bound, rather than a problem
//! that is hard to do fast. There's a much faster way to tackle it,
//! by inverting the problem: construct numbers that satisfy `check`,
//! and check the primality of those.
//!
//! The numbers that satisfy `check` are `k * (1 << (27 + 1)) + 1` for
//! `k >= 1`, so the only hard bit is testing primality. Fortunately,
//! `primal` offers the `is_prime` function which is an efficient way
//! to do primality tests, even of very large numbers.
//!
//! ```rust
//! let mut p = 0;
//! for k in 1.. {
//! p = k * (1 << (27 + 1)) + 1;
//! if primal::is_prime(p) { break }
//! }
//! println!("the prime is {}", p);
//! # assert_eq!(p, 3_221_225_473);
//! ```
//!
//! This takes 6 <em>micro</em>seconds: more than 500,000×
//! faster than the iterator!
#![cfg_attr(all(test, feature = "unstable"), feature(test, step_by))]
extern crate primal_estimate;
extern crate primal_check;
extern crate primal_sieve;
#[cfg(all(test, feature = "unstable"))] extern crate test;
pub use primal_estimate::prime_pi as estimate_prime_pi;
pub use primal_estimate::nth_prime as estimate_nth_prime;
pub use primal_check::miller_rabin as is_prime;
pub use primal_check::{as_perfect_power, as_prime_power};
pub use primal_sieve::{StreamingSieve, Sieve, SievePrimes, Primes};
#[cfg(all(test, feature = "unstable"))]
mod benches {
extern crate test;
use super::{Sieve, is_prime};
use self::test::Bencher;
const N: usize = 1_000_000;
const STEP: usize = 101;
#[bench]
fn bench_miller_rabin_tests(b: &mut Bencher) {
b.iter(|| {
(1..N).step_by(STEP)
.filter(|&n| is_prime(n as u64)).count()
})
}
|
let sieve = Sieve::new(1_000_000);
(1..N).step_by(STEP)
.filter(|&n| sieve.is_prime(n)).count()
})
}
}
|
#[bench]
fn bench_sieve_tests(b: &mut Bencher) {
b.iter(|| {
|
random_line_split
|
backtrace.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! As always, windows has something very different than unix, we mainly want
//! to avoid having to depend too much on libunwind for windows.
//!
//! If you google around, you'll find a fair bit of references to built-in
//! functions to get backtraces on windows. It turns out that most of these are
//! in an external library called dbghelp. I was unable to find this library
//! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
//! of it.
//!
//! You'll also find that there's a function called CaptureStackBackTrace
//! mentioned frequently (which is also easy to use), but sadly I didn't have a
//! copy of that function in my mingw install (maybe it was broken?). Instead,
//! this takes the route of using StackWalk64 in order to walk the stack.
#![allow(dead_code)]
#[cfg(stage0)]
use prelude::v1::*;
use io::prelude::*;
use dynamic_lib::DynamicLibrary;
use ffi::CStr;
use intrinsics;
use io;
use libc;
use mem;
use path::Path;
use ptr;
use str;
use sync::StaticMutex;
use sys_common::backtrace::*;
#[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
}
type SymFromAddrFn =
extern "system" fn(libc::HANDLE, u64, *mut u64,
*mut SYMBOL_INFO) -> libc::BOOL;
type SymInitializeFn =
extern "system" fn(libc::HANDLE, *mut libc::c_void,
libc::BOOL) -> libc::BOOL;
type SymCleanupFn =
extern "system" fn(libc::HANDLE) -> libc::BOOL;
type StackWalk64Fn =
extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
*mut STACKFRAME64, *mut arch::CONTEXT,
*mut libc::c_void, *mut libc::c_void,
*mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
const MAX_SYM_NAME: usize = 2000;
const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
#[repr(C)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
TypeIndex: libc::c_ulong,
Reserved: [u64; 2],
Index: libc::c_ulong,
Size: libc::c_ulong,
ModBase: u64,
Flags: libc::c_ulong,
Value: u64,
Address: u64,
Register: libc::c_ulong,
Scope: libc::c_ulong,
Tag: libc::c_ulong,
NameLen: libc::c_ulong,
MaxNameLen: libc::c_ulong,
// note that windows has this as 1, but it basically just means that
// the name is inline at the end of the struct. For us, we just bump
// the struct size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct ADDRESS64 {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct CONTEXT {
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't
// always be found. Additionally, it's nice having fewer dependencies.
let path = Path::new("dbghelp.dll");
let lib = match DynamicLibrary::open(Some(&path)) {
Ok(lib) => lib,
Err(..) => return Ok(()),
};
macro_rules! sym{ ($e:expr, $t:ident) => (unsafe {
match lib.symbol($e) {
Ok(f) => mem::transmute::<*mut u8, $t>(f),
Err(..) => return Ok(())
}
}) }
// Fetch the symbols necessary from dbghelp.dll
let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
let SymInitialize = sym!("SymInitialize", SymInitializeFn);
let SymCleanup = sym!("SymCleanup", SymCleanupFn);
let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
// Allocate necessary structures for doing the stack walk
let process = unsafe { GetCurrentProcess() };
let thread = unsafe { GetCurrentThread() };
let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
unsafe { RtlCaptureContext(&mut context); }
let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
let image = arch::init_frame(&mut frame, &context);
// Initialize this process's symbols
let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE);
if ret!= libc::TRUE { return Ok(()) }
let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
// And now that we're done with all the setup, do the stack walking!
let mut i = 0;
try!(write!(w, "stack backtrace:\n"));
while StackWalk64(image, process, thread, &mut frame, &mut context,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut()) == libc::TRUE{
let addr = frame.AddrPC.Offset;
if addr == frame.AddrReturn.Offset || addr == 0 ||
frame.AddrReturn.Offset == 0 { break }
i += 1;
try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH));
let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
// the struct size in C. the value is different to
|
// `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
// due to struct alignment.
info.SizeOfStruct = 88;
let mut displacement = 0u64;
let ret = SymFromAddr(process, addr as u64, &mut displacement,
&mut info);
if ret == libc::TRUE {
try!(write!(w, " - "));
let ptr = info.Name.as_ptr() as *const libc::c_char;
let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() };
match str::from_utf8(bytes) {
Ok(s) => try!(demangle(w, s)),
Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])),
}
}
try!(w.write_all(&['\n' as u8]));
}
Ok(())
}
|
random_line_split
|
|
backtrace.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! As always, windows has something very different than unix, we mainly want
//! to avoid having to depend too much on libunwind for windows.
//!
//! If you google around, you'll find a fair bit of references to built-in
//! functions to get backtraces on windows. It turns out that most of these are
//! in an external library called dbghelp. I was unable to find this library
//! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
//! of it.
//!
//! You'll also find that there's a function called CaptureStackBackTrace
//! mentioned frequently (which is also easy to use), but sadly I didn't have a
//! copy of that function in my mingw install (maybe it was broken?). Instead,
//! this takes the route of using StackWalk64 in order to walk the stack.
#![allow(dead_code)]
#[cfg(stage0)]
use prelude::v1::*;
use io::prelude::*;
use dynamic_lib::DynamicLibrary;
use ffi::CStr;
use intrinsics;
use io;
use libc;
use mem;
use path::Path;
use ptr;
use str;
use sync::StaticMutex;
use sys_common::backtrace::*;
#[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
}
type SymFromAddrFn =
extern "system" fn(libc::HANDLE, u64, *mut u64,
*mut SYMBOL_INFO) -> libc::BOOL;
type SymInitializeFn =
extern "system" fn(libc::HANDLE, *mut libc::c_void,
libc::BOOL) -> libc::BOOL;
type SymCleanupFn =
extern "system" fn(libc::HANDLE) -> libc::BOOL;
type StackWalk64Fn =
extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
*mut STACKFRAME64, *mut arch::CONTEXT,
*mut libc::c_void, *mut libc::c_void,
*mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
const MAX_SYM_NAME: usize = 2000;
const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
#[repr(C)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
TypeIndex: libc::c_ulong,
Reserved: [u64; 2],
Index: libc::c_ulong,
Size: libc::c_ulong,
ModBase: u64,
Flags: libc::c_ulong,
Value: u64,
Address: u64,
Register: libc::c_ulong,
Scope: libc::c_ulong,
Tag: libc::c_ulong,
NameLen: libc::c_ulong,
MaxNameLen: libc::c_ulong,
// note that windows has this as 1, but it basically just means that
// the name is inline at the end of the struct. For us, we just bump
// the struct size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct ADDRESS64 {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct
|
{
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()> {
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't
// always be found. Additionally, it's nice having fewer dependencies.
let path = Path::new("dbghelp.dll");
let lib = match DynamicLibrary::open(Some(&path)) {
Ok(lib) => lib,
Err(..) => return Ok(()),
};
macro_rules! sym{ ($e:expr, $t:ident) => (unsafe {
match lib.symbol($e) {
Ok(f) => mem::transmute::<*mut u8, $t>(f),
Err(..) => return Ok(())
}
}) }
// Fetch the symbols necessary from dbghelp.dll
let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
let SymInitialize = sym!("SymInitialize", SymInitializeFn);
let SymCleanup = sym!("SymCleanup", SymCleanupFn);
let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
// Allocate necessary structures for doing the stack walk
let process = unsafe { GetCurrentProcess() };
let thread = unsafe { GetCurrentThread() };
let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
unsafe { RtlCaptureContext(&mut context); }
let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
let image = arch::init_frame(&mut frame, &context);
// Initialize this process's symbols
let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE);
if ret!= libc::TRUE { return Ok(()) }
let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
// And now that we're done with all the setup, do the stack walking!
let mut i = 0;
try!(write!(w, "stack backtrace:\n"));
while StackWalk64(image, process, thread, &mut frame, &mut context,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut()) == libc::TRUE{
let addr = frame.AddrPC.Offset;
if addr == frame.AddrReturn.Offset || addr == 0 ||
frame.AddrReturn.Offset == 0 { break }
i += 1;
try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH));
let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
// the struct size in C. the value is different to
// `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
// due to struct alignment.
info.SizeOfStruct = 88;
let mut displacement = 0u64;
let ret = SymFromAddr(process, addr as u64, &mut displacement,
&mut info);
if ret == libc::TRUE {
try!(write!(w, " - "));
let ptr = info.Name.as_ptr() as *const libc::c_char;
let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() };
match str::from_utf8(bytes) {
Ok(s) => try!(demangle(w, s)),
Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])),
}
}
try!(w.write_all(&['\n' as u8]));
}
Ok(())
}
|
CONTEXT
|
identifier_name
|
backtrace.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! As always, windows has something very different than unix, we mainly want
//! to avoid having to depend too much on libunwind for windows.
//!
//! If you google around, you'll find a fair bit of references to built-in
//! functions to get backtraces on windows. It turns out that most of these are
//! in an external library called dbghelp. I was unable to find this library
//! via `-ldbghelp`, but it is apparently normal to do the `dlopen` equivalent
//! of it.
//!
//! You'll also find that there's a function called CaptureStackBackTrace
//! mentioned frequently (which is also easy to use), but sadly I didn't have a
//! copy of that function in my mingw install (maybe it was broken?). Instead,
//! this takes the route of using StackWalk64 in order to walk the stack.
#![allow(dead_code)]
#[cfg(stage0)]
use prelude::v1::*;
use io::prelude::*;
use dynamic_lib::DynamicLibrary;
use ffi::CStr;
use intrinsics;
use io;
use libc;
use mem;
use path::Path;
use ptr;
use str;
use sync::StaticMutex;
use sys_common::backtrace::*;
#[allow(non_snake_case)]
extern "system" {
fn GetCurrentProcess() -> libc::HANDLE;
fn GetCurrentThread() -> libc::HANDLE;
fn RtlCaptureContext(ctx: *mut arch::CONTEXT);
}
type SymFromAddrFn =
extern "system" fn(libc::HANDLE, u64, *mut u64,
*mut SYMBOL_INFO) -> libc::BOOL;
type SymInitializeFn =
extern "system" fn(libc::HANDLE, *mut libc::c_void,
libc::BOOL) -> libc::BOOL;
type SymCleanupFn =
extern "system" fn(libc::HANDLE) -> libc::BOOL;
type StackWalk64Fn =
extern "system" fn(libc::DWORD, libc::HANDLE, libc::HANDLE,
*mut STACKFRAME64, *mut arch::CONTEXT,
*mut libc::c_void, *mut libc::c_void,
*mut libc::c_void, *mut libc::c_void) -> libc::BOOL;
const MAX_SYM_NAME: usize = 2000;
const IMAGE_FILE_MACHINE_I386: libc::DWORD = 0x014c;
const IMAGE_FILE_MACHINE_IA64: libc::DWORD = 0x0200;
const IMAGE_FILE_MACHINE_AMD64: libc::DWORD = 0x8664;
#[repr(C)]
struct SYMBOL_INFO {
SizeOfStruct: libc::c_ulong,
TypeIndex: libc::c_ulong,
Reserved: [u64; 2],
Index: libc::c_ulong,
Size: libc::c_ulong,
ModBase: u64,
Flags: libc::c_ulong,
Value: u64,
Address: u64,
Register: libc::c_ulong,
Scope: libc::c_ulong,
Tag: libc::c_ulong,
NameLen: libc::c_ulong,
MaxNameLen: libc::c_ulong,
// note that windows has this as 1, but it basically just means that
// the name is inline at the end of the struct. For us, we just bump
// the struct size up to MAX_SYM_NAME.
Name: [libc::c_char; MAX_SYM_NAME],
}
#[repr(C)]
enum ADDRESS_MODE {
AddrMode1616,
AddrMode1632,
AddrModeReal,
AddrModeFlat,
}
struct ADDRESS64 {
Offset: u64,
Segment: u16,
Mode: ADDRESS_MODE,
}
pub struct STACKFRAME64 {
AddrPC: ADDRESS64,
AddrReturn: ADDRESS64,
AddrFrame: ADDRESS64,
AddrStack: ADDRESS64,
AddrBStore: ADDRESS64,
FuncTableEntry: *mut libc::c_void,
Params: [u64; 4],
Far: libc::BOOL,
Virtual: libc::BOOL,
Reserved: [u64; 3],
KdHelp: KDHELP64,
}
struct KDHELP64 {
Thread: u64,
ThCallbackStack: libc::DWORD,
ThCallbackBStore: libc::DWORD,
NextCallback: libc::DWORD,
FramePointer: libc::DWORD,
KiCallUserMode: u64,
KeUserCallbackDispatcher: u64,
SystemRangeStart: u64,
KiUserExceptionDispatcher: u64,
StackBase: u64,
StackLimit: u64,
Reserved: [u64; 5],
}
#[cfg(target_arch = "x86")]
mod arch {
use libc;
const MAXIMUM_SUPPORTED_EXTENSION: usize = 512;
#[repr(C)]
pub struct CONTEXT {
ContextFlags: libc::DWORD,
Dr0: libc::DWORD,
Dr1: libc::DWORD,
Dr2: libc::DWORD,
Dr3: libc::DWORD,
Dr6: libc::DWORD,
Dr7: libc::DWORD,
FloatSave: FLOATING_SAVE_AREA,
SegGs: libc::DWORD,
SegFs: libc::DWORD,
SegEs: libc::DWORD,
SegDs: libc::DWORD,
Edi: libc::DWORD,
Esi: libc::DWORD,
Ebx: libc::DWORD,
Edx: libc::DWORD,
Ecx: libc::DWORD,
Eax: libc::DWORD,
Ebp: libc::DWORD,
Eip: libc::DWORD,
SegCs: libc::DWORD,
EFlags: libc::DWORD,
Esp: libc::DWORD,
SegSs: libc::DWORD,
ExtendedRegisters: [u8; MAXIMUM_SUPPORTED_EXTENSION],
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
ControlWord: libc::DWORD,
StatusWord: libc::DWORD,
TagWord: libc::DWORD,
ErrorOffset: libc::DWORD,
ErrorSelector: libc::DWORD,
DataOffset: libc::DWORD,
DataSelector: libc::DWORD,
RegisterArea: [u8; 80],
Cr0NpxState: libc::DWORD,
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> libc::DWORD {
frame.AddrPC.Offset = ctx.Eip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Esp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Ebp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_I386
}
}
#[cfg(target_arch = "x86_64")]
mod arch {
use libc::{c_longlong, c_ulonglong};
use libc::types::os::arch::extra::{WORD, DWORD, DWORDLONG};
use simd;
#[repr(C)]
pub struct CONTEXT {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
P1Home: DWORDLONG,
P2Home: DWORDLONG,
P3Home: DWORDLONG,
P4Home: DWORDLONG,
P5Home: DWORDLONG,
P6Home: DWORDLONG,
ContextFlags: DWORD,
MxCsr: DWORD,
SegCs: WORD,
SegDs: WORD,
SegEs: WORD,
SegFs: WORD,
SegGs: WORD,
SegSs: WORD,
EFlags: DWORD,
Dr0: DWORDLONG,
Dr1: DWORDLONG,
Dr2: DWORDLONG,
Dr3: DWORDLONG,
Dr6: DWORDLONG,
Dr7: DWORDLONG,
Rax: DWORDLONG,
Rcx: DWORDLONG,
Rdx: DWORDLONG,
Rbx: DWORDLONG,
Rsp: DWORDLONG,
Rbp: DWORDLONG,
Rsi: DWORDLONG,
Rdi: DWORDLONG,
R8: DWORDLONG,
R9: DWORDLONG,
R10: DWORDLONG,
R11: DWORDLONG,
R12: DWORDLONG,
R13: DWORDLONG,
R14: DWORDLONG,
R15: DWORDLONG,
Rip: DWORDLONG,
FltSave: FLOATING_SAVE_AREA,
VectorRegister: [M128A; 26],
VectorControl: DWORDLONG,
DebugControl: DWORDLONG,
LastBranchToRip: DWORDLONG,
LastBranchFromRip: DWORDLONG,
LastExceptionToRip: DWORDLONG,
LastExceptionFromRip: DWORDLONG,
}
#[repr(C)]
pub struct M128A {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
Low: c_ulonglong,
High: c_longlong
}
#[repr(C)]
pub struct FLOATING_SAVE_AREA {
_align_hack: [simd::u64x2; 0], // FIXME align on 16-byte
_Dummy: [u8; 512] // FIXME: Fill this out
}
pub fn init_frame(frame: &mut super::STACKFRAME64,
ctx: &CONTEXT) -> DWORD {
frame.AddrPC.Offset = ctx.Rip as u64;
frame.AddrPC.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrStack.Offset = ctx.Rsp as u64;
frame.AddrStack.Mode = super::ADDRESS_MODE::AddrModeFlat;
frame.AddrFrame.Offset = ctx.Rbp as u64;
frame.AddrFrame.Mode = super::ADDRESS_MODE::AddrModeFlat;
super::IMAGE_FILE_MACHINE_AMD64
}
}
struct Cleanup {
handle: libc::HANDLE,
SymCleanup: SymCleanupFn,
}
impl Drop for Cleanup {
fn drop(&mut self) { (self.SymCleanup)(self.handle); }
}
pub fn write(w: &mut Write) -> io::Result<()>
|
// Fetch the symbols necessary from dbghelp.dll
let SymFromAddr = sym!("SymFromAddr", SymFromAddrFn);
let SymInitialize = sym!("SymInitialize", SymInitializeFn);
let SymCleanup = sym!("SymCleanup", SymCleanupFn);
let StackWalk64 = sym!("StackWalk64", StackWalk64Fn);
// Allocate necessary structures for doing the stack walk
let process = unsafe { GetCurrentProcess() };
let thread = unsafe { GetCurrentThread() };
let mut context: arch::CONTEXT = unsafe { intrinsics::init() };
unsafe { RtlCaptureContext(&mut context); }
let mut frame: STACKFRAME64 = unsafe { intrinsics::init() };
let image = arch::init_frame(&mut frame, &context);
// Initialize this process's symbols
let ret = SymInitialize(process, ptr::null_mut(), libc::TRUE);
if ret!= libc::TRUE { return Ok(()) }
let _c = Cleanup { handle: process, SymCleanup: SymCleanup };
// And now that we're done with all the setup, do the stack walking!
let mut i = 0;
try!(write!(w, "stack backtrace:\n"));
while StackWalk64(image, process, thread, &mut frame, &mut context,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut()) == libc::TRUE{
let addr = frame.AddrPC.Offset;
if addr == frame.AddrReturn.Offset || addr == 0 ||
frame.AddrReturn.Offset == 0 { break }
i += 1;
try!(write!(w, " {:2}: {:#2$x}", i, addr, HEX_WIDTH));
let mut info: SYMBOL_INFO = unsafe { intrinsics::init() };
info.MaxNameLen = MAX_SYM_NAME as libc::c_ulong;
// the struct size in C. the value is different to
// `size_of::<SYMBOL_INFO>() - MAX_SYM_NAME + 1` (== 81)
// due to struct alignment.
info.SizeOfStruct = 88;
let mut displacement = 0u64;
let ret = SymFromAddr(process, addr as u64, &mut displacement,
&mut info);
if ret == libc::TRUE {
try!(write!(w, " - "));
let ptr = info.Name.as_ptr() as *const libc::c_char;
let bytes = unsafe { CStr::from_ptr(ptr).to_bytes() };
match str::from_utf8(bytes) {
Ok(s) => try!(demangle(w, s)),
Err(..) => try!(w.write_all(&bytes[..bytes.len()-1])),
}
}
try!(w.write_all(&['\n' as u8]));
}
Ok(())
}
|
{
// According to windows documentation, all dbghelp functions are
// single-threaded.
static LOCK: StaticMutex = StaticMutex::new();
let _g = LOCK.lock();
// Open up dbghelp.dll, we don't link to it explicitly because it can't
// always be found. Additionally, it's nice having fewer dependencies.
let path = Path::new("dbghelp.dll");
let lib = match DynamicLibrary::open(Some(&path)) {
Ok(lib) => lib,
Err(..) => return Ok(()),
};
macro_rules! sym{ ($e:expr, $t:ident) => (unsafe {
match lib.symbol($e) {
Ok(f) => mem::transmute::<*mut u8, $t>(f),
Err(..) => return Ok(())
}
}) }
|
identifier_body
|
linkage1.rs
|
// run-pass
// ignore-windows
// ignore-macos
// ignore-emscripten doesn't support this linkage
// ignore-sgx weak linkage not permitted
// aux-build:linkage1.rs
#![feature(linkage)]
extern crate linkage1 as other;
extern "C" {
#[linkage = "extern_weak"]
static foo: *const isize;
#[linkage = "extern_weak"]
static something_that_should_never_exist: *mut isize;
}
|
// library unless it satisfies a non weak undefined symbol. The 'other' crate
// is compiled as a dynamic library where it would only be used for a
// weak-symbol as part of an executable, so the dynamic library would be
// discarded. By adding and calling `other::bar`, we get around this problem.
other::bar();
unsafe {
assert!(!foo.is_null());
assert_eq!(*foo, 3);
assert!(something_that_should_never_exist.is_null());
}
}
|
fn main() {
// It appears that the --as-needed flag to linkers will not pull in a dynamic
|
random_line_split
|
linkage1.rs
|
// run-pass
// ignore-windows
// ignore-macos
// ignore-emscripten doesn't support this linkage
// ignore-sgx weak linkage not permitted
// aux-build:linkage1.rs
#![feature(linkage)]
extern crate linkage1 as other;
extern "C" {
#[linkage = "extern_weak"]
static foo: *const isize;
#[linkage = "extern_weak"]
static something_that_should_never_exist: *mut isize;
}
fn
|
() {
// It appears that the --as-needed flag to linkers will not pull in a dynamic
// library unless it satisfies a non weak undefined symbol. The 'other' crate
// is compiled as a dynamic library where it would only be used for a
// weak-symbol as part of an executable, so the dynamic library would be
// discarded. By adding and calling `other::bar`, we get around this problem.
other::bar();
unsafe {
assert!(!foo.is_null());
assert_eq!(*foo, 3);
assert!(something_that_should_never_exist.is_null());
}
}
|
main
|
identifier_name
|
linkage1.rs
|
// run-pass
// ignore-windows
// ignore-macos
// ignore-emscripten doesn't support this linkage
// ignore-sgx weak linkage not permitted
// aux-build:linkage1.rs
#![feature(linkage)]
extern crate linkage1 as other;
extern "C" {
#[linkage = "extern_weak"]
static foo: *const isize;
#[linkage = "extern_weak"]
static something_that_should_never_exist: *mut isize;
}
fn main()
|
{
// It appears that the --as-needed flag to linkers will not pull in a dynamic
// library unless it satisfies a non weak undefined symbol. The 'other' crate
// is compiled as a dynamic library where it would only be used for a
// weak-symbol as part of an executable, so the dynamic library would be
// discarded. By adding and calling `other::bar`, we get around this problem.
other::bar();
unsafe {
assert!(!foo.is_null());
assert_eq!(*foo, 3);
assert!(something_that_should_never_exist.is_null());
}
}
|
identifier_body
|
|
pixel.rs
|
/*
* Copyright 2016 Mattis Marjak
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::ffi::{CStr, CString};
use std::fmt;
#[cfg(target_os = "freebsd")]
use libc::size_t;
#[cfg(not(target_os = "freebsd"))]
use size_t;
use bindings;
use result::MagickError;
use crate::result::Result;
#[derive(Default, Debug)]
pub struct HSL {
pub hue: f64,
pub saturation: f64,
pub lightness: f64,
}
wand_common!(
PixelWand,
NewPixelWand,
ClearPixelWand,
IsPixelWand,
ClonePixelWand,
DestroyPixelWand,
PixelClearException,
PixelGetExceptionType,
PixelGetException
);
impl PixelWand {
pub fn is_similar(&self, other: &PixelWand, fuzz: f64) -> Result<()> {
match unsafe { bindings::IsPixelWandSimilar(self.wand, other.wand, fuzz) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("not similar")),
}
}
pub fn get_hsl(&self) -> HSL {
let mut hsl = HSL::default();
unsafe {
bindings::PixelGetHSL(
self.wand,
&mut hsl.hue as *mut _,
&mut hsl.saturation as *mut _,
&mut hsl.lightness as *mut _,
);
}
hsl
}
pub fn set_hsl(&self, hsl: &HSL) {
unsafe {
bindings::PixelSetHSL(self.wand, hsl.hue, hsl.saturation, hsl.lightness);
}
}
pub fn fmt_w_prefix(&self, f: &mut fmt::Formatter, prefix: &str) -> fmt::Result {
let mut prf = prefix.to_string();
prf.push_str(" ");
writeln!(f, "{}PixelWand {{", prefix)?;
writeln!(f, "{}Exception: {:?}", prf, self.get_exception())?;
writeln!(f, "{}IsWand: {:?}", prf, self.is_wand())?;
self.fmt_unchecked_settings(f, &prf)?;
self.fmt_color_settings(f, &prf)?;
writeln!(f, "{}}}", prefix)
}
pub fn set_color(&mut self, s: &str) -> Result<()> {
let c_string = CString::new(s).map_err(|_| "could not convert to cstring")?;
match unsafe { bindings::PixelSetColor(self.wand, c_string.as_ptr()) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("failed to set color")),
}
}
string_get!(get_color_as_string, PixelGetColorAsString);
string_get!(
get_color_as_normalized_string,
PixelGetColorAsNormalizedString
);
set_get_unchecked!(
get_color_count, set_color_count, PixelGetColorCount, PixelSetColorCount, size_t
get_index, set_index, PixelGetIndex, PixelSetIndex, bindings::Quantum
get_fuzz, set_fuzz, PixelGetFuzz, PixelSetFuzz, f64
);
color_set_get!(
get_alpha, get_alpha_quantum, set_alpha, set_alpha_quantum,
PixelGetAlpha, PixelGetAlphaQuantum, PixelSetAlpha, PixelSetAlphaQuantum
get_black, get_black_quantum, set_black, set_black_quantum,
PixelGetBlack, PixelGetBlackQuantum, PixelSetBlack, PixelSetBlackQuantum
get_blue, get_blue_quantum, set_blue, set_blue_quantum,
PixelGetBlue, PixelGetBlueQuantum, PixelSetBlue, PixelSetBlueQuantum
get_cyan, get_cyan_quantum, set_cyan, set_cyan_quantum,
PixelGetCyan, PixelGetCyanQuantum, PixelSetCyan, PixelSetCyanQuantum
get_green, get_green_quantum, set_green, set_green_quantum,
PixelGetGreen, PixelGetGreenQuantum, PixelSetGreen, PixelSetGreenQuantum
get_magenta, get_magenta_quantum, set_magenta, set_magenta_quantum,
PixelGetMagenta, PixelGetMagentaQuantum, PixelSetMagenta, PixelSetMagentaQuantum
get_red, get_red_quantum, set_red, set_red_quantum,
PixelGetRed, PixelGetRedQuantum, PixelSetRed, PixelSetRedQuantum
get_yellow, get_yellow_quantum, set_yellow, set_yellow_quantum,
PixelGetYellow, PixelGetYellowQuantum, PixelSetYellow, PixelSetYellowQuantum
);
}
impl fmt::Debug for PixelWand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result
|
}
|
{
self.fmt_w_prefix(f, "")
}
|
identifier_body
|
pixel.rs
|
/*
* Copyright 2016 Mattis Marjak
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
use std::fmt;
#[cfg(target_os = "freebsd")]
use libc::size_t;
#[cfg(not(target_os = "freebsd"))]
use size_t;
use bindings;
use result::MagickError;
use crate::result::Result;
#[derive(Default, Debug)]
pub struct HSL {
pub hue: f64,
pub saturation: f64,
pub lightness: f64,
}
wand_common!(
PixelWand,
NewPixelWand,
ClearPixelWand,
IsPixelWand,
ClonePixelWand,
DestroyPixelWand,
PixelClearException,
PixelGetExceptionType,
PixelGetException
);
impl PixelWand {
pub fn is_similar(&self, other: &PixelWand, fuzz: f64) -> Result<()> {
match unsafe { bindings::IsPixelWandSimilar(self.wand, other.wand, fuzz) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("not similar")),
}
}
pub fn get_hsl(&self) -> HSL {
let mut hsl = HSL::default();
unsafe {
bindings::PixelGetHSL(
self.wand,
&mut hsl.hue as *mut _,
&mut hsl.saturation as *mut _,
&mut hsl.lightness as *mut _,
);
}
hsl
}
pub fn set_hsl(&self, hsl: &HSL) {
unsafe {
bindings::PixelSetHSL(self.wand, hsl.hue, hsl.saturation, hsl.lightness);
}
}
pub fn fmt_w_prefix(&self, f: &mut fmt::Formatter, prefix: &str) -> fmt::Result {
let mut prf = prefix.to_string();
prf.push_str(" ");
writeln!(f, "{}PixelWand {{", prefix)?;
writeln!(f, "{}Exception: {:?}", prf, self.get_exception())?;
writeln!(f, "{}IsWand: {:?}", prf, self.is_wand())?;
self.fmt_unchecked_settings(f, &prf)?;
self.fmt_color_settings(f, &prf)?;
writeln!(f, "{}}}", prefix)
}
pub fn set_color(&mut self, s: &str) -> Result<()> {
let c_string = CString::new(s).map_err(|_| "could not convert to cstring")?;
match unsafe { bindings::PixelSetColor(self.wand, c_string.as_ptr()) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("failed to set color")),
}
}
string_get!(get_color_as_string, PixelGetColorAsString);
string_get!(
get_color_as_normalized_string,
PixelGetColorAsNormalizedString
);
set_get_unchecked!(
get_color_count, set_color_count, PixelGetColorCount, PixelSetColorCount, size_t
get_index, set_index, PixelGetIndex, PixelSetIndex, bindings::Quantum
get_fuzz, set_fuzz, PixelGetFuzz, PixelSetFuzz, f64
);
color_set_get!(
get_alpha, get_alpha_quantum, set_alpha, set_alpha_quantum,
PixelGetAlpha, PixelGetAlphaQuantum, PixelSetAlpha, PixelSetAlphaQuantum
get_black, get_black_quantum, set_black, set_black_quantum,
PixelGetBlack, PixelGetBlackQuantum, PixelSetBlack, PixelSetBlackQuantum
get_blue, get_blue_quantum, set_blue, set_blue_quantum,
PixelGetBlue, PixelGetBlueQuantum, PixelSetBlue, PixelSetBlueQuantum
get_cyan, get_cyan_quantum, set_cyan, set_cyan_quantum,
PixelGetCyan, PixelGetCyanQuantum, PixelSetCyan, PixelSetCyanQuantum
get_green, get_green_quantum, set_green, set_green_quantum,
PixelGetGreen, PixelGetGreenQuantum, PixelSetGreen, PixelSetGreenQuantum
get_magenta, get_magenta_quantum, set_magenta, set_magenta_quantum,
PixelGetMagenta, PixelGetMagentaQuantum, PixelSetMagenta, PixelSetMagentaQuantum
get_red, get_red_quantum, set_red, set_red_quantum,
PixelGetRed, PixelGetRedQuantum, PixelSetRed, PixelSetRedQuantum
get_yellow, get_yellow_quantum, set_yellow, set_yellow_quantum,
PixelGetYellow, PixelGetYellowQuantum, PixelSetYellow, PixelSetYellowQuantum
);
}
impl fmt::Debug for PixelWand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.fmt_w_prefix(f, "")
}
}
|
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::ffi::{CStr, CString};
|
random_line_split
|
pixel.rs
|
/*
* Copyright 2016 Mattis Marjak
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::ffi::{CStr, CString};
use std::fmt;
#[cfg(target_os = "freebsd")]
use libc::size_t;
#[cfg(not(target_os = "freebsd"))]
use size_t;
use bindings;
use result::MagickError;
use crate::result::Result;
#[derive(Default, Debug)]
pub struct HSL {
pub hue: f64,
pub saturation: f64,
pub lightness: f64,
}
wand_common!(
PixelWand,
NewPixelWand,
ClearPixelWand,
IsPixelWand,
ClonePixelWand,
DestroyPixelWand,
PixelClearException,
PixelGetExceptionType,
PixelGetException
);
impl PixelWand {
pub fn
|
(&self, other: &PixelWand, fuzz: f64) -> Result<()> {
match unsafe { bindings::IsPixelWandSimilar(self.wand, other.wand, fuzz) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("not similar")),
}
}
pub fn get_hsl(&self) -> HSL {
let mut hsl = HSL::default();
unsafe {
bindings::PixelGetHSL(
self.wand,
&mut hsl.hue as *mut _,
&mut hsl.saturation as *mut _,
&mut hsl.lightness as *mut _,
);
}
hsl
}
pub fn set_hsl(&self, hsl: &HSL) {
unsafe {
bindings::PixelSetHSL(self.wand, hsl.hue, hsl.saturation, hsl.lightness);
}
}
pub fn fmt_w_prefix(&self, f: &mut fmt::Formatter, prefix: &str) -> fmt::Result {
let mut prf = prefix.to_string();
prf.push_str(" ");
writeln!(f, "{}PixelWand {{", prefix)?;
writeln!(f, "{}Exception: {:?}", prf, self.get_exception())?;
writeln!(f, "{}IsWand: {:?}", prf, self.is_wand())?;
self.fmt_unchecked_settings(f, &prf)?;
self.fmt_color_settings(f, &prf)?;
writeln!(f, "{}}}", prefix)
}
pub fn set_color(&mut self, s: &str) -> Result<()> {
let c_string = CString::new(s).map_err(|_| "could not convert to cstring")?;
match unsafe { bindings::PixelSetColor(self.wand, c_string.as_ptr()) } {
bindings::MagickBooleanType_MagickTrue => Ok(()),
_ => Err(MagickError("failed to set color")),
}
}
string_get!(get_color_as_string, PixelGetColorAsString);
string_get!(
get_color_as_normalized_string,
PixelGetColorAsNormalizedString
);
set_get_unchecked!(
get_color_count, set_color_count, PixelGetColorCount, PixelSetColorCount, size_t
get_index, set_index, PixelGetIndex, PixelSetIndex, bindings::Quantum
get_fuzz, set_fuzz, PixelGetFuzz, PixelSetFuzz, f64
);
color_set_get!(
get_alpha, get_alpha_quantum, set_alpha, set_alpha_quantum,
PixelGetAlpha, PixelGetAlphaQuantum, PixelSetAlpha, PixelSetAlphaQuantum
get_black, get_black_quantum, set_black, set_black_quantum,
PixelGetBlack, PixelGetBlackQuantum, PixelSetBlack, PixelSetBlackQuantum
get_blue, get_blue_quantum, set_blue, set_blue_quantum,
PixelGetBlue, PixelGetBlueQuantum, PixelSetBlue, PixelSetBlueQuantum
get_cyan, get_cyan_quantum, set_cyan, set_cyan_quantum,
PixelGetCyan, PixelGetCyanQuantum, PixelSetCyan, PixelSetCyanQuantum
get_green, get_green_quantum, set_green, set_green_quantum,
PixelGetGreen, PixelGetGreenQuantum, PixelSetGreen, PixelSetGreenQuantum
get_magenta, get_magenta_quantum, set_magenta, set_magenta_quantum,
PixelGetMagenta, PixelGetMagentaQuantum, PixelSetMagenta, PixelSetMagentaQuantum
get_red, get_red_quantum, set_red, set_red_quantum,
PixelGetRed, PixelGetRedQuantum, PixelSetRed, PixelSetRedQuantum
get_yellow, get_yellow_quantum, set_yellow, set_yellow_quantum,
PixelGetYellow, PixelGetYellowQuantum, PixelSetYellow, PixelSetYellowQuantum
);
}
impl fmt::Debug for PixelWand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.fmt_w_prefix(f, "")
}
}
|
is_similar
|
identifier_name
|
dom_apis.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic implementations of some DOM APIs so they can be shared between Servo
//! and Gecko.
use context::QuirksMode;
use dom::{TDocument, TElement, TNode};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor, InvalidationVector};
use selectors::{Element, NthIndexCache, SelectorList};
use selectors::matching::{self, MatchingContext, MatchingMode};
use smallvec::SmallVec;
/// <https://dom.spec.whatwg.org/#dom-element-matches>
pub fn element_matches<E>(
element: &E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> bool
where
E: Element,
{
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
None,
quirks_mode,
);
context.scope_element = Some(element.opaque());
matching::matches_selector_list(selector_list, element, &mut context)
}
/// <https://dom.spec.whatwg.org/#dom-element-closest>
pub fn element_closest<E>(
element: E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> Option<E>
where
E: Element,
{
let mut nth_index_cache = NthIndexCache::default();
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
context.scope_element = Some(element.opaque());
let mut current = Some(element);
while let Some(element) = current.take() {
if matching::matches_selector_list(selector_list, &element, &mut context) {
return Some(element);
}
current = element.parent_element();
}
return None;
}
/// A selector query abstraction, in order to be generic over QuerySelector and
/// QuerySelectorAll.
pub trait SelectorQuery<E: TElement> {
/// The output of the query.
type Output;
/// Whether the query should stop after the first element has been matched.
fn should_stop_after_first_match() -> bool;
/// Append an element matching after the first query.
fn append_element(output: &mut Self::Output, element: E);
/// Returns true if the output is empty.
fn is_empty(output: &Self::Output) -> bool;
}
/// The result of a querySelectorAll call.
pub type QuerySelectorAllResult<E> = SmallVec<[E; 128]>;
/// A query for all the elements in a subtree.
pub struct QueryAll;
impl<E: TElement> SelectorQuery<E> for QueryAll {
type Output = QuerySelectorAllResult<E>;
fn should_stop_after_first_match() -> bool { false }
fn append_element(output: &mut Self::Output, element: E) {
output.push(element);
}
fn is_empty(output: &Self::Output) -> bool {
output.is_empty()
}
}
/// A query for the first in-tree match of all the elements in a subtree.
pub struct QueryFirst;
impl<E: TElement> SelectorQuery<E> for QueryFirst {
type Output = Option<E>;
fn should_stop_after_first_match() -> bool { true }
fn append_element(output: &mut Self::Output, element: E) {
if output.is_none() {
*output = Some(element)
}
}
fn is_empty(output: &Self::Output) -> bool {
output.is_none()
}
}
struct QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
results: &'a mut Q::Output,
matching_context: MatchingContext<'a, E::Impl>,
selector_list: &'a SelectorList<E::Impl>,
}
impl<'a, E, Q> InvalidationProcessor<'a, E> for QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
fn light_tree_only(&self) -> bool { true }
fn collect_invalidations(
&mut self,
element: E,
self_invalidations: &mut InvalidationVector<'a>,
descendant_invalidations: &mut InvalidationVector<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
// TODO(emilio): If the element is not a root element, and
// selector_list has any descendant combinator, we need to do extra work
// in order to handle properly things like:
//
// <div id="a">
// <div id="b">
// <div id="c"></div>
// </div>
// </div>
//
// b.querySelector('#a div'); // Should return "c".
//
// For now, assert it's a root element.
debug_assert!(element.parent_element().is_none());
let target_vector =
if self.matching_context.scope_element.is_some() {
descendant_invalidations
} else
|
;
for selector in self.selector_list.0.iter() {
target_vector.push(Invalidation::new(selector, 0))
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn should_process_descendants(&mut self, _: E) -> bool {
if Q::should_stop_after_first_match() {
return Q::is_empty(&self.results)
}
true
}
fn invalidated_self(&mut self, e: E) {
Q::append_element(self.results, e);
}
fn recursion_limit_exceeded(&mut self, _e: E) {}
fn invalidated_descendants(&mut self, _e: E, _child: E) {}
}
fn collect_all_elements<E, Q, F>(
root: E::ConcreteNode,
results: &mut Q::Output,
mut filter: F,
)
where
E: TElement,
Q: SelectorQuery<E>,
F: FnMut(E) -> bool,
{
for node in root.dom_descendants() {
let element = match node.as_element() {
Some(e) => e,
None => continue,
};
if!filter(element) {
continue;
}
Q::append_element(results, element);
if Q::should_stop_after_first_match() {
return;
}
}
}
/// Fast paths for a given selector query.
///
/// FIXME(emilio, nbp): This may very well be a good candidate for code to be
/// replaced by HolyJit :)
fn query_selector_fast<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
quirks_mode: QuirksMode,
) -> Result<(), ()>
where
E: TElement,
Q: SelectorQuery<E>,
{
use selectors::parser::{Component, LocalName};
use std::borrow::Borrow;
// We need to return elements in document order, and reordering them
// afterwards is kinda silly.
if selector_list.0.len() > 1 {
return Err(());
}
let selector = &selector_list.0[0];
// Let's just care about the easy cases for now.
//
// FIXME(emilio): Blink has a fast path for classes in ancestor combinators
// that may be worth stealing.
if selector.len() > 1 {
return Err(());
}
let component = selector.iter().next().unwrap();
match *component {
Component::ExplicitUniversalType => {
collect_all_elements::<E, Q, _>(root, results, |_| true)
}
Component::ID(ref id) => {
// TODO(emilio): We may want to reuse Gecko's document ID table.
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_id(id, case_sensitivity)
})
}
Component::Class(ref class) => {
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_class(class, case_sensitivity)
})
}
Component::LocalName(LocalName { ref name, ref lower_name }) => {
collect_all_elements::<E, Q, _>(root, results, |element| {
if element.is_html_element_in_html_document() {
element.get_local_name() == lower_name.borrow()
} else {
element.get_local_name() == name.borrow()
}
})
}
// TODO(emilio): More fast paths?
_ => {
return Err(())
}
}
Ok(())
}
// Slow path for a given selector query.
fn query_selector_slow<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
matching_context: &mut MatchingContext<E::Impl>,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
collect_all_elements::<E, Q, _>(root, results, |element| {
matching::matches_selector_list(selector_list, &element, matching_context)
});
}
/// <https://dom.spec.whatwg.org/#dom-parentnode-queryselector>
pub fn query_selector<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
use invalidation::element::invalidator::TreeStyleInvalidator;
let quirks_mode = root.owner_doc().quirks_mode();
let fast_result = query_selector_fast::<E, Q>(
root,
selector_list,
results,
quirks_mode,
);
if fast_result.is_ok() {
return;
}
// Slow path: Use the invalidation machinery if we're a root, and tree
// traversal otherwise.
//
// See the comment in collect_invalidations to see why only if we're a root.
let mut nth_index_cache = NthIndexCache::default();
let mut matching_context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
let root_element = root.as_element();
matching_context.scope_element = root_element.map(|e| e.opaque());
// The invalidation mechanism is only useful in presence of combinators.
//
// We could do that check properly here, though checking the length of the
// selectors is a good heuristic.
let invalidation_may_be_useful =
selector_list.0.iter().any(|s| s.len() > 1);
if root_element.is_some() ||!invalidation_may_be_useful {
query_selector_slow::<E, Q>(
root,
selector_list,
results,
&mut matching_context,
);
} else {
let mut processor = QuerySelectorProcessor::<E, Q> {
results,
matching_context,
selector_list,
};
for node in root.dom_children() {
if let Some(e) = node.as_element() {
TreeStyleInvalidator::new(
e,
/* stack_limit_checker = */ None,
&mut processor,
).invalidate();
}
}
}
}
|
{
self_invalidations
}
|
conditional_block
|
dom_apis.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic implementations of some DOM APIs so they can be shared between Servo
//! and Gecko.
use context::QuirksMode;
use dom::{TDocument, TElement, TNode};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor, InvalidationVector};
use selectors::{Element, NthIndexCache, SelectorList};
use selectors::matching::{self, MatchingContext, MatchingMode};
use smallvec::SmallVec;
/// <https://dom.spec.whatwg.org/#dom-element-matches>
pub fn element_matches<E>(
element: &E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> bool
where
E: Element,
{
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
None,
quirks_mode,
);
context.scope_element = Some(element.opaque());
matching::matches_selector_list(selector_list, element, &mut context)
}
/// <https://dom.spec.whatwg.org/#dom-element-closest>
pub fn element_closest<E>(
element: E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> Option<E>
where
E: Element,
{
let mut nth_index_cache = NthIndexCache::default();
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
context.scope_element = Some(element.opaque());
let mut current = Some(element);
while let Some(element) = current.take() {
if matching::matches_selector_list(selector_list, &element, &mut context) {
return Some(element);
}
current = element.parent_element();
}
return None;
}
/// A selector query abstraction, in order to be generic over QuerySelector and
/// QuerySelectorAll.
pub trait SelectorQuery<E: TElement> {
/// The output of the query.
type Output;
/// Whether the query should stop after the first element has been matched.
fn should_stop_after_first_match() -> bool;
/// Append an element matching after the first query.
fn append_element(output: &mut Self::Output, element: E);
/// Returns true if the output is empty.
fn is_empty(output: &Self::Output) -> bool;
}
/// The result of a querySelectorAll call.
pub type QuerySelectorAllResult<E> = SmallVec<[E; 128]>;
/// A query for all the elements in a subtree.
pub struct QueryAll;
impl<E: TElement> SelectorQuery<E> for QueryAll {
type Output = QuerySelectorAllResult<E>;
fn should_stop_after_first_match() -> bool { false }
fn append_element(output: &mut Self::Output, element: E)
|
fn is_empty(output: &Self::Output) -> bool {
output.is_empty()
}
}
/// A query for the first in-tree match of all the elements in a subtree.
pub struct QueryFirst;
impl<E: TElement> SelectorQuery<E> for QueryFirst {
type Output = Option<E>;
fn should_stop_after_first_match() -> bool { true }
fn append_element(output: &mut Self::Output, element: E) {
if output.is_none() {
*output = Some(element)
}
}
fn is_empty(output: &Self::Output) -> bool {
output.is_none()
}
}
struct QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
results: &'a mut Q::Output,
matching_context: MatchingContext<'a, E::Impl>,
selector_list: &'a SelectorList<E::Impl>,
}
impl<'a, E, Q> InvalidationProcessor<'a, E> for QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
fn light_tree_only(&self) -> bool { true }
fn collect_invalidations(
&mut self,
element: E,
self_invalidations: &mut InvalidationVector<'a>,
descendant_invalidations: &mut InvalidationVector<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
// TODO(emilio): If the element is not a root element, and
// selector_list has any descendant combinator, we need to do extra work
// in order to handle properly things like:
//
// <div id="a">
// <div id="b">
// <div id="c"></div>
// </div>
// </div>
//
// b.querySelector('#a div'); // Should return "c".
//
// For now, assert it's a root element.
debug_assert!(element.parent_element().is_none());
let target_vector =
if self.matching_context.scope_element.is_some() {
descendant_invalidations
} else {
self_invalidations
};
for selector in self.selector_list.0.iter() {
target_vector.push(Invalidation::new(selector, 0))
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn should_process_descendants(&mut self, _: E) -> bool {
if Q::should_stop_after_first_match() {
return Q::is_empty(&self.results)
}
true
}
fn invalidated_self(&mut self, e: E) {
Q::append_element(self.results, e);
}
fn recursion_limit_exceeded(&mut self, _e: E) {}
fn invalidated_descendants(&mut self, _e: E, _child: E) {}
}
fn collect_all_elements<E, Q, F>(
root: E::ConcreteNode,
results: &mut Q::Output,
mut filter: F,
)
where
E: TElement,
Q: SelectorQuery<E>,
F: FnMut(E) -> bool,
{
for node in root.dom_descendants() {
let element = match node.as_element() {
Some(e) => e,
None => continue,
};
if!filter(element) {
continue;
}
Q::append_element(results, element);
if Q::should_stop_after_first_match() {
return;
}
}
}
/// Fast paths for a given selector query.
///
/// FIXME(emilio, nbp): This may very well be a good candidate for code to be
/// replaced by HolyJit :)
fn query_selector_fast<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
quirks_mode: QuirksMode,
) -> Result<(), ()>
where
E: TElement,
Q: SelectorQuery<E>,
{
use selectors::parser::{Component, LocalName};
use std::borrow::Borrow;
// We need to return elements in document order, and reordering them
// afterwards is kinda silly.
if selector_list.0.len() > 1 {
return Err(());
}
let selector = &selector_list.0[0];
// Let's just care about the easy cases for now.
//
// FIXME(emilio): Blink has a fast path for classes in ancestor combinators
// that may be worth stealing.
if selector.len() > 1 {
return Err(());
}
let component = selector.iter().next().unwrap();
match *component {
Component::ExplicitUniversalType => {
collect_all_elements::<E, Q, _>(root, results, |_| true)
}
Component::ID(ref id) => {
// TODO(emilio): We may want to reuse Gecko's document ID table.
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_id(id, case_sensitivity)
})
}
Component::Class(ref class) => {
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_class(class, case_sensitivity)
})
}
Component::LocalName(LocalName { ref name, ref lower_name }) => {
collect_all_elements::<E, Q, _>(root, results, |element| {
if element.is_html_element_in_html_document() {
element.get_local_name() == lower_name.borrow()
} else {
element.get_local_name() == name.borrow()
}
})
}
// TODO(emilio): More fast paths?
_ => {
return Err(())
}
}
Ok(())
}
// Slow path for a given selector query.
fn query_selector_slow<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
matching_context: &mut MatchingContext<E::Impl>,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
collect_all_elements::<E, Q, _>(root, results, |element| {
matching::matches_selector_list(selector_list, &element, matching_context)
});
}
/// <https://dom.spec.whatwg.org/#dom-parentnode-queryselector>
pub fn query_selector<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
use invalidation::element::invalidator::TreeStyleInvalidator;
let quirks_mode = root.owner_doc().quirks_mode();
let fast_result = query_selector_fast::<E, Q>(
root,
selector_list,
results,
quirks_mode,
);
if fast_result.is_ok() {
return;
}
// Slow path: Use the invalidation machinery if we're a root, and tree
// traversal otherwise.
//
// See the comment in collect_invalidations to see why only if we're a root.
let mut nth_index_cache = NthIndexCache::default();
let mut matching_context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
let root_element = root.as_element();
matching_context.scope_element = root_element.map(|e| e.opaque());
// The invalidation mechanism is only useful in presence of combinators.
//
// We could do that check properly here, though checking the length of the
// selectors is a good heuristic.
let invalidation_may_be_useful =
selector_list.0.iter().any(|s| s.len() > 1);
if root_element.is_some() ||!invalidation_may_be_useful {
query_selector_slow::<E, Q>(
root,
selector_list,
results,
&mut matching_context,
);
} else {
let mut processor = QuerySelectorProcessor::<E, Q> {
results,
matching_context,
selector_list,
};
for node in root.dom_children() {
if let Some(e) = node.as_element() {
TreeStyleInvalidator::new(
e,
/* stack_limit_checker = */ None,
&mut processor,
).invalidate();
}
}
}
}
|
{
output.push(element);
}
|
identifier_body
|
dom_apis.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic implementations of some DOM APIs so they can be shared between Servo
//! and Gecko.
use context::QuirksMode;
use dom::{TDocument, TElement, TNode};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor, InvalidationVector};
use selectors::{Element, NthIndexCache, SelectorList};
use selectors::matching::{self, MatchingContext, MatchingMode};
use smallvec::SmallVec;
/// <https://dom.spec.whatwg.org/#dom-element-matches>
pub fn element_matches<E>(
element: &E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> bool
where
E: Element,
{
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
None,
quirks_mode,
);
context.scope_element = Some(element.opaque());
matching::matches_selector_list(selector_list, element, &mut context)
}
/// <https://dom.spec.whatwg.org/#dom-element-closest>
pub fn element_closest<E>(
element: E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> Option<E>
where
E: Element,
{
let mut nth_index_cache = NthIndexCache::default();
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
context.scope_element = Some(element.opaque());
let mut current = Some(element);
while let Some(element) = current.take() {
if matching::matches_selector_list(selector_list, &element, &mut context) {
return Some(element);
}
current = element.parent_element();
}
return None;
}
/// A selector query abstraction, in order to be generic over QuerySelector and
/// QuerySelectorAll.
pub trait SelectorQuery<E: TElement> {
/// The output of the query.
type Output;
/// Whether the query should stop after the first element has been matched.
fn should_stop_after_first_match() -> bool;
/// Append an element matching after the first query.
fn append_element(output: &mut Self::Output, element: E);
/// Returns true if the output is empty.
fn is_empty(output: &Self::Output) -> bool;
}
/// The result of a querySelectorAll call.
pub type QuerySelectorAllResult<E> = SmallVec<[E; 128]>;
/// A query for all the elements in a subtree.
pub struct QueryAll;
impl<E: TElement> SelectorQuery<E> for QueryAll {
type Output = QuerySelectorAllResult<E>;
fn should_stop_after_first_match() -> bool { false }
fn append_element(output: &mut Self::Output, element: E) {
output.push(element);
}
fn is_empty(output: &Self::Output) -> bool {
output.is_empty()
}
}
/// A query for the first in-tree match of all the elements in a subtree.
pub struct QueryFirst;
impl<E: TElement> SelectorQuery<E> for QueryFirst {
type Output = Option<E>;
fn should_stop_after_first_match() -> bool { true }
fn append_element(output: &mut Self::Output, element: E) {
if output.is_none() {
*output = Some(element)
}
}
fn is_empty(output: &Self::Output) -> bool {
output.is_none()
}
}
struct QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
results: &'a mut Q::Output,
matching_context: MatchingContext<'a, E::Impl>,
selector_list: &'a SelectorList<E::Impl>,
}
impl<'a, E, Q> InvalidationProcessor<'a, E> for QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
fn light_tree_only(&self) -> bool { true }
fn collect_invalidations(
&mut self,
element: E,
self_invalidations: &mut InvalidationVector<'a>,
descendant_invalidations: &mut InvalidationVector<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
// TODO(emilio): If the element is not a root element, and
// selector_list has any descendant combinator, we need to do extra work
// in order to handle properly things like:
//
// <div id="a">
// <div id="b">
// <div id="c"></div>
// </div>
// </div>
//
// b.querySelector('#a div'); // Should return "c".
//
// For now, assert it's a root element.
debug_assert!(element.parent_element().is_none());
let target_vector =
if self.matching_context.scope_element.is_some() {
descendant_invalidations
} else {
self_invalidations
};
for selector in self.selector_list.0.iter() {
target_vector.push(Invalidation::new(selector, 0))
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn
|
(&mut self, _: E) -> bool {
if Q::should_stop_after_first_match() {
return Q::is_empty(&self.results)
}
true
}
fn invalidated_self(&mut self, e: E) {
Q::append_element(self.results, e);
}
fn recursion_limit_exceeded(&mut self, _e: E) {}
fn invalidated_descendants(&mut self, _e: E, _child: E) {}
}
fn collect_all_elements<E, Q, F>(
root: E::ConcreteNode,
results: &mut Q::Output,
mut filter: F,
)
where
E: TElement,
Q: SelectorQuery<E>,
F: FnMut(E) -> bool,
{
for node in root.dom_descendants() {
let element = match node.as_element() {
Some(e) => e,
None => continue,
};
if!filter(element) {
continue;
}
Q::append_element(results, element);
if Q::should_stop_after_first_match() {
return;
}
}
}
/// Fast paths for a given selector query.
///
/// FIXME(emilio, nbp): This may very well be a good candidate for code to be
/// replaced by HolyJit :)
fn query_selector_fast<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
quirks_mode: QuirksMode,
) -> Result<(), ()>
where
E: TElement,
Q: SelectorQuery<E>,
{
use selectors::parser::{Component, LocalName};
use std::borrow::Borrow;
// We need to return elements in document order, and reordering them
// afterwards is kinda silly.
if selector_list.0.len() > 1 {
return Err(());
}
let selector = &selector_list.0[0];
// Let's just care about the easy cases for now.
//
// FIXME(emilio): Blink has a fast path for classes in ancestor combinators
// that may be worth stealing.
if selector.len() > 1 {
return Err(());
}
let component = selector.iter().next().unwrap();
match *component {
Component::ExplicitUniversalType => {
collect_all_elements::<E, Q, _>(root, results, |_| true)
}
Component::ID(ref id) => {
// TODO(emilio): We may want to reuse Gecko's document ID table.
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_id(id, case_sensitivity)
})
}
Component::Class(ref class) => {
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_class(class, case_sensitivity)
})
}
Component::LocalName(LocalName { ref name, ref lower_name }) => {
collect_all_elements::<E, Q, _>(root, results, |element| {
if element.is_html_element_in_html_document() {
element.get_local_name() == lower_name.borrow()
} else {
element.get_local_name() == name.borrow()
}
})
}
// TODO(emilio): More fast paths?
_ => {
return Err(())
}
}
Ok(())
}
// Slow path for a given selector query.
fn query_selector_slow<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
matching_context: &mut MatchingContext<E::Impl>,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
collect_all_elements::<E, Q, _>(root, results, |element| {
matching::matches_selector_list(selector_list, &element, matching_context)
});
}
/// <https://dom.spec.whatwg.org/#dom-parentnode-queryselector>
pub fn query_selector<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
use invalidation::element::invalidator::TreeStyleInvalidator;
let quirks_mode = root.owner_doc().quirks_mode();
let fast_result = query_selector_fast::<E, Q>(
root,
selector_list,
results,
quirks_mode,
);
if fast_result.is_ok() {
return;
}
// Slow path: Use the invalidation machinery if we're a root, and tree
// traversal otherwise.
//
// See the comment in collect_invalidations to see why only if we're a root.
let mut nth_index_cache = NthIndexCache::default();
let mut matching_context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
let root_element = root.as_element();
matching_context.scope_element = root_element.map(|e| e.opaque());
// The invalidation mechanism is only useful in presence of combinators.
//
// We could do that check properly here, though checking the length of the
// selectors is a good heuristic.
let invalidation_may_be_useful =
selector_list.0.iter().any(|s| s.len() > 1);
if root_element.is_some() ||!invalidation_may_be_useful {
query_selector_slow::<E, Q>(
root,
selector_list,
results,
&mut matching_context,
);
} else {
let mut processor = QuerySelectorProcessor::<E, Q> {
results,
matching_context,
selector_list,
};
for node in root.dom_children() {
if let Some(e) = node.as_element() {
TreeStyleInvalidator::new(
e,
/* stack_limit_checker = */ None,
&mut processor,
).invalidate();
}
}
}
}
|
should_process_descendants
|
identifier_name
|
dom_apis.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! Generic implementations of some DOM APIs so they can be shared between Servo
//! and Gecko.
use context::QuirksMode;
use dom::{TDocument, TElement, TNode};
use invalidation::element::invalidator::{Invalidation, InvalidationProcessor, InvalidationVector};
use selectors::{Element, NthIndexCache, SelectorList};
use selectors::matching::{self, MatchingContext, MatchingMode};
use smallvec::SmallVec;
/// <https://dom.spec.whatwg.org/#dom-element-matches>
pub fn element_matches<E>(
element: &E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> bool
where
E: Element,
{
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
None,
quirks_mode,
);
context.scope_element = Some(element.opaque());
matching::matches_selector_list(selector_list, element, &mut context)
}
/// <https://dom.spec.whatwg.org/#dom-element-closest>
pub fn element_closest<E>(
element: E,
selector_list: &SelectorList<E::Impl>,
quirks_mode: QuirksMode,
) -> Option<E>
where
E: Element,
{
let mut nth_index_cache = NthIndexCache::default();
let mut context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
context.scope_element = Some(element.opaque());
let mut current = Some(element);
while let Some(element) = current.take() {
if matching::matches_selector_list(selector_list, &element, &mut context) {
return Some(element);
}
current = element.parent_element();
}
return None;
}
/// A selector query abstraction, in order to be generic over QuerySelector and
/// QuerySelectorAll.
pub trait SelectorQuery<E: TElement> {
/// The output of the query.
type Output;
/// Whether the query should stop after the first element has been matched.
fn should_stop_after_first_match() -> bool;
/// Append an element matching after the first query.
fn append_element(output: &mut Self::Output, element: E);
/// Returns true if the output is empty.
fn is_empty(output: &Self::Output) -> bool;
}
/// The result of a querySelectorAll call.
pub type QuerySelectorAllResult<E> = SmallVec<[E; 128]>;
/// A query for all the elements in a subtree.
pub struct QueryAll;
impl<E: TElement> SelectorQuery<E> for QueryAll {
type Output = QuerySelectorAllResult<E>;
fn should_stop_after_first_match() -> bool { false }
fn append_element(output: &mut Self::Output, element: E) {
output.push(element);
}
fn is_empty(output: &Self::Output) -> bool {
output.is_empty()
}
}
/// A query for the first in-tree match of all the elements in a subtree.
pub struct QueryFirst;
impl<E: TElement> SelectorQuery<E> for QueryFirst {
type Output = Option<E>;
fn should_stop_after_first_match() -> bool { true }
fn append_element(output: &mut Self::Output, element: E) {
if output.is_none() {
*output = Some(element)
}
}
fn is_empty(output: &Self::Output) -> bool {
output.is_none()
}
}
struct QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
results: &'a mut Q::Output,
matching_context: MatchingContext<'a, E::Impl>,
selector_list: &'a SelectorList<E::Impl>,
}
impl<'a, E, Q> InvalidationProcessor<'a, E> for QuerySelectorProcessor<'a, E, Q>
where
E: TElement + 'a,
Q: SelectorQuery<E>,
Q::Output: 'a,
{
fn light_tree_only(&self) -> bool { true }
fn collect_invalidations(
&mut self,
element: E,
self_invalidations: &mut InvalidationVector<'a>,
descendant_invalidations: &mut InvalidationVector<'a>,
_sibling_invalidations: &mut InvalidationVector<'a>,
) -> bool {
// TODO(emilio): If the element is not a root element, and
// selector_list has any descendant combinator, we need to do extra work
// in order to handle properly things like:
//
// <div id="a">
// <div id="b">
// <div id="c"></div>
// </div>
// </div>
//
// b.querySelector('#a div'); // Should return "c".
//
// For now, assert it's a root element.
debug_assert!(element.parent_element().is_none());
let target_vector =
if self.matching_context.scope_element.is_some() {
descendant_invalidations
} else {
self_invalidations
};
for selector in self.selector_list.0.iter() {
target_vector.push(Invalidation::new(selector, 0))
}
false
}
fn matching_context(&mut self) -> &mut MatchingContext<'a, E::Impl> {
&mut self.matching_context
}
fn should_process_descendants(&mut self, _: E) -> bool {
if Q::should_stop_after_first_match() {
return Q::is_empty(&self.results)
}
true
}
fn invalidated_self(&mut self, e: E) {
Q::append_element(self.results, e);
}
fn recursion_limit_exceeded(&mut self, _e: E) {}
fn invalidated_descendants(&mut self, _e: E, _child: E) {}
}
fn collect_all_elements<E, Q, F>(
root: E::ConcreteNode,
results: &mut Q::Output,
mut filter: F,
)
where
E: TElement,
Q: SelectorQuery<E>,
F: FnMut(E) -> bool,
{
for node in root.dom_descendants() {
let element = match node.as_element() {
Some(e) => e,
None => continue,
};
if!filter(element) {
continue;
}
Q::append_element(results, element);
if Q::should_stop_after_first_match() {
return;
}
}
}
/// Fast paths for a given selector query.
///
|
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
quirks_mode: QuirksMode,
) -> Result<(), ()>
where
E: TElement,
Q: SelectorQuery<E>,
{
use selectors::parser::{Component, LocalName};
use std::borrow::Borrow;
// We need to return elements in document order, and reordering them
// afterwards is kinda silly.
if selector_list.0.len() > 1 {
return Err(());
}
let selector = &selector_list.0[0];
// Let's just care about the easy cases for now.
//
// FIXME(emilio): Blink has a fast path for classes in ancestor combinators
// that may be worth stealing.
if selector.len() > 1 {
return Err(());
}
let component = selector.iter().next().unwrap();
match *component {
Component::ExplicitUniversalType => {
collect_all_elements::<E, Q, _>(root, results, |_| true)
}
Component::ID(ref id) => {
// TODO(emilio): We may want to reuse Gecko's document ID table.
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_id(id, case_sensitivity)
})
}
Component::Class(ref class) => {
let case_sensitivity = quirks_mode.classes_and_ids_case_sensitivity();
collect_all_elements::<E, Q, _>(root, results, |element| {
element.has_class(class, case_sensitivity)
})
}
Component::LocalName(LocalName { ref name, ref lower_name }) => {
collect_all_elements::<E, Q, _>(root, results, |element| {
if element.is_html_element_in_html_document() {
element.get_local_name() == lower_name.borrow()
} else {
element.get_local_name() == name.borrow()
}
})
}
// TODO(emilio): More fast paths?
_ => {
return Err(())
}
}
Ok(())
}
// Slow path for a given selector query.
fn query_selector_slow<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
matching_context: &mut MatchingContext<E::Impl>,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
collect_all_elements::<E, Q, _>(root, results, |element| {
matching::matches_selector_list(selector_list, &element, matching_context)
});
}
/// <https://dom.spec.whatwg.org/#dom-parentnode-queryselector>
pub fn query_selector<E, Q>(
root: E::ConcreteNode,
selector_list: &SelectorList<E::Impl>,
results: &mut Q::Output,
)
where
E: TElement,
Q: SelectorQuery<E>,
{
use invalidation::element::invalidator::TreeStyleInvalidator;
let quirks_mode = root.owner_doc().quirks_mode();
let fast_result = query_selector_fast::<E, Q>(
root,
selector_list,
results,
quirks_mode,
);
if fast_result.is_ok() {
return;
}
// Slow path: Use the invalidation machinery if we're a root, and tree
// traversal otherwise.
//
// See the comment in collect_invalidations to see why only if we're a root.
let mut nth_index_cache = NthIndexCache::default();
let mut matching_context = MatchingContext::new(
MatchingMode::Normal,
None,
Some(&mut nth_index_cache),
quirks_mode,
);
let root_element = root.as_element();
matching_context.scope_element = root_element.map(|e| e.opaque());
// The invalidation mechanism is only useful in presence of combinators.
//
// We could do that check properly here, though checking the length of the
// selectors is a good heuristic.
let invalidation_may_be_useful =
selector_list.0.iter().any(|s| s.len() > 1);
if root_element.is_some() ||!invalidation_may_be_useful {
query_selector_slow::<E, Q>(
root,
selector_list,
results,
&mut matching_context,
);
} else {
let mut processor = QuerySelectorProcessor::<E, Q> {
results,
matching_context,
selector_list,
};
for node in root.dom_children() {
if let Some(e) = node.as_element() {
TreeStyleInvalidator::new(
e,
/* stack_limit_checker = */ None,
&mut processor,
).invalidate();
}
}
}
}
|
/// FIXME(emilio, nbp): This may very well be a good candidate for code to be
/// replaced by HolyJit :)
fn query_selector_fast<E, Q>(
root: E::ConcreteNode,
|
random_line_split
|
util.rs
|
// Copyright 2015 Michael 'ExpHP' Lamparski
//
// Licensed under the terms of the MIT License, available at:
// http://opensource.org/licenses/MIT
// and also included in the file COPYING at the root of this distribution.
// This file may not be copied, modified, or distributed except according
// to those terms.
use std::ops::Shr;
use std::fmt::Debug;
use num::bigint::{ToBigUint,BigUint};
|
use num::{Zero,One,Integer};
use num::{ToPrimitive,FromPrimitive};
use rand::FromEntropy;
/// Services more types than NumCast does, such as BigInt.
pub trait MoreNumCast: ToPrimitive + FromPrimitive {}
impl<T: ToPrimitive + FromPrimitive> MoreNumCast for T {}
#[cfg(test)]
use test::Bencher;
/// Computes the greatest common divisor of two numbers using Euclid's method.
/// Behavior unspecified for negative numbers.
pub fn gcd<T>(a: T, b: T) -> T
where
T: Clone + Zero + Integer,
{
let mut cur_a = a;
let mut cur_b = b;
while (!cur_a.is_zero()) {
let old_b = cur_b;
cur_b = cur_a.clone();
cur_a = old_b % cur_a;
}
cur_b
}
#[bench]
fn bench_gcd(b: &mut Bencher) {
use rand::Rng;
let mut rng = ::rand::rngs::SmallRng::from_entropy();
b.iter(|| {
let a = rng.gen_range(100000u32, 1000000u32);
let b = rng.gen_range(100000u32, 1000000u32);
gcd(a, b)
})
}
/// Performs an integer square root, returning the largest integer whose square is not
/// greater than the argument.
pub fn isqrt<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T> + MoreNumCast,
{
isqrt_fast(n.clone())
.or_else(|| Some(isqrt_safe(n.clone())))
.unwrap()
}
/// Used to convert an integral literal into an arbitrary type.
/// For zero and one, `num::Zero::zero()` and `num::One::one()` is preferred when they
/// are used as the additive/multiplicative identity, and `literal` is used otherwise.
#[inline]
pub fn literal<T: MoreNumCast>(n: i32) -> T
where
T: MoreNumCast,
{
T::from_i32(n).unwrap()
}
/// Computes `pow(x, power) % modulus` using exponentation by squaring.
pub fn mod_pow<T, P>(x: T, power: P, modulus: T) -> T
where
T: Eq + Clone + Integer,
P: Eq + Clone + Integer + Shr<usize, Output = P>,
{
let mut prod: T = One::one();
let mut remaining = power;
let mut cur = x;
while remaining > Zero::zero() {
if remaining.is_odd() {
prod = prod * cur.clone();
prod = prod % modulus.clone();
}
remaining = remaining >> 1;
cur = cur.clone() * cur;
cur = cur % modulus.clone();
}
prod
}
#[test]
fn test_mod_pow() {
assert_eq!(mod_pow(234u64, 0, 1259), 1);
assert_eq!(mod_pow(234u64, 1, 1259), 234);
assert_eq!(mod_pow(234u64, 2412, 1259), 1091);
}
//-------------------------------
// isqrt helper methods
fn isqrt_fast<T>(x: T) -> Option<T>
where
T: MoreNumCast,
{
x.to_f64().and_then(|f| {
// Mantissa is 52 bits, and the square root takes half as many bits, so this
// may be a bit conservative. The main concern is to avoid handling very
// large BigInts which may lose more than half of their precision.
if f > 20f64.exp2() {
None // Number too large, bail out!
} else {
T::from_f64(f.sqrt().floor())
}
})
}
fn isqrt_safe<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T>,
{
// NOTE: while I'd like to remove the Shr bound, replacing '>> 1' with '/ 2' makes this
// algorithm take twice as long for BigInts :/
if n.is_zero() {
return Zero::zero();
}
let mut x = n.clone();
let mut y = (x.clone() + n.clone() / x.clone()) >> literal(1);
while y < x {
x = y.clone();
y = (x.clone() + n.clone() / x.clone()) >> literal(1);
}
return x;
}
#[bench]
fn bench_fast(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(|a| isqrt_fast::<usize>(a).unwrap())
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(isqrt_safe::<usize>)
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe_bigint(b: &mut Bencher) {
b.iter(|| {
(0i32..1000)
.map(|a| isqrt_safe::<BigUint>(literal(a)))
.collect::<Vec<BigUint>>()
})
}
#[bench]
fn bench_safe_massive_bigint(b: &mut Bencher) {
use rand_xorshift::XorShiftRng;
use rand::SeedableRng;
use num::bigint::RandBigInt;
let mut r = XorShiftRng::from_seed([
// what 'new_unseeded' used to do before it was unceremoniously removed.
0x19, 0x3a, 0x67, 0x54, // w
0xa8, 0xa7, 0xd4, 0x69, // x
0x97, 0x83, 0x0e, 0x05, // y
0x11, 0x3b, 0xa7, 0xbb, // z
]);
b.iter(|| {
(0usize..100)
.map(|_| isqrt_safe::<BigUint>(r.gen_biguint(100usize)))
.collect::<Vec<BigUint>>()
})
}
#[test]
fn test_isqrt_consistency() {
for x in 0usize..1000 {
let bigX = x.to_biguint().unwrap();
assert_eq!(isqrt_fast(x), Some(isqrt_safe(x)));
assert_eq!(isqrt_fast(bigX.clone()), Some(isqrt_safe(bigX.clone())));
}
}
#[cfg(test)]
mod tests {
use super::*;
use num::bigint::{ToBigUint,BigUint};
// need to test isqrt with BigInt more rigorously
}
|
random_line_split
|
|
util.rs
|
// Copyright 2015 Michael 'ExpHP' Lamparski
//
// Licensed under the terms of the MIT License, available at:
// http://opensource.org/licenses/MIT
// and also included in the file COPYING at the root of this distribution.
// This file may not be copied, modified, or distributed except according
// to those terms.
use std::ops::Shr;
use std::fmt::Debug;
use num::bigint::{ToBigUint,BigUint};
use num::{Zero,One,Integer};
use num::{ToPrimitive,FromPrimitive};
use rand::FromEntropy;
/// Services more types than NumCast does, such as BigInt.
pub trait MoreNumCast: ToPrimitive + FromPrimitive {}
impl<T: ToPrimitive + FromPrimitive> MoreNumCast for T {}
#[cfg(test)]
use test::Bencher;
/// Computes the greatest common divisor of two numbers using Euclid's method.
/// Behavior unspecified for negative numbers.
pub fn gcd<T>(a: T, b: T) -> T
where
T: Clone + Zero + Integer,
{
let mut cur_a = a;
let mut cur_b = b;
while (!cur_a.is_zero()) {
let old_b = cur_b;
cur_b = cur_a.clone();
cur_a = old_b % cur_a;
}
cur_b
}
#[bench]
fn bench_gcd(b: &mut Bencher) {
use rand::Rng;
let mut rng = ::rand::rngs::SmallRng::from_entropy();
b.iter(|| {
let a = rng.gen_range(100000u32, 1000000u32);
let b = rng.gen_range(100000u32, 1000000u32);
gcd(a, b)
})
}
/// Performs an integer square root, returning the largest integer whose square is not
/// greater than the argument.
pub fn isqrt<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T> + MoreNumCast,
{
isqrt_fast(n.clone())
.or_else(|| Some(isqrt_safe(n.clone())))
.unwrap()
}
/// Used to convert an integral literal into an arbitrary type.
/// For zero and one, `num::Zero::zero()` and `num::One::one()` is preferred when they
/// are used as the additive/multiplicative identity, and `literal` is used otherwise.
#[inline]
pub fn literal<T: MoreNumCast>(n: i32) -> T
where
T: MoreNumCast,
{
T::from_i32(n).unwrap()
}
/// Computes `pow(x, power) % modulus` using exponentation by squaring.
pub fn mod_pow<T, P>(x: T, power: P, modulus: T) -> T
where
T: Eq + Clone + Integer,
P: Eq + Clone + Integer + Shr<usize, Output = P>,
{
let mut prod: T = One::one();
let mut remaining = power;
let mut cur = x;
while remaining > Zero::zero() {
if remaining.is_odd() {
prod = prod * cur.clone();
prod = prod % modulus.clone();
}
remaining = remaining >> 1;
cur = cur.clone() * cur;
cur = cur % modulus.clone();
}
prod
}
#[test]
fn test_mod_pow() {
assert_eq!(mod_pow(234u64, 0, 1259), 1);
assert_eq!(mod_pow(234u64, 1, 1259), 234);
assert_eq!(mod_pow(234u64, 2412, 1259), 1091);
}
//-------------------------------
// isqrt helper methods
fn isqrt_fast<T>(x: T) -> Option<T>
where
T: MoreNumCast,
{
x.to_f64().and_then(|f| {
// Mantissa is 52 bits, and the square root takes half as many bits, so this
// may be a bit conservative. The main concern is to avoid handling very
// large BigInts which may lose more than half of their precision.
if f > 20f64.exp2() {
None // Number too large, bail out!
} else {
T::from_f64(f.sqrt().floor())
}
})
}
fn isqrt_safe<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T>,
{
// NOTE: while I'd like to remove the Shr bound, replacing '>> 1' with '/ 2' makes this
// algorithm take twice as long for BigInts :/
if n.is_zero()
|
let mut x = n.clone();
let mut y = (x.clone() + n.clone() / x.clone()) >> literal(1);
while y < x {
x = y.clone();
y = (x.clone() + n.clone() / x.clone()) >> literal(1);
}
return x;
}
#[bench]
fn bench_fast(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(|a| isqrt_fast::<usize>(a).unwrap())
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(isqrt_safe::<usize>)
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe_bigint(b: &mut Bencher) {
b.iter(|| {
(0i32..1000)
.map(|a| isqrt_safe::<BigUint>(literal(a)))
.collect::<Vec<BigUint>>()
})
}
#[bench]
fn bench_safe_massive_bigint(b: &mut Bencher) {
use rand_xorshift::XorShiftRng;
use rand::SeedableRng;
use num::bigint::RandBigInt;
let mut r = XorShiftRng::from_seed([
// what 'new_unseeded' used to do before it was unceremoniously removed.
0x19, 0x3a, 0x67, 0x54, // w
0xa8, 0xa7, 0xd4, 0x69, // x
0x97, 0x83, 0x0e, 0x05, // y
0x11, 0x3b, 0xa7, 0xbb, // z
]);
b.iter(|| {
(0usize..100)
.map(|_| isqrt_safe::<BigUint>(r.gen_biguint(100usize)))
.collect::<Vec<BigUint>>()
})
}
#[test]
fn test_isqrt_consistency() {
for x in 0usize..1000 {
let bigX = x.to_biguint().unwrap();
assert_eq!(isqrt_fast(x), Some(isqrt_safe(x)));
assert_eq!(isqrt_fast(bigX.clone()), Some(isqrt_safe(bigX.clone())));
}
}
#[cfg(test)]
mod tests {
use super::*;
use num::bigint::{ToBigUint,BigUint};
// need to test isqrt with BigInt more rigorously
}
|
{
return Zero::zero();
}
|
conditional_block
|
util.rs
|
// Copyright 2015 Michael 'ExpHP' Lamparski
//
// Licensed under the terms of the MIT License, available at:
// http://opensource.org/licenses/MIT
// and also included in the file COPYING at the root of this distribution.
// This file may not be copied, modified, or distributed except according
// to those terms.
use std::ops::Shr;
use std::fmt::Debug;
use num::bigint::{ToBigUint,BigUint};
use num::{Zero,One,Integer};
use num::{ToPrimitive,FromPrimitive};
use rand::FromEntropy;
/// Services more types than NumCast does, such as BigInt.
pub trait MoreNumCast: ToPrimitive + FromPrimitive {}
impl<T: ToPrimitive + FromPrimitive> MoreNumCast for T {}
#[cfg(test)]
use test::Bencher;
/// Computes the greatest common divisor of two numbers using Euclid's method.
/// Behavior unspecified for negative numbers.
pub fn gcd<T>(a: T, b: T) -> T
where
T: Clone + Zero + Integer,
{
let mut cur_a = a;
let mut cur_b = b;
while (!cur_a.is_zero()) {
let old_b = cur_b;
cur_b = cur_a.clone();
cur_a = old_b % cur_a;
}
cur_b
}
#[bench]
fn bench_gcd(b: &mut Bencher) {
use rand::Rng;
let mut rng = ::rand::rngs::SmallRng::from_entropy();
b.iter(|| {
let a = rng.gen_range(100000u32, 1000000u32);
let b = rng.gen_range(100000u32, 1000000u32);
gcd(a, b)
})
}
/// Performs an integer square root, returning the largest integer whose square is not
/// greater than the argument.
pub fn isqrt<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T> + MoreNumCast,
{
isqrt_fast(n.clone())
.or_else(|| Some(isqrt_safe(n.clone())))
.unwrap()
}
/// Used to convert an integral literal into an arbitrary type.
/// For zero and one, `num::Zero::zero()` and `num::One::one()` is preferred when they
/// are used as the additive/multiplicative identity, and `literal` is used otherwise.
#[inline]
pub fn literal<T: MoreNumCast>(n: i32) -> T
where
T: MoreNumCast,
{
T::from_i32(n).unwrap()
}
/// Computes `pow(x, power) % modulus` using exponentation by squaring.
pub fn mod_pow<T, P>(x: T, power: P, modulus: T) -> T
where
T: Eq + Clone + Integer,
P: Eq + Clone + Integer + Shr<usize, Output = P>,
{
let mut prod: T = One::one();
let mut remaining = power;
let mut cur = x;
while remaining > Zero::zero() {
if remaining.is_odd() {
prod = prod * cur.clone();
prod = prod % modulus.clone();
}
remaining = remaining >> 1;
cur = cur.clone() * cur;
cur = cur % modulus.clone();
}
prod
}
#[test]
fn test_mod_pow() {
assert_eq!(mod_pow(234u64, 0, 1259), 1);
assert_eq!(mod_pow(234u64, 1, 1259), 234);
assert_eq!(mod_pow(234u64, 2412, 1259), 1091);
}
//-------------------------------
// isqrt helper methods
fn isqrt_fast<T>(x: T) -> Option<T>
where
T: MoreNumCast,
|
fn isqrt_safe<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T>,
{
// NOTE: while I'd like to remove the Shr bound, replacing '>> 1' with '/ 2' makes this
// algorithm take twice as long for BigInts :/
if n.is_zero() {
return Zero::zero();
}
let mut x = n.clone();
let mut y = (x.clone() + n.clone() / x.clone()) >> literal(1);
while y < x {
x = y.clone();
y = (x.clone() + n.clone() / x.clone()) >> literal(1);
}
return x;
}
#[bench]
fn bench_fast(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(|a| isqrt_fast::<usize>(a).unwrap())
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(isqrt_safe::<usize>)
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe_bigint(b: &mut Bencher) {
b.iter(|| {
(0i32..1000)
.map(|a| isqrt_safe::<BigUint>(literal(a)))
.collect::<Vec<BigUint>>()
})
}
#[bench]
fn bench_safe_massive_bigint(b: &mut Bencher) {
use rand_xorshift::XorShiftRng;
use rand::SeedableRng;
use num::bigint::RandBigInt;
let mut r = XorShiftRng::from_seed([
// what 'new_unseeded' used to do before it was unceremoniously removed.
0x19, 0x3a, 0x67, 0x54, // w
0xa8, 0xa7, 0xd4, 0x69, // x
0x97, 0x83, 0x0e, 0x05, // y
0x11, 0x3b, 0xa7, 0xbb, // z
]);
b.iter(|| {
(0usize..100)
.map(|_| isqrt_safe::<BigUint>(r.gen_biguint(100usize)))
.collect::<Vec<BigUint>>()
})
}
#[test]
fn test_isqrt_consistency() {
for x in 0usize..1000 {
let bigX = x.to_biguint().unwrap();
assert_eq!(isqrt_fast(x), Some(isqrt_safe(x)));
assert_eq!(isqrt_fast(bigX.clone()), Some(isqrt_safe(bigX.clone())));
}
}
#[cfg(test)]
mod tests {
use super::*;
use num::bigint::{ToBigUint,BigUint};
// need to test isqrt with BigInt more rigorously
}
|
{
x.to_f64().and_then(|f| {
// Mantissa is 52 bits, and the square root takes half as many bits, so this
// may be a bit conservative. The main concern is to avoid handling very
// large BigInts which may lose more than half of their precision.
if f > 20f64.exp2() {
None // Number too large, bail out!
} else {
T::from_f64(f.sqrt().floor())
}
})
}
|
identifier_body
|
util.rs
|
// Copyright 2015 Michael 'ExpHP' Lamparski
//
// Licensed under the terms of the MIT License, available at:
// http://opensource.org/licenses/MIT
// and also included in the file COPYING at the root of this distribution.
// This file may not be copied, modified, or distributed except according
// to those terms.
use std::ops::Shr;
use std::fmt::Debug;
use num::bigint::{ToBigUint,BigUint};
use num::{Zero,One,Integer};
use num::{ToPrimitive,FromPrimitive};
use rand::FromEntropy;
/// Services more types than NumCast does, such as BigInt.
pub trait MoreNumCast: ToPrimitive + FromPrimitive {}
impl<T: ToPrimitive + FromPrimitive> MoreNumCast for T {}
#[cfg(test)]
use test::Bencher;
/// Computes the greatest common divisor of two numbers using Euclid's method.
/// Behavior unspecified for negative numbers.
pub fn
|
<T>(a: T, b: T) -> T
where
T: Clone + Zero + Integer,
{
let mut cur_a = a;
let mut cur_b = b;
while (!cur_a.is_zero()) {
let old_b = cur_b;
cur_b = cur_a.clone();
cur_a = old_b % cur_a;
}
cur_b
}
#[bench]
fn bench_gcd(b: &mut Bencher) {
use rand::Rng;
let mut rng = ::rand::rngs::SmallRng::from_entropy();
b.iter(|| {
let a = rng.gen_range(100000u32, 1000000u32);
let b = rng.gen_range(100000u32, 1000000u32);
gcd(a, b)
})
}
/// Performs an integer square root, returning the largest integer whose square is not
/// greater than the argument.
pub fn isqrt<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T> + MoreNumCast,
{
isqrt_fast(n.clone())
.or_else(|| Some(isqrt_safe(n.clone())))
.unwrap()
}
/// Used to convert an integral literal into an arbitrary type.
/// For zero and one, `num::Zero::zero()` and `num::One::one()` is preferred when they
/// are used as the additive/multiplicative identity, and `literal` is used otherwise.
#[inline]
pub fn literal<T: MoreNumCast>(n: i32) -> T
where
T: MoreNumCast,
{
T::from_i32(n).unwrap()
}
/// Computes `pow(x, power) % modulus` using exponentation by squaring.
pub fn mod_pow<T, P>(x: T, power: P, modulus: T) -> T
where
T: Eq + Clone + Integer,
P: Eq + Clone + Integer + Shr<usize, Output = P>,
{
let mut prod: T = One::one();
let mut remaining = power;
let mut cur = x;
while remaining > Zero::zero() {
if remaining.is_odd() {
prod = prod * cur.clone();
prod = prod % modulus.clone();
}
remaining = remaining >> 1;
cur = cur.clone() * cur;
cur = cur % modulus.clone();
}
prod
}
#[test]
fn test_mod_pow() {
assert_eq!(mod_pow(234u64, 0, 1259), 1);
assert_eq!(mod_pow(234u64, 1, 1259), 234);
assert_eq!(mod_pow(234u64, 2412, 1259), 1091);
}
//-------------------------------
// isqrt helper methods
fn isqrt_fast<T>(x: T) -> Option<T>
where
T: MoreNumCast,
{
x.to_f64().and_then(|f| {
// Mantissa is 52 bits, and the square root takes half as many bits, so this
// may be a bit conservative. The main concern is to avoid handling very
// large BigInts which may lose more than half of their precision.
if f > 20f64.exp2() {
None // Number too large, bail out!
} else {
T::from_f64(f.sqrt().floor())
}
})
}
fn isqrt_safe<T>(n: T) -> T
where
T: Clone + Zero + Integer + Shr<usize, Output = T>,
{
// NOTE: while I'd like to remove the Shr bound, replacing '>> 1' with '/ 2' makes this
// algorithm take twice as long for BigInts :/
if n.is_zero() {
return Zero::zero();
}
let mut x = n.clone();
let mut y = (x.clone() + n.clone() / x.clone()) >> literal(1);
while y < x {
x = y.clone();
y = (x.clone() + n.clone() / x.clone()) >> literal(1);
}
return x;
}
#[bench]
fn bench_fast(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(|a| isqrt_fast::<usize>(a).unwrap())
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe(b: &mut Bencher) {
b.iter(|| {
(0usize..1000)
.map(isqrt_safe::<usize>)
.collect::<Vec<usize>>()
})
}
#[bench]
fn bench_safe_bigint(b: &mut Bencher) {
b.iter(|| {
(0i32..1000)
.map(|a| isqrt_safe::<BigUint>(literal(a)))
.collect::<Vec<BigUint>>()
})
}
#[bench]
fn bench_safe_massive_bigint(b: &mut Bencher) {
use rand_xorshift::XorShiftRng;
use rand::SeedableRng;
use num::bigint::RandBigInt;
let mut r = XorShiftRng::from_seed([
// what 'new_unseeded' used to do before it was unceremoniously removed.
0x19, 0x3a, 0x67, 0x54, // w
0xa8, 0xa7, 0xd4, 0x69, // x
0x97, 0x83, 0x0e, 0x05, // y
0x11, 0x3b, 0xa7, 0xbb, // z
]);
b.iter(|| {
(0usize..100)
.map(|_| isqrt_safe::<BigUint>(r.gen_biguint(100usize)))
.collect::<Vec<BigUint>>()
})
}
#[test]
fn test_isqrt_consistency() {
for x in 0usize..1000 {
let bigX = x.to_biguint().unwrap();
assert_eq!(isqrt_fast(x), Some(isqrt_safe(x)));
assert_eq!(isqrt_fast(bigX.clone()), Some(isqrt_safe(bigX.clone())));
}
}
#[cfg(test)]
mod tests {
use super::*;
use num::bigint::{ToBigUint,BigUint};
// need to test isqrt with BigInt more rigorously
}
|
gcd
|
identifier_name
|
ui.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
// CSS Basic User Interface Module Level 1
// https://drafts.csswg.org/css-ui-3/
<% data.new_style_struct("UI", inherited=False, gecko_name="UIReset") %>
// TODO spec says that UAs should not support this
// we should probably remove from gecko (https://bugzilla.mozilla.org/show_bug.cgi?id=1328331)
${helpers.single_keyword("ime-mode", "auto normal active disabled inactive",
products="gecko", gecko_ffi_name="mIMEMode",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-ui/#input-method-editor")}
${helpers.single_keyword("-moz-user-select", "auto text none all element elements" +
" toggle tri-state -moz-all -moz-text",
products="gecko",
alias="-webkit-user-select",
gecko_ffi_name="mUserSelect",
gecko_enum_prefix="StyleUserSelect",
gecko_strip_moz_prefix=False,
aliases="-moz-none=none",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-ui-4/#propdef-user-select")}
${helpers.single_keyword("-moz-window-dragging", "default drag no-drag", products="gecko",
gecko_ffi_name="mWindowDragging",
gecko_enum_prefix="StyleWindowDragging",
animation_value_type="discrete",
spec="None (Nonstandard Firefox-only property)")}
${helpers.single_keyword("-moz-window-shadow", "none default menu tooltip sheet", products="gecko",
gecko_ffi_name="mWindowShadow",
gecko_constant_prefix="NS_STYLE_WINDOW_SHADOW",
animation_value_type="discrete",
internal=True,
spec="None (Nonstandard internal property)")}
<%helpers:longhand name="-moz-force-broken-image-icon"
products="gecko"
animation_value_type="discrete"
spec="None (Nonstandard Firefox-only property)">
use std::fmt;
use style_traits::ToCss;
pub mod computed_value {
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct
|
(pub bool);
}
pub use self::computed_value::T as SpecifiedValue;
impl ToCss for computed_value::T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(if self.0 { "1" } else { "0" })
}
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T(false)
}
#[inline]
pub fn get_initial_specified_value() -> SpecifiedValue {
computed_value::T(false)
}
pub fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<SpecifiedValue, ParseError<'i>> {
match input.expect_integer()? {
0 => Ok(computed_value::T(false)),
1 => Ok(computed_value::T(true)),
_ => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)),
}
}
impl From<u8> for SpecifiedValue {
fn from(bits: u8) -> SpecifiedValue {
SpecifiedValue(bits == 1)
}
}
impl From<SpecifiedValue> for u8 {
fn from(v: SpecifiedValue) -> u8 {
match v.0 {
true => 1u8,
false => 0u8,
}
}
}
</%helpers:longhand>
|
T
|
identifier_name
|
ui.mako.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
<%namespace name="helpers" file="/helpers.mako.rs" />
<% from data import Method %>
// CSS Basic User Interface Module Level 1
// https://drafts.csswg.org/css-ui-3/
<% data.new_style_struct("UI", inherited=False, gecko_name="UIReset") %>
// TODO spec says that UAs should not support this
// we should probably remove from gecko (https://bugzilla.mozilla.org/show_bug.cgi?id=1328331)
${helpers.single_keyword("ime-mode", "auto normal active disabled inactive",
products="gecko", gecko_ffi_name="mIMEMode",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-ui/#input-method-editor")}
${helpers.single_keyword("-moz-user-select", "auto text none all element elements" +
" toggle tri-state -moz-all -moz-text",
products="gecko",
alias="-webkit-user-select",
gecko_ffi_name="mUserSelect",
gecko_enum_prefix="StyleUserSelect",
gecko_strip_moz_prefix=False,
aliases="-moz-none=none",
animation_value_type="discrete",
spec="https://drafts.csswg.org/css-ui-4/#propdef-user-select")}
${helpers.single_keyword("-moz-window-dragging", "default drag no-drag", products="gecko",
gecko_ffi_name="mWindowDragging",
|
${helpers.single_keyword("-moz-window-shadow", "none default menu tooltip sheet", products="gecko",
gecko_ffi_name="mWindowShadow",
gecko_constant_prefix="NS_STYLE_WINDOW_SHADOW",
animation_value_type="discrete",
internal=True,
spec="None (Nonstandard internal property)")}
<%helpers:longhand name="-moz-force-broken-image-icon"
products="gecko"
animation_value_type="discrete"
spec="None (Nonstandard Firefox-only property)">
use std::fmt;
use style_traits::ToCss;
pub mod computed_value {
#[derive(Clone, Copy, Debug, MallocSizeOf, PartialEq, ToComputedValue)]
pub struct T(pub bool);
}
pub use self::computed_value::T as SpecifiedValue;
impl ToCss for computed_value::T {
fn to_css<W>(&self, dest: &mut W) -> fmt::Result where W: fmt::Write {
dest.write_str(if self.0 { "1" } else { "0" })
}
}
#[inline]
pub fn get_initial_value() -> computed_value::T {
computed_value::T(false)
}
#[inline]
pub fn get_initial_specified_value() -> SpecifiedValue {
computed_value::T(false)
}
pub fn parse<'i, 't>(_context: &ParserContext, input: &mut Parser<'i, 't>)
-> Result<SpecifiedValue, ParseError<'i>> {
match input.expect_integer()? {
0 => Ok(computed_value::T(false)),
1 => Ok(computed_value::T(true)),
_ => Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError)),
}
}
impl From<u8> for SpecifiedValue {
fn from(bits: u8) -> SpecifiedValue {
SpecifiedValue(bits == 1)
}
}
impl From<SpecifiedValue> for u8 {
fn from(v: SpecifiedValue) -> u8 {
match v.0 {
true => 1u8,
false => 0u8,
}
}
}
</%helpers:longhand>
|
gecko_enum_prefix="StyleWindowDragging",
animation_value_type="discrete",
spec="None (Nonstandard Firefox-only property)")}
|
random_line_split
|
block_limits.rs
|
extern crate blockstack_lib;
extern crate serde_json;
use blockstack_lib::{
chainstate::stacks::index::storage::{TrieFileStorage},
chainstate::burn::BlockHeaderHash,
vm::types::{QualifiedContractIdentifier},
vm::database::{MarfedKV, NULL_HEADER_DB},
vm::clarity::ClarityInstance,
vm::costs::ExecutionCost,
};
use std::env;
use std::process;
use std::fmt::Write;
fn test_via_tx(scaling: u32, inner_loop: &str, other_decl: &str) -> ExecutionCost {
let marf = MarfedKV::temporary();
let mut clarity_instance = ClarityInstance::new(marf, ExecutionCost::max_value());
let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap();
let blocks = [TrieFileStorage::block_sentinel(),
BlockHeaderHash::from_bytes(&[1 as u8; 32]).unwrap(),
BlockHeaderHash::from_bytes(&[2 as u8; 32]).unwrap()];
{
let mut conn = clarity_instance.begin_block(&blocks[0],
&blocks[1],
&NULL_HEADER_DB);
let mut contract = "(define-constant list-0 (list 0))".to_string();
for i in 0..15 {
contract.push_str("\n");
contract.push_str(
&format!("(define-constant list-{} (concat list-{} list-{}))",
i+1, i, i));
}
contract.push_str("\n");
contract.push_str(other_decl);
contract.push_str("\n");
contract.push_str(inner_loop);
write!(contract, "\n(define-private (outer-loop) (map inner-loop list-10))\n").unwrap();
write!(contract, "(define-public (do-it) (begin \n").unwrap();
for _i in 0..scaling {
write!(contract, "(outer-loop)\n").unwrap();
}
write!(contract, " (ok 1)))\n").unwrap();
let (ct_ast, _ct_analysis) = conn.analyze_smart_contract(&contract_identifier, &contract).unwrap();
conn.initialize_smart_contract(
// initialize the ok contract without errs, but still abort.
&contract_identifier, &ct_ast, &contract, |_,_| false).unwrap();
conn.commit_to_block(&blocks[1]);
}
{
let mut conn = clarity_instance.begin_block(&blocks[1], &blocks[2], &NULL_HEADER_DB);
conn.run_contract_call(&contract_identifier.clone().into(),
&contract_identifier, "do-it", &[], |_, _| false).unwrap();
conn.commit_to_block(&blocks[2]).get_total()
}
}
// on a fairly underpowered laptop:
// read-length of ~1e9 corresponds to 10 seconds. (scaling => 2)
fn read_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 33000 int) list-15)";
let inner_loop = "(define-private (inner-loop (x int)) (len (var-get var-to-read)))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// read-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn read_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-get var-to-read))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-length of ~1e8 corresponds to 10 seconds. (scaling => 10)
// at scaling = 5, the tx takes about 5 seconds => write-length of ~8e7,
// so for ~10s, max write-len should be 1.5e8
fn write_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 1024 int) list-10)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read list-10))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn write_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// runtime count of ~1e8 corresponds to 10 seconds. (scaling => 6)
fn runtime_hash_test(scaling: u32) -> ExecutionCost
|
fn main() {
let argv: Vec<_> = env::args().collect();
if argv.len() < 3 {
eprintln!("Usage: {} [test-name] [scalar]", argv[0]);
process::exit(1);
}
let scalar = argv[2].parse().expect("Invalid scalar");
let result = match argv[1].as_str() {
"runtime" => {
runtime_hash_test(scalar)
},
"read-length" => {
read_length_test(scalar)
},
"read-count" => {
read_count_test(scalar)
},
"write-count" => {
write_count_test(scalar)
},
"write-length" => {
write_length_test(scalar)
},
_ => {
eprintln!("bad test name");
process::exit(1);
},
};
println!("{}", serde_json::to_string(&result).unwrap());
}
|
{
let other_decl = "";
let inner_loop = "(define-private (inner-loop (x int)) (begin (map sha512 list-10) 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
|
identifier_body
|
block_limits.rs
|
extern crate blockstack_lib;
extern crate serde_json;
use blockstack_lib::{
chainstate::stacks::index::storage::{TrieFileStorage},
chainstate::burn::BlockHeaderHash,
vm::types::{QualifiedContractIdentifier},
vm::database::{MarfedKV, NULL_HEADER_DB},
vm::clarity::ClarityInstance,
vm::costs::ExecutionCost,
};
use std::env;
use std::process;
use std::fmt::Write;
fn
|
(scaling: u32, inner_loop: &str, other_decl: &str) -> ExecutionCost {
let marf = MarfedKV::temporary();
let mut clarity_instance = ClarityInstance::new(marf, ExecutionCost::max_value());
let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap();
let blocks = [TrieFileStorage::block_sentinel(),
BlockHeaderHash::from_bytes(&[1 as u8; 32]).unwrap(),
BlockHeaderHash::from_bytes(&[2 as u8; 32]).unwrap()];
{
let mut conn = clarity_instance.begin_block(&blocks[0],
&blocks[1],
&NULL_HEADER_DB);
let mut contract = "(define-constant list-0 (list 0))".to_string();
for i in 0..15 {
contract.push_str("\n");
contract.push_str(
&format!("(define-constant list-{} (concat list-{} list-{}))",
i+1, i, i));
}
contract.push_str("\n");
contract.push_str(other_decl);
contract.push_str("\n");
contract.push_str(inner_loop);
write!(contract, "\n(define-private (outer-loop) (map inner-loop list-10))\n").unwrap();
write!(contract, "(define-public (do-it) (begin \n").unwrap();
for _i in 0..scaling {
write!(contract, "(outer-loop)\n").unwrap();
}
write!(contract, " (ok 1)))\n").unwrap();
let (ct_ast, _ct_analysis) = conn.analyze_smart_contract(&contract_identifier, &contract).unwrap();
conn.initialize_smart_contract(
// initialize the ok contract without errs, but still abort.
&contract_identifier, &ct_ast, &contract, |_,_| false).unwrap();
conn.commit_to_block(&blocks[1]);
}
{
let mut conn = clarity_instance.begin_block(&blocks[1], &blocks[2], &NULL_HEADER_DB);
conn.run_contract_call(&contract_identifier.clone().into(),
&contract_identifier, "do-it", &[], |_, _| false).unwrap();
conn.commit_to_block(&blocks[2]).get_total()
}
}
// on a fairly underpowered laptop:
// read-length of ~1e9 corresponds to 10 seconds. (scaling => 2)
fn read_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 33000 int) list-15)";
let inner_loop = "(define-private (inner-loop (x int)) (len (var-get var-to-read)))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// read-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn read_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-get var-to-read))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-length of ~1e8 corresponds to 10 seconds. (scaling => 10)
// at scaling = 5, the tx takes about 5 seconds => write-length of ~8e7,
// so for ~10s, max write-len should be 1.5e8
fn write_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 1024 int) list-10)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read list-10))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn write_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// runtime count of ~1e8 corresponds to 10 seconds. (scaling => 6)
fn runtime_hash_test(scaling: u32) -> ExecutionCost {
let other_decl = "";
let inner_loop = "(define-private (inner-loop (x int)) (begin (map sha512 list-10) 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
fn main() {
let argv: Vec<_> = env::args().collect();
if argv.len() < 3 {
eprintln!("Usage: {} [test-name] [scalar]", argv[0]);
process::exit(1);
}
let scalar = argv[2].parse().expect("Invalid scalar");
let result = match argv[1].as_str() {
"runtime" => {
runtime_hash_test(scalar)
},
"read-length" => {
read_length_test(scalar)
},
"read-count" => {
read_count_test(scalar)
},
"write-count" => {
write_count_test(scalar)
},
"write-length" => {
write_length_test(scalar)
},
_ => {
eprintln!("bad test name");
process::exit(1);
},
};
println!("{}", serde_json::to_string(&result).unwrap());
}
|
test_via_tx
|
identifier_name
|
block_limits.rs
|
extern crate blockstack_lib;
extern crate serde_json;
use blockstack_lib::{
chainstate::stacks::index::storage::{TrieFileStorage},
chainstate::burn::BlockHeaderHash,
vm::types::{QualifiedContractIdentifier},
vm::database::{MarfedKV, NULL_HEADER_DB},
vm::clarity::ClarityInstance,
vm::costs::ExecutionCost,
};
use std::env;
use std::process;
use std::fmt::Write;
fn test_via_tx(scaling: u32, inner_loop: &str, other_decl: &str) -> ExecutionCost {
let marf = MarfedKV::temporary();
let mut clarity_instance = ClarityInstance::new(marf, ExecutionCost::max_value());
let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap();
let blocks = [TrieFileStorage::block_sentinel(),
BlockHeaderHash::from_bytes(&[1 as u8; 32]).unwrap(),
BlockHeaderHash::from_bytes(&[2 as u8; 32]).unwrap()];
{
let mut conn = clarity_instance.begin_block(&blocks[0],
&blocks[1],
&NULL_HEADER_DB);
let mut contract = "(define-constant list-0 (list 0))".to_string();
for i in 0..15 {
contract.push_str("\n");
contract.push_str(
&format!("(define-constant list-{} (concat list-{} list-{}))",
i+1, i, i));
}
contract.push_str("\n");
contract.push_str(other_decl);
contract.push_str("\n");
contract.push_str(inner_loop);
write!(contract, "\n(define-private (outer-loop) (map inner-loop list-10))\n").unwrap();
write!(contract, "(define-public (do-it) (begin \n").unwrap();
for _i in 0..scaling {
write!(contract, "(outer-loop)\n").unwrap();
}
write!(contract, " (ok 1)))\n").unwrap();
let (ct_ast, _ct_analysis) = conn.analyze_smart_contract(&contract_identifier, &contract).unwrap();
conn.initialize_smart_contract(
// initialize the ok contract without errs, but still abort.
&contract_identifier, &ct_ast, &contract, |_,_| false).unwrap();
conn.commit_to_block(&blocks[1]);
}
{
let mut conn = clarity_instance.begin_block(&blocks[1], &blocks[2], &NULL_HEADER_DB);
conn.run_contract_call(&contract_identifier.clone().into(),
&contract_identifier, "do-it", &[], |_, _| false).unwrap();
conn.commit_to_block(&blocks[2]).get_total()
}
}
// on a fairly underpowered laptop:
// read-length of ~1e9 corresponds to 10 seconds. (scaling => 2)
fn read_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 33000 int) list-15)";
let inner_loop = "(define-private (inner-loop (x int)) (len (var-get var-to-read)))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// read-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn read_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-get var-to-read))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-length of ~1e8 corresponds to 10 seconds. (scaling => 10)
// at scaling = 5, the tx takes about 5 seconds => write-length of ~8e7,
// so for ~10s, max write-len should be 1.5e8
fn write_length_test(scaling: u32) -> ExecutionCost {
|
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn write_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// runtime count of ~1e8 corresponds to 10 seconds. (scaling => 6)
fn runtime_hash_test(scaling: u32) -> ExecutionCost {
let other_decl = "";
let inner_loop = "(define-private (inner-loop (x int)) (begin (map sha512 list-10) 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
fn main() {
let argv: Vec<_> = env::args().collect();
if argv.len() < 3 {
eprintln!("Usage: {} [test-name] [scalar]", argv[0]);
process::exit(1);
}
let scalar = argv[2].parse().expect("Invalid scalar");
let result = match argv[1].as_str() {
"runtime" => {
runtime_hash_test(scalar)
},
"read-length" => {
read_length_test(scalar)
},
"read-count" => {
read_count_test(scalar)
},
"write-count" => {
write_count_test(scalar)
},
"write-length" => {
write_length_test(scalar)
},
_ => {
eprintln!("bad test name");
process::exit(1);
},
};
println!("{}", serde_json::to_string(&result).unwrap());
}
|
let other_decl = "(define-data-var var-to-read (list 1024 int) list-10)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read list-10))";
|
random_line_split
|
block_limits.rs
|
extern crate blockstack_lib;
extern crate serde_json;
use blockstack_lib::{
chainstate::stacks::index::storage::{TrieFileStorage},
chainstate::burn::BlockHeaderHash,
vm::types::{QualifiedContractIdentifier},
vm::database::{MarfedKV, NULL_HEADER_DB},
vm::clarity::ClarityInstance,
vm::costs::ExecutionCost,
};
use std::env;
use std::process;
use std::fmt::Write;
fn test_via_tx(scaling: u32, inner_loop: &str, other_decl: &str) -> ExecutionCost {
let marf = MarfedKV::temporary();
let mut clarity_instance = ClarityInstance::new(marf, ExecutionCost::max_value());
let contract_identifier = QualifiedContractIdentifier::local("foo").unwrap();
let blocks = [TrieFileStorage::block_sentinel(),
BlockHeaderHash::from_bytes(&[1 as u8; 32]).unwrap(),
BlockHeaderHash::from_bytes(&[2 as u8; 32]).unwrap()];
{
let mut conn = clarity_instance.begin_block(&blocks[0],
&blocks[1],
&NULL_HEADER_DB);
let mut contract = "(define-constant list-0 (list 0))".to_string();
for i in 0..15 {
contract.push_str("\n");
contract.push_str(
&format!("(define-constant list-{} (concat list-{} list-{}))",
i+1, i, i));
}
contract.push_str("\n");
contract.push_str(other_decl);
contract.push_str("\n");
contract.push_str(inner_loop);
write!(contract, "\n(define-private (outer-loop) (map inner-loop list-10))\n").unwrap();
write!(contract, "(define-public (do-it) (begin \n").unwrap();
for _i in 0..scaling {
write!(contract, "(outer-loop)\n").unwrap();
}
write!(contract, " (ok 1)))\n").unwrap();
let (ct_ast, _ct_analysis) = conn.analyze_smart_contract(&contract_identifier, &contract).unwrap();
conn.initialize_smart_contract(
// initialize the ok contract without errs, but still abort.
&contract_identifier, &ct_ast, &contract, |_,_| false).unwrap();
conn.commit_to_block(&blocks[1]);
}
{
let mut conn = clarity_instance.begin_block(&blocks[1], &blocks[2], &NULL_HEADER_DB);
conn.run_contract_call(&contract_identifier.clone().into(),
&contract_identifier, "do-it", &[], |_, _| false).unwrap();
conn.commit_to_block(&blocks[2]).get_total()
}
}
// on a fairly underpowered laptop:
// read-length of ~1e9 corresponds to 10 seconds. (scaling => 2)
fn read_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 33000 int) list-15)";
let inner_loop = "(define-private (inner-loop (x int)) (len (var-get var-to-read)))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// read-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn read_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-get var-to-read))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-length of ~1e8 corresponds to 10 seconds. (scaling => 10)
// at scaling = 5, the tx takes about 5 seconds => write-length of ~8e7,
// so for ~10s, max write-len should be 1.5e8
fn write_length_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read (list 1024 int) list-10)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read list-10))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// write-count of ~50k corresponds to 10 seconds. (scaling => 50)
fn write_count_test(scaling: u32) -> ExecutionCost {
let other_decl = "(define-data-var var-to-read int 0)";
let inner_loop = "(define-private (inner-loop (x int)) (var-set var-to-read 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
// on a fairly underpowered laptop:
// runtime count of ~1e8 corresponds to 10 seconds. (scaling => 6)
fn runtime_hash_test(scaling: u32) -> ExecutionCost {
let other_decl = "";
let inner_loop = "(define-private (inner-loop (x int)) (begin (map sha512 list-10) 0))";
test_via_tx(scaling, inner_loop, other_decl)
}
fn main() {
let argv: Vec<_> = env::args().collect();
if argv.len() < 3 {
eprintln!("Usage: {} [test-name] [scalar]", argv[0]);
process::exit(1);
}
let scalar = argv[2].parse().expect("Invalid scalar");
let result = match argv[1].as_str() {
"runtime" => {
runtime_hash_test(scalar)
},
"read-length" => {
read_length_test(scalar)
},
"read-count" => {
read_count_test(scalar)
},
"write-count" => {
write_count_test(scalar)
},
"write-length" => {
write_length_test(scalar)
},
_ =>
|
,
};
println!("{}", serde_json::to_string(&result).unwrap());
}
|
{
eprintln!("bad test name");
process::exit(1);
}
|
conditional_block
|
lu.rs
|
#[macro_use]
extern crate linxal;
extern crate ndarray;
extern crate num_traits;
use ndarray::{Array, ArrayBase, Data, Ix2};
use linxal::factorization::{LUError, LUFactors};
use linxal::types::{LinxalMatrix};
use std::cmp;
/// Check that all the properties of the lu factorization are
/// reasonable.
fn check_lu<D1: Data<Elem=f32>>(
m: &ArrayBase<D1, Ix2>, lu: &LUFactors<f32>, invertible: bool) {
let l: Array<f32, Ix2> = lu.l();
let u: Array<f32, Ix2> = lu.u();
println!("{:?}", l);
println!("{:?}", u);
let k = cmp::min(m.rows(), m.cols());
assert_eq!(l.rows(), lu.rows());
assert_eq!(l.cols(), k);
assert_eq!(u.rows(), k);
assert_eq!(u.cols(), lu.cols());
let reconstructed_a = lu.reconstruct();
println!("{:?}", reconstructed_a);
// P * L * U needs to match the original matrix.
assert_eq_within_tol!(reconstructed_a, m, 0.001);
let inverse = lu.inverse();
if lu.rows() == lu.cols() {
if invertible {
assert!(inverse.is_ok());
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::Singular);
}
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::NotSquare);
}
}
#[test]
fn lu_diag() {
let mut m = Array::zeros((4, 4));
m.diag_mut().assign(&Array::linspace(1.0, 4.0, 4));
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
#[test]
fn lu_rectanglular() {
let v: Vec<f32> = (0..12).map(|x| (x * x) as f32).collect();
let m = Array::from_vec(v).into_shape((3, 4)).unwrap();
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
// TODO: netlib and opneblas seem to disagree on the correct behavior
// for row-rank matrices function.
// #[test]
// fn lu_rectanglular_singular() {
// let m = Array::linspace(0.0, 11.0, 12).into_shape((4, 3)).unwrap();
// let lu = LUFactors::compute(&m);
// assert!(lu.is_err());
// assert_eq!(lu.err().unwrap(), LUError::Singular);
// }
#[test]
fn lu_perm_diag()
|
{
let mut m = Array::zeros((4, 4));
m[(0, 0)] = 1.0;
m[(1, 2)] = 2.0;
m[(2, 1)] = 3.0;
m[(3, 3)] = 4.0;
let lu = m.lu();
println!("{:?}", lu);
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
|
identifier_body
|
|
lu.rs
|
#[macro_use]
extern crate linxal;
extern crate ndarray;
extern crate num_traits;
use ndarray::{Array, ArrayBase, Data, Ix2};
use linxal::factorization::{LUError, LUFactors};
use linxal::types::{LinxalMatrix};
use std::cmp;
/// Check that all the properties of the lu factorization are
/// reasonable.
fn check_lu<D1: Data<Elem=f32>>(
m: &ArrayBase<D1, Ix2>, lu: &LUFactors<f32>, invertible: bool) {
let l: Array<f32, Ix2> = lu.l();
let u: Array<f32, Ix2> = lu.u();
println!("{:?}", l);
println!("{:?}", u);
let k = cmp::min(m.rows(), m.cols());
assert_eq!(l.rows(), lu.rows());
assert_eq!(l.cols(), k);
assert_eq!(u.rows(), k);
assert_eq!(u.cols(), lu.cols());
let reconstructed_a = lu.reconstruct();
println!("{:?}", reconstructed_a);
// P * L * U needs to match the original matrix.
assert_eq_within_tol!(reconstructed_a, m, 0.001);
let inverse = lu.inverse();
if lu.rows() == lu.cols() {
if invertible {
assert!(inverse.is_ok());
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::Singular);
}
} else
|
}
#[test]
fn lu_diag() {
let mut m = Array::zeros((4, 4));
m.diag_mut().assign(&Array::linspace(1.0, 4.0, 4));
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
#[test]
fn lu_rectanglular() {
let v: Vec<f32> = (0..12).map(|x| (x * x) as f32).collect();
let m = Array::from_vec(v).into_shape((3, 4)).unwrap();
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
// TODO: netlib and opneblas seem to disagree on the correct behavior
// for row-rank matrices function.
// #[test]
// fn lu_rectanglular_singular() {
// let m = Array::linspace(0.0, 11.0, 12).into_shape((4, 3)).unwrap();
// let lu = LUFactors::compute(&m);
// assert!(lu.is_err());
// assert_eq!(lu.err().unwrap(), LUError::Singular);
// }
#[test]
fn lu_perm_diag() {
let mut m = Array::zeros((4, 4));
m[(0, 0)] = 1.0;
m[(1, 2)] = 2.0;
m[(2, 1)] = 3.0;
m[(3, 3)] = 4.0;
let lu = m.lu();
println!("{:?}", lu);
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
|
{
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::NotSquare);
}
|
conditional_block
|
lu.rs
|
#[macro_use]
extern crate linxal;
extern crate ndarray;
extern crate num_traits;
use ndarray::{Array, ArrayBase, Data, Ix2};
use linxal::factorization::{LUError, LUFactors};
use linxal::types::{LinxalMatrix};
use std::cmp;
/// Check that all the properties of the lu factorization are
/// reasonable.
fn check_lu<D1: Data<Elem=f32>>(
m: &ArrayBase<D1, Ix2>, lu: &LUFactors<f32>, invertible: bool) {
let l: Array<f32, Ix2> = lu.l();
let u: Array<f32, Ix2> = lu.u();
println!("{:?}", l);
println!("{:?}", u);
let k = cmp::min(m.rows(), m.cols());
assert_eq!(l.rows(), lu.rows());
assert_eq!(l.cols(), k);
assert_eq!(u.rows(), k);
assert_eq!(u.cols(), lu.cols());
let reconstructed_a = lu.reconstruct();
println!("{:?}", reconstructed_a);
// P * L * U needs to match the original matrix.
assert_eq_within_tol!(reconstructed_a, m, 0.001);
let inverse = lu.inverse();
if lu.rows() == lu.cols() {
if invertible {
assert!(inverse.is_ok());
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::Singular);
}
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::NotSquare);
}
}
#[test]
fn
|
() {
let mut m = Array::zeros((4, 4));
m.diag_mut().assign(&Array::linspace(1.0, 4.0, 4));
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
#[test]
fn lu_rectanglular() {
let v: Vec<f32> = (0..12).map(|x| (x * x) as f32).collect();
let m = Array::from_vec(v).into_shape((3, 4)).unwrap();
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
// TODO: netlib and opneblas seem to disagree on the correct behavior
// for row-rank matrices function.
// #[test]
// fn lu_rectanglular_singular() {
// let m = Array::linspace(0.0, 11.0, 12).into_shape((4, 3)).unwrap();
// let lu = LUFactors::compute(&m);
// assert!(lu.is_err());
// assert_eq!(lu.err().unwrap(), LUError::Singular);
// }
#[test]
fn lu_perm_diag() {
let mut m = Array::zeros((4, 4));
m[(0, 0)] = 1.0;
m[(1, 2)] = 2.0;
m[(2, 1)] = 3.0;
m[(3, 3)] = 4.0;
let lu = m.lu();
println!("{:?}", lu);
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
|
lu_diag
|
identifier_name
|
lu.rs
|
#[macro_use]
extern crate linxal;
extern crate ndarray;
extern crate num_traits;
|
use linxal::factorization::{LUError, LUFactors};
use linxal::types::{LinxalMatrix};
use std::cmp;
/// Check that all the properties of the lu factorization are
/// reasonable.
fn check_lu<D1: Data<Elem=f32>>(
m: &ArrayBase<D1, Ix2>, lu: &LUFactors<f32>, invertible: bool) {
let l: Array<f32, Ix2> = lu.l();
let u: Array<f32, Ix2> = lu.u();
println!("{:?}", l);
println!("{:?}", u);
let k = cmp::min(m.rows(), m.cols());
assert_eq!(l.rows(), lu.rows());
assert_eq!(l.cols(), k);
assert_eq!(u.rows(), k);
assert_eq!(u.cols(), lu.cols());
let reconstructed_a = lu.reconstruct();
println!("{:?}", reconstructed_a);
// P * L * U needs to match the original matrix.
assert_eq_within_tol!(reconstructed_a, m, 0.001);
let inverse = lu.inverse();
if lu.rows() == lu.cols() {
if invertible {
assert!(inverse.is_ok());
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::Singular);
}
} else {
assert!(inverse.is_err());
assert_eq!(inverse.err().unwrap(), LUError::NotSquare);
}
}
#[test]
fn lu_diag() {
let mut m = Array::zeros((4, 4));
m.diag_mut().assign(&Array::linspace(1.0, 4.0, 4));
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
#[test]
fn lu_rectanglular() {
let v: Vec<f32> = (0..12).map(|x| (x * x) as f32).collect();
let m = Array::from_vec(v).into_shape((3, 4)).unwrap();
let lu = m.lu();
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
// TODO: netlib and opneblas seem to disagree on the correct behavior
// for row-rank matrices function.
// #[test]
// fn lu_rectanglular_singular() {
// let m = Array::linspace(0.0, 11.0, 12).into_shape((4, 3)).unwrap();
// let lu = LUFactors::compute(&m);
// assert!(lu.is_err());
// assert_eq!(lu.err().unwrap(), LUError::Singular);
// }
#[test]
fn lu_perm_diag() {
let mut m = Array::zeros((4, 4));
m[(0, 0)] = 1.0;
m[(1, 2)] = 2.0;
m[(2, 1)] = 3.0;
m[(3, 3)] = 4.0;
let lu = m.lu();
println!("{:?}", lu);
assert!(lu.is_ok());
check_lu(&m, &lu.unwrap(), true);
}
|
use ndarray::{Array, ArrayBase, Data, Ix2};
|
random_line_split
|
recorder.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie query recorder.
use keccak::keccak;
use bigint::hash::H256;
use bytes::Bytes;
/// A record of a visited node.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Record {
/// The depth of this node.
pub depth: u32,
/// The raw data of the node.
pub data: Bytes,
/// The hash of the data.
pub hash: H256,
}
/// Records trie nodes as they pass it.
#[derive(Debug)]
pub struct Recorder {
nodes: Vec<Record>,
min_depth: u32,
}
impl Default for Recorder {
fn default() -> Self {
Recorder::new()
}
}
impl Recorder {
/// Create a new `Recorder` which records all given nodes.
#[inline]
pub fn new() -> Self {
Recorder::with_depth(0)
}
/// Create a `Recorder` which only records nodes beyond a given depth.
pub fn with_depth(depth: u32) -> Self {
Recorder {
nodes: Vec::new(),
min_depth: depth,
}
}
/// Record a visited node, given its hash, data, and depth.
pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) {
debug_assert_eq!(keccak(data), *hash);
if depth >= self.min_depth {
self.nodes.push(Record {
depth: depth,
data: data.into(),
hash: *hash,
})
}
}
/// Drain all visited records.
pub fn drain(&mut self) -> Vec<Record> {
::std::mem::replace(&mut self.nodes, Vec::new())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bigint::hash::H256;
#[test]
fn basic_recorder() {
let mut basic = Recorder::new();
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let (hash1, hash2) = (keccak(&node1), keccak(&node2));
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let record1 = Record {
data: node1,
hash: hash1,
depth: 0,
};
let record2 = Record {
data: node2,
hash: hash2,
depth: 456
};
assert_eq!(basic.drain(), vec![record1, record2]);
}
#[test]
fn basic_recorder_min_depth() {
let mut basic = Recorder::with_depth(400);
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let hash1 = keccak(&node1);
let hash2 = keccak(&node2);
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let records = basic.drain();
assert_eq!(records.len(), 1);
assert_eq!(records[0].clone(), Record {
data: node2,
hash: hash2,
depth: 456,
});
}
#[test]
fn trie_record() {
use super::super::{TrieDB, TrieDBMut, Trie, TrieMut};
use memorydb::MemoryDB;
let mut db = MemoryDB::new();
let mut root = H256::default();
{
let mut x = TrieDBMut::new(&mut db, &mut root);
x.insert(b"dog", b"cat").unwrap();
x.insert(b"lunch", b"time").unwrap();
x.insert(b"notdog", b"notcat").unwrap();
x.insert(b"hotdog", b"hotcat").unwrap();
x.insert(b"letter", b"confusion").unwrap();
x.insert(b"insert", b"remove").unwrap();
x.insert(b"pirate", b"aargh!").unwrap();
x.insert(b"yo ho ho", b"and a bottle of rum").unwrap();
}
let trie = TrieDB::new(&db, &root).unwrap();
let mut recorder = Recorder::new();
trie.get_with(b"pirate", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, 59,
110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, 0, 236,
102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 60, 206, 134, 32, 105, 114, 97, 116, 101, 134, 97, 97, 114, 103, 104, 33,
128, 128, 128, 128, 128, 128, 128, 128, 221, 136, 32, 111, 32, 104, 111, 32, 104,
111, 147, 97, 110, 100, 32, 97, 32, 98, 111, 116, 116, 108, 101, 32, 111, 102,
32, 114, 117, 109, 128, 128, 128, 128, 128, 128, 128
]
]);
trie.get_with(b"letter", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
|
],
vec![
248, 99, 128, 128, 128, 128, 200, 131, 32, 111, 103, 131, 99, 97, 116, 128, 128,
128, 206, 134, 32, 111, 116, 100, 111, 103, 134, 104, 111, 116, 99, 97, 116, 206,
134, 32, 110, 115, 101, 114, 116, 134, 114, 101, 109, 111, 118, 101, 128, 128,
160, 202, 250, 252, 153, 229, 63, 255, 13, 100, 197, 80, 120, 190, 186, 92, 5,
255, 135, 245, 205, 180, 213, 161, 8, 47, 107, 13, 105, 218, 1, 9, 5, 128,
206, 134, 32, 111, 116, 100, 111, 103, 134, 110, 111, 116, 99, 97, 116, 128, 128
],
vec![
235, 128, 128, 128, 128, 128, 128, 208, 133, 53, 116, 116, 101, 114, 137, 99,
111, 110, 102, 117, 115, 105, 111, 110, 202, 132, 53, 110, 99, 104, 132, 116,
105, 109, 101, 128, 128, 128, 128, 128, 128, 128, 128, 128
]
]);
}
}
|
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221,
59, 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79,
0, 236, 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
|
random_line_split
|
recorder.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie query recorder.
use keccak::keccak;
use bigint::hash::H256;
use bytes::Bytes;
/// A record of a visited node.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Record {
/// The depth of this node.
pub depth: u32,
/// The raw data of the node.
pub data: Bytes,
/// The hash of the data.
pub hash: H256,
}
/// Records trie nodes as they pass it.
#[derive(Debug)]
pub struct Recorder {
nodes: Vec<Record>,
min_depth: u32,
}
impl Default for Recorder {
fn
|
() -> Self {
Recorder::new()
}
}
impl Recorder {
/// Create a new `Recorder` which records all given nodes.
#[inline]
pub fn new() -> Self {
Recorder::with_depth(0)
}
/// Create a `Recorder` which only records nodes beyond a given depth.
pub fn with_depth(depth: u32) -> Self {
Recorder {
nodes: Vec::new(),
min_depth: depth,
}
}
/// Record a visited node, given its hash, data, and depth.
pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) {
debug_assert_eq!(keccak(data), *hash);
if depth >= self.min_depth {
self.nodes.push(Record {
depth: depth,
data: data.into(),
hash: *hash,
})
}
}
/// Drain all visited records.
pub fn drain(&mut self) -> Vec<Record> {
::std::mem::replace(&mut self.nodes, Vec::new())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bigint::hash::H256;
#[test]
fn basic_recorder() {
let mut basic = Recorder::new();
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let (hash1, hash2) = (keccak(&node1), keccak(&node2));
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let record1 = Record {
data: node1,
hash: hash1,
depth: 0,
};
let record2 = Record {
data: node2,
hash: hash2,
depth: 456
};
assert_eq!(basic.drain(), vec![record1, record2]);
}
#[test]
fn basic_recorder_min_depth() {
let mut basic = Recorder::with_depth(400);
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let hash1 = keccak(&node1);
let hash2 = keccak(&node2);
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let records = basic.drain();
assert_eq!(records.len(), 1);
assert_eq!(records[0].clone(), Record {
data: node2,
hash: hash2,
depth: 456,
});
}
#[test]
fn trie_record() {
use super::super::{TrieDB, TrieDBMut, Trie, TrieMut};
use memorydb::MemoryDB;
let mut db = MemoryDB::new();
let mut root = H256::default();
{
let mut x = TrieDBMut::new(&mut db, &mut root);
x.insert(b"dog", b"cat").unwrap();
x.insert(b"lunch", b"time").unwrap();
x.insert(b"notdog", b"notcat").unwrap();
x.insert(b"hotdog", b"hotcat").unwrap();
x.insert(b"letter", b"confusion").unwrap();
x.insert(b"insert", b"remove").unwrap();
x.insert(b"pirate", b"aargh!").unwrap();
x.insert(b"yo ho ho", b"and a bottle of rum").unwrap();
}
let trie = TrieDB::new(&db, &root).unwrap();
let mut recorder = Recorder::new();
trie.get_with(b"pirate", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, 59,
110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, 0, 236,
102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 60, 206, 134, 32, 105, 114, 97, 116, 101, 134, 97, 97, 114, 103, 104, 33,
128, 128, 128, 128, 128, 128, 128, 128, 221, 136, 32, 111, 32, 104, 111, 32, 104,
111, 147, 97, 110, 100, 32, 97, 32, 98, 111, 116, 116, 108, 101, 32, 111, 102,
32, 114, 117, 109, 128, 128, 128, 128, 128, 128, 128
]
]);
trie.get_with(b"letter", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221,
59, 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79,
0, 236, 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 99, 128, 128, 128, 128, 200, 131, 32, 111, 103, 131, 99, 97, 116, 128, 128,
128, 206, 134, 32, 111, 116, 100, 111, 103, 134, 104, 111, 116, 99, 97, 116, 206,
134, 32, 110, 115, 101, 114, 116, 134, 114, 101, 109, 111, 118, 101, 128, 128,
160, 202, 250, 252, 153, 229, 63, 255, 13, 100, 197, 80, 120, 190, 186, 92, 5,
255, 135, 245, 205, 180, 213, 161, 8, 47, 107, 13, 105, 218, 1, 9, 5, 128,
206, 134, 32, 111, 116, 100, 111, 103, 134, 110, 111, 116, 99, 97, 116, 128, 128
],
vec![
235, 128, 128, 128, 128, 128, 128, 208, 133, 53, 116, 116, 101, 114, 137, 99,
111, 110, 102, 117, 115, 105, 111, 110, 202, 132, 53, 110, 99, 104, 132, 116,
105, 109, 101, 128, 128, 128, 128, 128, 128, 128, 128, 128
]
]);
}
}
|
default
|
identifier_name
|
recorder.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie query recorder.
use keccak::keccak;
use bigint::hash::H256;
use bytes::Bytes;
/// A record of a visited node.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Record {
/// The depth of this node.
pub depth: u32,
/// The raw data of the node.
pub data: Bytes,
/// The hash of the data.
pub hash: H256,
}
/// Records trie nodes as they pass it.
#[derive(Debug)]
pub struct Recorder {
nodes: Vec<Record>,
min_depth: u32,
}
impl Default for Recorder {
fn default() -> Self {
Recorder::new()
}
}
impl Recorder {
/// Create a new `Recorder` which records all given nodes.
#[inline]
pub fn new() -> Self {
Recorder::with_depth(0)
}
/// Create a `Recorder` which only records nodes beyond a given depth.
pub fn with_depth(depth: u32) -> Self {
Recorder {
nodes: Vec::new(),
min_depth: depth,
}
}
/// Record a visited node, given its hash, data, and depth.
pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) {
debug_assert_eq!(keccak(data), *hash);
if depth >= self.min_depth
|
}
/// Drain all visited records.
pub fn drain(&mut self) -> Vec<Record> {
::std::mem::replace(&mut self.nodes, Vec::new())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bigint::hash::H256;
#[test]
fn basic_recorder() {
let mut basic = Recorder::new();
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let (hash1, hash2) = (keccak(&node1), keccak(&node2));
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let record1 = Record {
data: node1,
hash: hash1,
depth: 0,
};
let record2 = Record {
data: node2,
hash: hash2,
depth: 456
};
assert_eq!(basic.drain(), vec![record1, record2]);
}
#[test]
fn basic_recorder_min_depth() {
let mut basic = Recorder::with_depth(400);
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let hash1 = keccak(&node1);
let hash2 = keccak(&node2);
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let records = basic.drain();
assert_eq!(records.len(), 1);
assert_eq!(records[0].clone(), Record {
data: node2,
hash: hash2,
depth: 456,
});
}
#[test]
fn trie_record() {
use super::super::{TrieDB, TrieDBMut, Trie, TrieMut};
use memorydb::MemoryDB;
let mut db = MemoryDB::new();
let mut root = H256::default();
{
let mut x = TrieDBMut::new(&mut db, &mut root);
x.insert(b"dog", b"cat").unwrap();
x.insert(b"lunch", b"time").unwrap();
x.insert(b"notdog", b"notcat").unwrap();
x.insert(b"hotdog", b"hotcat").unwrap();
x.insert(b"letter", b"confusion").unwrap();
x.insert(b"insert", b"remove").unwrap();
x.insert(b"pirate", b"aargh!").unwrap();
x.insert(b"yo ho ho", b"and a bottle of rum").unwrap();
}
let trie = TrieDB::new(&db, &root).unwrap();
let mut recorder = Recorder::new();
trie.get_with(b"pirate", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, 59,
110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, 0, 236,
102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 60, 206, 134, 32, 105, 114, 97, 116, 101, 134, 97, 97, 114, 103, 104, 33,
128, 128, 128, 128, 128, 128, 128, 128, 221, 136, 32, 111, 32, 104, 111, 32, 104,
111, 147, 97, 110, 100, 32, 97, 32, 98, 111, 116, 116, 108, 101, 32, 111, 102,
32, 114, 117, 109, 128, 128, 128, 128, 128, 128, 128
]
]);
trie.get_with(b"letter", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221,
59, 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79,
0, 236, 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 99, 128, 128, 128, 128, 200, 131, 32, 111, 103, 131, 99, 97, 116, 128, 128,
128, 206, 134, 32, 111, 116, 100, 111, 103, 134, 104, 111, 116, 99, 97, 116, 206,
134, 32, 110, 115, 101, 114, 116, 134, 114, 101, 109, 111, 118, 101, 128, 128,
160, 202, 250, 252, 153, 229, 63, 255, 13, 100, 197, 80, 120, 190, 186, 92, 5,
255, 135, 245, 205, 180, 213, 161, 8, 47, 107, 13, 105, 218, 1, 9, 5, 128,
206, 134, 32, 111, 116, 100, 111, 103, 134, 110, 111, 116, 99, 97, 116, 128, 128
],
vec![
235, 128, 128, 128, 128, 128, 128, 208, 133, 53, 116, 116, 101, 114, 137, 99,
111, 110, 102, 117, 115, 105, 111, 110, 202, 132, 53, 110, 99, 104, 132, 116,
105, 109, 101, 128, 128, 128, 128, 128, 128, 128, 128, 128
]
]);
}
}
|
{
self.nodes.push(Record {
depth: depth,
data: data.into(),
hash: *hash,
})
}
|
conditional_block
|
recorder.rs
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Trie query recorder.
use keccak::keccak;
use bigint::hash::H256;
use bytes::Bytes;
/// A record of a visited node.
#[derive(PartialEq, Eq, Debug, Clone)]
pub struct Record {
/// The depth of this node.
pub depth: u32,
/// The raw data of the node.
pub data: Bytes,
/// The hash of the data.
pub hash: H256,
}
/// Records trie nodes as they pass it.
#[derive(Debug)]
pub struct Recorder {
nodes: Vec<Record>,
min_depth: u32,
}
impl Default for Recorder {
fn default() -> Self {
Recorder::new()
}
}
impl Recorder {
/// Create a new `Recorder` which records all given nodes.
#[inline]
pub fn new() -> Self {
Recorder::with_depth(0)
}
/// Create a `Recorder` which only records nodes beyond a given depth.
pub fn with_depth(depth: u32) -> Self {
Recorder {
nodes: Vec::new(),
min_depth: depth,
}
}
/// Record a visited node, given its hash, data, and depth.
pub fn record(&mut self, hash: &H256, data: &[u8], depth: u32) {
debug_assert_eq!(keccak(data), *hash);
if depth >= self.min_depth {
self.nodes.push(Record {
depth: depth,
data: data.into(),
hash: *hash,
})
}
}
/// Drain all visited records.
pub fn drain(&mut self) -> Vec<Record>
|
}
#[cfg(test)]
mod tests {
use super::*;
use bigint::hash::H256;
#[test]
fn basic_recorder() {
let mut basic = Recorder::new();
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let (hash1, hash2) = (keccak(&node1), keccak(&node2));
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let record1 = Record {
data: node1,
hash: hash1,
depth: 0,
};
let record2 = Record {
data: node2,
hash: hash2,
depth: 456
};
assert_eq!(basic.drain(), vec![record1, record2]);
}
#[test]
fn basic_recorder_min_depth() {
let mut basic = Recorder::with_depth(400);
let node1 = vec![1, 2, 3, 4];
let node2 = vec![4, 5, 6, 7, 8, 9, 10];
let hash1 = keccak(&node1);
let hash2 = keccak(&node2);
basic.record(&hash1, &node1, 0);
basic.record(&hash2, &node2, 456);
let records = basic.drain();
assert_eq!(records.len(), 1);
assert_eq!(records[0].clone(), Record {
data: node2,
hash: hash2,
depth: 456,
});
}
#[test]
fn trie_record() {
use super::super::{TrieDB, TrieDBMut, Trie, TrieMut};
use memorydb::MemoryDB;
let mut db = MemoryDB::new();
let mut root = H256::default();
{
let mut x = TrieDBMut::new(&mut db, &mut root);
x.insert(b"dog", b"cat").unwrap();
x.insert(b"lunch", b"time").unwrap();
x.insert(b"notdog", b"notcat").unwrap();
x.insert(b"hotdog", b"hotcat").unwrap();
x.insert(b"letter", b"confusion").unwrap();
x.insert(b"insert", b"remove").unwrap();
x.insert(b"pirate", b"aargh!").unwrap();
x.insert(b"yo ho ho", b"and a bottle of rum").unwrap();
}
let trie = TrieDB::new(&db, &root).unwrap();
let mut recorder = Recorder::new();
trie.get_with(b"pirate", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, 59,
110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, 0, 236,
102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 60, 206, 134, 32, 105, 114, 97, 116, 101, 134, 97, 97, 114, 103, 104, 33,
128, 128, 128, 128, 128, 128, 128, 128, 221, 136, 32, 111, 32, 104, 111, 32, 104,
111, 147, 97, 110, 100, 32, 97, 32, 98, 111, 116, 116, 108, 101, 32, 111, 102,
32, 114, 117, 109, 128, 128, 128, 128, 128, 128, 128
]
]);
trie.get_with(b"letter", &mut recorder).unwrap().unwrap();
let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect();
assert_eq!(nodes, vec![
vec![
248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149,
92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118,
215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221,
59, 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79,
0, 236, 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128
],
vec![
248, 99, 128, 128, 128, 128, 200, 131, 32, 111, 103, 131, 99, 97, 116, 128, 128,
128, 206, 134, 32, 111, 116, 100, 111, 103, 134, 104, 111, 116, 99, 97, 116, 206,
134, 32, 110, 115, 101, 114, 116, 134, 114, 101, 109, 111, 118, 101, 128, 128,
160, 202, 250, 252, 153, 229, 63, 255, 13, 100, 197, 80, 120, 190, 186, 92, 5,
255, 135, 245, 205, 180, 213, 161, 8, 47, 107, 13, 105, 218, 1, 9, 5, 128,
206, 134, 32, 111, 116, 100, 111, 103, 134, 110, 111, 116, 99, 97, 116, 128, 128
],
vec![
235, 128, 128, 128, 128, 128, 128, 208, 133, 53, 116, 116, 101, 114, 137, 99,
111, 110, 102, 117, 115, 105, 111, 110, 202, 132, 53, 110, 99, 104, 132, 116,
105, 109, 101, 128, 128, 128, 128, 128, 128, 128, 128, 128
]
]);
}
}
|
{
::std::mem::replace(&mut self.nodes, Vec::new())
}
|
identifier_body
|
theme.rs
|
use handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
|
pub fn theme_option(
h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
ctx: &Context,
rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output,
) -> Result<(), RenderError> {
trace!("theme_option (handlebars helper)");
let param = h.param(0).and_then(|v| v.value().as_str()).ok_or_else(|| {
RenderError::new("Param 0 with String type is required for theme_option helper.")
})?;
let default_theme = rc.evaluate(ctx, "@root/default_theme")?;
let default_theme_name = default_theme
.as_json()
.as_str()
.ok_or_else(|| RenderError::new("Type error for `default_theme`, string expected"))?;
out.write(param)?;
if param.to_lowercase() == default_theme_name.to_lowercase() {
out.write(" (default)")?;
}
Ok(())
}
|
random_line_split
|
|
theme.rs
|
use handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
pub fn
|
(
h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
ctx: &Context,
rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output,
) -> Result<(), RenderError> {
trace!("theme_option (handlebars helper)");
let param = h.param(0).and_then(|v| v.value().as_str()).ok_or_else(|| {
RenderError::new("Param 0 with String type is required for theme_option helper.")
})?;
let default_theme = rc.evaluate(ctx, "@root/default_theme")?;
let default_theme_name = default_theme
.as_json()
.as_str()
.ok_or_else(|| RenderError::new("Type error for `default_theme`, string expected"))?;
out.write(param)?;
if param.to_lowercase() == default_theme_name.to_lowercase() {
out.write(" (default)")?;
}
Ok(())
}
|
theme_option
|
identifier_name
|
theme.rs
|
use handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
pub fn theme_option(
h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
ctx: &Context,
rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output,
) -> Result<(), RenderError>
|
{
trace!("theme_option (handlebars helper)");
let param = h.param(0).and_then(|v| v.value().as_str()).ok_or_else(|| {
RenderError::new("Param 0 with String type is required for theme_option helper.")
})?;
let default_theme = rc.evaluate(ctx, "@root/default_theme")?;
let default_theme_name = default_theme
.as_json()
.as_str()
.ok_or_else(|| RenderError::new("Type error for `default_theme`, string expected"))?;
out.write(param)?;
if param.to_lowercase() == default_theme_name.to_lowercase() {
out.write(" (default)")?;
}
Ok(())
}
|
identifier_body
|
|
theme.rs
|
use handlebars::{Context, Handlebars, Helper, Output, RenderContext, RenderError};
pub fn theme_option(
h: &Helper<'_, '_>,
_r: &Handlebars<'_>,
ctx: &Context,
rc: &mut RenderContext<'_, '_>,
out: &mut dyn Output,
) -> Result<(), RenderError> {
trace!("theme_option (handlebars helper)");
let param = h.param(0).and_then(|v| v.value().as_str()).ok_or_else(|| {
RenderError::new("Param 0 with String type is required for theme_option helper.")
})?;
let default_theme = rc.evaluate(ctx, "@root/default_theme")?;
let default_theme_name = default_theme
.as_json()
.as_str()
.ok_or_else(|| RenderError::new("Type error for `default_theme`, string expected"))?;
out.write(param)?;
if param.to_lowercase() == default_theme_name.to_lowercase()
|
Ok(())
}
|
{
out.write(" (default)")?;
}
|
conditional_block
|
shootout-fasta-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::min;
use std::old_io::{stdout, IoResult};
use std::iter::repeat;
use std::env;
use std::slice::bytes::copy_memory;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
const LOOKUP_SCALE: f32 = (LOOKUP_SIZE - 1) as f32;
// Random number generator constants
const IM: u32 = 139968;
const IA: u32 = 3877;
const IC: u32 = 29573;
const ALU: &'static str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG\
GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA\
GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA\
AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT\
CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC\
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG\
CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
const NULL_AMINO_ACID: AminoAcid = AminoAcid { c:'' as u8, p: 0.0 };
static IUB: [AminoAcid;15] = [
AminoAcid { c: 'a' as u8, p: 0.27 },
AminoAcid { c: 'c' as u8, p: 0.12 },
AminoAcid { c: 'g' as u8, p: 0.12 },
AminoAcid { c: 't' as u8, p: 0.27 },
AminoAcid { c: 'B' as u8, p: 0.02 },
AminoAcid { c: 'D' as u8, p: 0.02 },
AminoAcid { c: 'H' as u8, p: 0.02 },
AminoAcid { c: 'K' as u8, p: 0.02 },
AminoAcid { c: 'M' as u8, p: 0.02 },
AminoAcid { c: 'N' as u8, p: 0.02 },
AminoAcid { c: 'R' as u8, p: 0.02 },
AminoAcid { c: 'S' as u8, p: 0.02 },
AminoAcid { c: 'V' as u8, p: 0.02 },
AminoAcid { c: 'W' as u8, p: 0.02 },
AminoAcid { c: 'Y' as u8, p: 0.02 },
];
static HOMO_SAPIENS: [AminoAcid;4] = [
AminoAcid { c: 'a' as u8, p: 0.3029549426680 },
AminoAcid { c: 'c' as u8, p: 0.1979883004921 },
AminoAcid { c: 'g' as u8, p: 0.1975473066391 },
AminoAcid { c: 't' as u8, p: 0.3015094502008 },
];
// FIXME: Use map().
fn sum_and_scale(a: &'static [AminoAcid]) -> Vec<AminoAcid> {
let mut result = Vec::new();
let mut p = 0f32;
for a_i in a {
let mut a_i = *a_i;
p += a_i.p;
a_i.p = p * LOOKUP_SCALE;
result.push(a_i);
}
let result_len = result.len();
result[result_len - 1].p = LOOKUP_SCALE;
result
}
#[derive(Copy)]
struct AminoAcid {
c: u8,
p: f32,
}
struct RepeatFasta<'a, W:'a> {
alu: &'static str,
out: &'a mut W
}
impl<'a, W: Writer> RepeatFasta<'a, W> {
fn new(alu: &'static str, w: &'a mut W) -> RepeatFasta<'a, W> {
RepeatFasta { alu: alu, out: w }
}
fn make(&mut self, n: usize) -> IoResult<()> {
let alu_len = self.alu.len();
let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
let alu: &[u8] = self.alu.as_bytes();
copy_memory(&mut buf, alu);
let buf_len = buf.len();
copy_memory(&mut buf[alu_len..buf_len],
&alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
let mut n = n;
while n > 0 {
bytes = min(LINE_LEN, n);
try!(self.out.write(&buf[pos..pos + bytes]));
try!(self.out.write_u8('\n' as u8));
pos += bytes;
if pos > alu_len
|
n -= bytes;
}
Ok(())
}
}
fn make_lookup(a: &[AminoAcid]) -> [AminoAcid;LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID;LOOKUP_SIZE ];
let mut j = 0;
for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
}
*slot = a[j];
}
lookup
}
struct RandomFasta<'a, W:'a> {
seed: u32,
lookup: [AminoAcid;LOOKUP_SIZE],
out: &'a mut W,
}
impl<'a, W: Writer> RandomFasta<'a, W> {
fn new(w: &'a mut W, a: &[AminoAcid]) -> RandomFasta<'a, W> {
RandomFasta {
seed: 42,
out: w,
lookup: make_lookup(a),
}
}
fn rng(&mut self, max: f32) -> f32 {
self.seed = (self.seed * IA + IC) % IM;
max * (self.seed as f32) / (IM as f32)
}
fn nextc(&mut self) -> u8 {
let r = self.rng(1.0);
for a in &self.lookup[..] {
if a.p >= r {
return a.c;
}
}
0
}
fn make(&mut self, n: usize) -> IoResult<()> {
let lines = n / LINE_LEN;
let chars_left = n % LINE_LEN;
let mut buf = [0;LINE_LEN + 1];
for _ in 0..lines {
for i in 0..LINE_LEN {
buf[i] = self.nextc();
}
buf[LINE_LEN] = '\n' as u8;
try!(self.out.write(&buf));
}
for i in 0..chars_left {
buf[i] = self.nextc();
}
self.out.write(&buf[..chars_left])
}
}
fn main() {
let mut args = env::args();
let n = if args.len() > 1 {
args.nth(1).unwrap().parse::<usize>().unwrap()
} else {
5
};
let mut out = stdout();
out.write_line(">ONE Homo sapiens alu").unwrap();
{
let mut repeat = RepeatFasta::new(ALU, &mut out);
repeat.make(n * 2).unwrap();
}
out.write_line(">TWO IUB ambiguity codes").unwrap();
let iub = sum_and_scale(&IUB);
let mut random = RandomFasta::new(&mut out, &iub);
random.make(n * 3).unwrap();
random.out.write_line(">THREE Homo sapiens frequency").unwrap();
let homo_sapiens = sum_and_scale(&HOMO_SAPIENS);
random.lookup = make_lookup(&homo_sapiens);
random.make(n * 5).unwrap();
random.out.write_str("\n").unwrap();
}
|
{
pos -= alu_len;
}
|
conditional_block
|
shootout-fasta-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::min;
use std::old_io::{stdout, IoResult};
use std::iter::repeat;
use std::env;
use std::slice::bytes::copy_memory;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
const LOOKUP_SCALE: f32 = (LOOKUP_SIZE - 1) as f32;
// Random number generator constants
const IM: u32 = 139968;
const IA: u32 = 3877;
const IC: u32 = 29573;
const ALU: &'static str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG\
GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA\
GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA\
AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT\
CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC\
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG\
CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
const NULL_AMINO_ACID: AminoAcid = AminoAcid { c:'' as u8, p: 0.0 };
static IUB: [AminoAcid;15] = [
AminoAcid { c: 'a' as u8, p: 0.27 },
AminoAcid { c: 'c' as u8, p: 0.12 },
AminoAcid { c: 'g' as u8, p: 0.12 },
AminoAcid { c: 't' as u8, p: 0.27 },
AminoAcid { c: 'B' as u8, p: 0.02 },
AminoAcid { c: 'D' as u8, p: 0.02 },
AminoAcid { c: 'H' as u8, p: 0.02 },
AminoAcid { c: 'K' as u8, p: 0.02 },
AminoAcid { c: 'M' as u8, p: 0.02 },
AminoAcid { c: 'N' as u8, p: 0.02 },
AminoAcid { c: 'R' as u8, p: 0.02 },
AminoAcid { c: 'S' as u8, p: 0.02 },
AminoAcid { c: 'V' as u8, p: 0.02 },
AminoAcid { c: 'W' as u8, p: 0.02 },
AminoAcid { c: 'Y' as u8, p: 0.02 },
];
static HOMO_SAPIENS: [AminoAcid;4] = [
AminoAcid { c: 'a' as u8, p: 0.3029549426680 },
AminoAcid { c: 'c' as u8, p: 0.1979883004921 },
AminoAcid { c: 'g' as u8, p: 0.1975473066391 },
AminoAcid { c: 't' as u8, p: 0.3015094502008 },
];
// FIXME: Use map().
fn sum_and_scale(a: &'static [AminoAcid]) -> Vec<AminoAcid> {
let mut result = Vec::new();
let mut p = 0f32;
for a_i in a {
let mut a_i = *a_i;
p += a_i.p;
a_i.p = p * LOOKUP_SCALE;
result.push(a_i);
}
let result_len = result.len();
result[result_len - 1].p = LOOKUP_SCALE;
result
}
#[derive(Copy)]
struct AminoAcid {
c: u8,
p: f32,
}
struct RepeatFasta<'a, W:'a> {
alu: &'static str,
out: &'a mut W
}
impl<'a, W: Writer> RepeatFasta<'a, W> {
fn new(alu: &'static str, w: &'a mut W) -> RepeatFasta<'a, W> {
RepeatFasta { alu: alu, out: w }
}
fn make(&mut self, n: usize) -> IoResult<()> {
let alu_len = self.alu.len();
let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
let alu: &[u8] = self.alu.as_bytes();
copy_memory(&mut buf, alu);
let buf_len = buf.len();
copy_memory(&mut buf[alu_len..buf_len],
&alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
let mut n = n;
while n > 0 {
bytes = min(LINE_LEN, n);
try!(self.out.write(&buf[pos..pos + bytes]));
try!(self.out.write_u8('\n' as u8));
pos += bytes;
if pos > alu_len {
pos -= alu_len;
}
n -= bytes;
}
Ok(())
}
}
fn
|
(a: &[AminoAcid]) -> [AminoAcid;LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID;LOOKUP_SIZE ];
let mut j = 0;
for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
}
*slot = a[j];
}
lookup
}
struct RandomFasta<'a, W:'a> {
seed: u32,
lookup: [AminoAcid;LOOKUP_SIZE],
out: &'a mut W,
}
impl<'a, W: Writer> RandomFasta<'a, W> {
fn new(w: &'a mut W, a: &[AminoAcid]) -> RandomFasta<'a, W> {
RandomFasta {
seed: 42,
out: w,
lookup: make_lookup(a),
}
}
fn rng(&mut self, max: f32) -> f32 {
self.seed = (self.seed * IA + IC) % IM;
max * (self.seed as f32) / (IM as f32)
}
fn nextc(&mut self) -> u8 {
let r = self.rng(1.0);
for a in &self.lookup[..] {
if a.p >= r {
return a.c;
}
}
0
}
fn make(&mut self, n: usize) -> IoResult<()> {
let lines = n / LINE_LEN;
let chars_left = n % LINE_LEN;
let mut buf = [0;LINE_LEN + 1];
for _ in 0..lines {
for i in 0..LINE_LEN {
buf[i] = self.nextc();
}
buf[LINE_LEN] = '\n' as u8;
try!(self.out.write(&buf));
}
for i in 0..chars_left {
buf[i] = self.nextc();
}
self.out.write(&buf[..chars_left])
}
}
fn main() {
let mut args = env::args();
let n = if args.len() > 1 {
args.nth(1).unwrap().parse::<usize>().unwrap()
} else {
5
};
let mut out = stdout();
out.write_line(">ONE Homo sapiens alu").unwrap();
{
let mut repeat = RepeatFasta::new(ALU, &mut out);
repeat.make(n * 2).unwrap();
}
out.write_line(">TWO IUB ambiguity codes").unwrap();
let iub = sum_and_scale(&IUB);
let mut random = RandomFasta::new(&mut out, &iub);
random.make(n * 3).unwrap();
random.out.write_line(">THREE Homo sapiens frequency").unwrap();
let homo_sapiens = sum_and_scale(&HOMO_SAPIENS);
random.lookup = make_lookup(&homo_sapiens);
random.make(n * 5).unwrap();
random.out.write_str("\n").unwrap();
}
|
make_lookup
|
identifier_name
|
shootout-fasta-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::min;
use std::old_io::{stdout, IoResult};
use std::iter::repeat;
use std::env;
use std::slice::bytes::copy_memory;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
const LOOKUP_SCALE: f32 = (LOOKUP_SIZE - 1) as f32;
// Random number generator constants
const IM: u32 = 139968;
const IA: u32 = 3877;
const IC: u32 = 29573;
const ALU: &'static str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG\
GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA\
GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA\
AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT\
CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC\
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG\
CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
const NULL_AMINO_ACID: AminoAcid = AminoAcid { c:'' as u8, p: 0.0 };
static IUB: [AminoAcid;15] = [
AminoAcid { c: 'a' as u8, p: 0.27 },
AminoAcid { c: 'c' as u8, p: 0.12 },
AminoAcid { c: 'g' as u8, p: 0.12 },
AminoAcid { c: 't' as u8, p: 0.27 },
AminoAcid { c: 'B' as u8, p: 0.02 },
AminoAcid { c: 'D' as u8, p: 0.02 },
AminoAcid { c: 'H' as u8, p: 0.02 },
AminoAcid { c: 'K' as u8, p: 0.02 },
AminoAcid { c: 'M' as u8, p: 0.02 },
AminoAcid { c: 'N' as u8, p: 0.02 },
AminoAcid { c: 'R' as u8, p: 0.02 },
AminoAcid { c: 'S' as u8, p: 0.02 },
AminoAcid { c: 'V' as u8, p: 0.02 },
AminoAcid { c: 'W' as u8, p: 0.02 },
AminoAcid { c: 'Y' as u8, p: 0.02 },
];
static HOMO_SAPIENS: [AminoAcid;4] = [
AminoAcid { c: 'a' as u8, p: 0.3029549426680 },
AminoAcid { c: 'c' as u8, p: 0.1979883004921 },
AminoAcid { c: 'g' as u8, p: 0.1975473066391 },
AminoAcid { c: 't' as u8, p: 0.3015094502008 },
];
// FIXME: Use map().
fn sum_and_scale(a: &'static [AminoAcid]) -> Vec<AminoAcid> {
let mut result = Vec::new();
let mut p = 0f32;
for a_i in a {
let mut a_i = *a_i;
p += a_i.p;
a_i.p = p * LOOKUP_SCALE;
result.push(a_i);
}
let result_len = result.len();
result[result_len - 1].p = LOOKUP_SCALE;
result
}
#[derive(Copy)]
struct AminoAcid {
c: u8,
p: f32,
}
struct RepeatFasta<'a, W:'a> {
alu: &'static str,
out: &'a mut W
}
impl<'a, W: Writer> RepeatFasta<'a, W> {
fn new(alu: &'static str, w: &'a mut W) -> RepeatFasta<'a, W> {
RepeatFasta { alu: alu, out: w }
}
fn make(&mut self, n: usize) -> IoResult<()> {
let alu_len = self.alu.len();
let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
let alu: &[u8] = self.alu.as_bytes();
copy_memory(&mut buf, alu);
let buf_len = buf.len();
copy_memory(&mut buf[alu_len..buf_len],
&alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
let mut n = n;
while n > 0 {
bytes = min(LINE_LEN, n);
try!(self.out.write(&buf[pos..pos + bytes]));
try!(self.out.write_u8('\n' as u8));
pos += bytes;
if pos > alu_len {
pos -= alu_len;
}
n -= bytes;
}
Ok(())
}
}
fn make_lookup(a: &[AminoAcid]) -> [AminoAcid;LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID;LOOKUP_SIZE ];
let mut j = 0;
for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
}
*slot = a[j];
}
lookup
}
struct RandomFasta<'a, W:'a> {
seed: u32,
lookup: [AminoAcid;LOOKUP_SIZE],
out: &'a mut W,
}
impl<'a, W: Writer> RandomFasta<'a, W> {
fn new(w: &'a mut W, a: &[AminoAcid]) -> RandomFasta<'a, W> {
RandomFasta {
seed: 42,
out: w,
lookup: make_lookup(a),
}
}
fn rng(&mut self, max: f32) -> f32
|
fn nextc(&mut self) -> u8 {
let r = self.rng(1.0);
for a in &self.lookup[..] {
if a.p >= r {
return a.c;
}
}
0
}
fn make(&mut self, n: usize) -> IoResult<()> {
let lines = n / LINE_LEN;
let chars_left = n % LINE_LEN;
let mut buf = [0;LINE_LEN + 1];
for _ in 0..lines {
for i in 0..LINE_LEN {
buf[i] = self.nextc();
}
buf[LINE_LEN] = '\n' as u8;
try!(self.out.write(&buf));
}
for i in 0..chars_left {
buf[i] = self.nextc();
}
self.out.write(&buf[..chars_left])
}
}
fn main() {
let mut args = env::args();
let n = if args.len() > 1 {
args.nth(1).unwrap().parse::<usize>().unwrap()
} else {
5
};
let mut out = stdout();
out.write_line(">ONE Homo sapiens alu").unwrap();
{
let mut repeat = RepeatFasta::new(ALU, &mut out);
repeat.make(n * 2).unwrap();
}
out.write_line(">TWO IUB ambiguity codes").unwrap();
let iub = sum_and_scale(&IUB);
let mut random = RandomFasta::new(&mut out, &iub);
random.make(n * 3).unwrap();
random.out.write_line(">THREE Homo sapiens frequency").unwrap();
let homo_sapiens = sum_and_scale(&HOMO_SAPIENS);
random.lookup = make_lookup(&homo_sapiens);
random.make(n * 5).unwrap();
random.out.write_str("\n").unwrap();
}
|
{
self.seed = (self.seed * IA + IC) % IM;
max * (self.seed as f32) / (IM as f32)
}
|
identifier_body
|
shootout-fasta-redux.rs
|
// The Computer Language Benchmarks Game
// http://benchmarksgame.alioth.debian.org/
//
// contributed by the Rust Project Developers
// Copyright (c) 2013-2014 The Rust Project Developers
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// - Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in
// the documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of "The Computer Language Benchmarks Game" nor
// the name of "The Computer Language Shootout Benchmarks" nor the
// names of its contributors may be used to endorse or promote
// products derived from this software without specific prior
// written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.
use std::cmp::min;
use std::old_io::{stdout, IoResult};
use std::iter::repeat;
use std::env;
use std::slice::bytes::copy_memory;
const LINE_LEN: usize = 60;
const LOOKUP_SIZE: usize = 4 * 1024;
const LOOKUP_SCALE: f32 = (LOOKUP_SIZE - 1) as f32;
// Random number generator constants
const IM: u32 = 139968;
const IA: u32 = 3877;
const IC: u32 = 29573;
const ALU: &'static str = "GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTG\
GGAGGCCGAGGCGGGCGGATCACCTGAGGTCAGGAGTTCGA\
GACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACTAAA\
AATACAAAAATTAGCCGGGCGTGGTGGCGCGCGCCTGTAAT\
CCCAGCTACTCGGGAGGCTGAGGCAGGAGAATCGCTTGAAC\
CCGGGAGGCGGAGGTTGCAGTGAGCCGAGATCGCGCCACTG\
CACTCCAGCCTGGGCGACAGAGCGAGACTCCGTCTCAAAAA";
const NULL_AMINO_ACID: AminoAcid = AminoAcid { c:'' as u8, p: 0.0 };
static IUB: [AminoAcid;15] = [
AminoAcid { c: 'a' as u8, p: 0.27 },
AminoAcid { c: 'c' as u8, p: 0.12 },
AminoAcid { c: 'g' as u8, p: 0.12 },
AminoAcid { c: 't' as u8, p: 0.27 },
AminoAcid { c: 'B' as u8, p: 0.02 },
AminoAcid { c: 'D' as u8, p: 0.02 },
AminoAcid { c: 'H' as u8, p: 0.02 },
AminoAcid { c: 'K' as u8, p: 0.02 },
AminoAcid { c: 'M' as u8, p: 0.02 },
AminoAcid { c: 'N' as u8, p: 0.02 },
AminoAcid { c: 'R' as u8, p: 0.02 },
AminoAcid { c: 'S' as u8, p: 0.02 },
AminoAcid { c: 'V' as u8, p: 0.02 },
AminoAcid { c: 'W' as u8, p: 0.02 },
AminoAcid { c: 'Y' as u8, p: 0.02 },
];
static HOMO_SAPIENS: [AminoAcid;4] = [
AminoAcid { c: 'a' as u8, p: 0.3029549426680 },
AminoAcid { c: 'c' as u8, p: 0.1979883004921 },
AminoAcid { c: 'g' as u8, p: 0.1975473066391 },
AminoAcid { c: 't' as u8, p: 0.3015094502008 },
];
// FIXME: Use map().
fn sum_and_scale(a: &'static [AminoAcid]) -> Vec<AminoAcid> {
let mut result = Vec::new();
let mut p = 0f32;
for a_i in a {
let mut a_i = *a_i;
p += a_i.p;
a_i.p = p * LOOKUP_SCALE;
result.push(a_i);
}
let result_len = result.len();
result[result_len - 1].p = LOOKUP_SCALE;
result
}
#[derive(Copy)]
struct AminoAcid {
c: u8,
p: f32,
}
struct RepeatFasta<'a, W:'a> {
alu: &'static str,
out: &'a mut W
}
impl<'a, W: Writer> RepeatFasta<'a, W> {
fn new(alu: &'static str, w: &'a mut W) -> RepeatFasta<'a, W> {
RepeatFasta { alu: alu, out: w }
}
fn make(&mut self, n: usize) -> IoResult<()> {
let alu_len = self.alu.len();
let mut buf = repeat(0).take(alu_len + LINE_LEN).collect::<Vec<_>>();
let alu: &[u8] = self.alu.as_bytes();
copy_memory(&mut buf, alu);
let buf_len = buf.len();
copy_memory(&mut buf[alu_len..buf_len],
&alu[..LINE_LEN]);
let mut pos = 0;
let mut bytes;
let mut n = n;
while n > 0 {
bytes = min(LINE_LEN, n);
try!(self.out.write(&buf[pos..pos + bytes]));
try!(self.out.write_u8('\n' as u8));
pos += bytes;
if pos > alu_len {
pos -= alu_len;
}
n -= bytes;
}
Ok(())
}
}
fn make_lookup(a: &[AminoAcid]) -> [AminoAcid;LOOKUP_SIZE] {
let mut lookup = [ NULL_AMINO_ACID;LOOKUP_SIZE ];
|
}
*slot = a[j];
}
lookup
}
struct RandomFasta<'a, W:'a> {
seed: u32,
lookup: [AminoAcid;LOOKUP_SIZE],
out: &'a mut W,
}
impl<'a, W: Writer> RandomFasta<'a, W> {
fn new(w: &'a mut W, a: &[AminoAcid]) -> RandomFasta<'a, W> {
RandomFasta {
seed: 42,
out: w,
lookup: make_lookup(a),
}
}
fn rng(&mut self, max: f32) -> f32 {
self.seed = (self.seed * IA + IC) % IM;
max * (self.seed as f32) / (IM as f32)
}
fn nextc(&mut self) -> u8 {
let r = self.rng(1.0);
for a in &self.lookup[..] {
if a.p >= r {
return a.c;
}
}
0
}
fn make(&mut self, n: usize) -> IoResult<()> {
let lines = n / LINE_LEN;
let chars_left = n % LINE_LEN;
let mut buf = [0;LINE_LEN + 1];
for _ in 0..lines {
for i in 0..LINE_LEN {
buf[i] = self.nextc();
}
buf[LINE_LEN] = '\n' as u8;
try!(self.out.write(&buf));
}
for i in 0..chars_left {
buf[i] = self.nextc();
}
self.out.write(&buf[..chars_left])
}
}
fn main() {
let mut args = env::args();
let n = if args.len() > 1 {
args.nth(1).unwrap().parse::<usize>().unwrap()
} else {
5
};
let mut out = stdout();
out.write_line(">ONE Homo sapiens alu").unwrap();
{
let mut repeat = RepeatFasta::new(ALU, &mut out);
repeat.make(n * 2).unwrap();
}
out.write_line(">TWO IUB ambiguity codes").unwrap();
let iub = sum_and_scale(&IUB);
let mut random = RandomFasta::new(&mut out, &iub);
random.make(n * 3).unwrap();
random.out.write_line(">THREE Homo sapiens frequency").unwrap();
let homo_sapiens = sum_and_scale(&HOMO_SAPIENS);
random.lookup = make_lookup(&homo_sapiens);
random.make(n * 5).unwrap();
random.out.write_str("\n").unwrap();
}
|
let mut j = 0;
for (i, slot) in lookup.iter_mut().enumerate() {
while a[j].p < (i as f32) {
j += 1;
|
random_line_split
|
task-perf-linked-failure.rs
|
// xfail-pretty
// xfail-test linked failure
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
* Test performance of killing many tasks in a taskgroup.
* Along the way, tests various edge cases of ancestor group management.
* In particular, this tries to get each grandchild task to hit the
* "nobe_is_dead" case in each_ancestor only during task exit, but not during
* task spawn. This makes sure that defunct ancestor groups are handled correctly
* w.r.t. possibly leaving stale *rust_tasks lying around.
*/
// Creates in the background 'num_tasks' tasks, all blocked forever.
// Doesn't return until all such tasks are ready, but doesn't block forever itself.
use std::comm::{stream, SharedChan};
use std::os;
use std::result;
use std::task;
use std::uint;
fn grandchild_group(num_tasks: uint) {
let (po, ch) = stream();
let ch = SharedChan::new(ch);
for _ in range(0, num_tasks) {
let ch = ch.clone();
let mut t = task::task();
do t.spawn { // linked
ch.send(());
let (p, _c) = stream::<()>();
p.recv(); // block forever
}
}
error!("Grandchild group getting started");
for _ in range(0, num_tasks) {
// Make sure all above children are fully spawned; i.e., enlisted in
// their ancestor groups.
po.recv();
}
error!("Grandchild group ready to go.");
// Master grandchild task exits early.
}
fn spawn_supervised_blocking(myname: &str, f: proc()) {
let mut builder = task::task();
let res = builder.future_result();
builder.supervised();
builder.spawn(f);
error!("{} group waiting", myname);
let x = res.recv();
assert!(x.is_ok());
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000"]
} else if args.len() <= 1u {
~[~"", ~"100"]
} else
|
;
let num_tasks = from_str::<uint>(args[1]).unwrap();
// Main group #0 waits for unsupervised group #1.
// Grandparent group #1 waits for middle group #2, then fails, killing #3.
// Middle group #2 creates grandchild_group #3, waits for it to be ready, exits.
let x: result::Result<(), ~Any> = do task::try { // unlinked
do spawn_supervised_blocking("grandparent") {
do spawn_supervised_blocking("middle") {
grandchild_group(num_tasks);
}
// When grandchild group is ready to go, make the middle group exit.
error!("Middle group wakes up and exits");
}
// Grandparent group waits for middle group to be gone, then fails
error!("Grandparent group wakes up and fails");
fail!();
};
assert!(x.is_err());
}
|
{
args.clone()
}
|
conditional_block
|
task-perf-linked-failure.rs
|
// xfail-pretty
// xfail-test linked failure
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
* Test performance of killing many tasks in a taskgroup.
* Along the way, tests various edge cases of ancestor group management.
* In particular, this tries to get each grandchild task to hit the
* "nobe_is_dead" case in each_ancestor only during task exit, but not during
* task spawn. This makes sure that defunct ancestor groups are handled correctly
* w.r.t. possibly leaving stale *rust_tasks lying around.
*/
// Creates in the background 'num_tasks' tasks, all blocked forever.
// Doesn't return until all such tasks are ready, but doesn't block forever itself.
use std::comm::{stream, SharedChan};
use std::os;
use std::result;
use std::task;
use std::uint;
fn grandchild_group(num_tasks: uint) {
let (po, ch) = stream();
let ch = SharedChan::new(ch);
for _ in range(0, num_tasks) {
let ch = ch.clone();
let mut t = task::task();
do t.spawn { // linked
ch.send(());
let (p, _c) = stream::<()>();
p.recv(); // block forever
}
}
error!("Grandchild group getting started");
for _ in range(0, num_tasks) {
// Make sure all above children are fully spawned; i.e., enlisted in
// their ancestor groups.
po.recv();
}
error!("Grandchild group ready to go.");
// Master grandchild task exits early.
}
fn spawn_supervised_blocking(myname: &str, f: proc()) {
let mut builder = task::task();
let res = builder.future_result();
builder.supervised();
builder.spawn(f);
error!("{} group waiting", myname);
let x = res.recv();
assert!(x.is_ok());
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000"]
} else if args.len() <= 1u {
~[~"", ~"100"]
} else {
args.clone()
|
// Grandparent group #1 waits for middle group #2, then fails, killing #3.
// Middle group #2 creates grandchild_group #3, waits for it to be ready, exits.
let x: result::Result<(), ~Any> = do task::try { // unlinked
do spawn_supervised_blocking("grandparent") {
do spawn_supervised_blocking("middle") {
grandchild_group(num_tasks);
}
// When grandchild group is ready to go, make the middle group exit.
error!("Middle group wakes up and exits");
}
// Grandparent group waits for middle group to be gone, then fails
error!("Grandparent group wakes up and fails");
fail!();
};
assert!(x.is_err());
}
|
};
let num_tasks = from_str::<uint>(args[1]).unwrap();
// Main group #0 waits for unsupervised group #1.
|
random_line_split
|
task-perf-linked-failure.rs
|
// xfail-pretty
// xfail-test linked failure
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
* Test performance of killing many tasks in a taskgroup.
* Along the way, tests various edge cases of ancestor group management.
* In particular, this tries to get each grandchild task to hit the
* "nobe_is_dead" case in each_ancestor only during task exit, but not during
* task spawn. This makes sure that defunct ancestor groups are handled correctly
* w.r.t. possibly leaving stale *rust_tasks lying around.
*/
// Creates in the background 'num_tasks' tasks, all blocked forever.
// Doesn't return until all such tasks are ready, but doesn't block forever itself.
use std::comm::{stream, SharedChan};
use std::os;
use std::result;
use std::task;
use std::uint;
fn grandchild_group(num_tasks: uint) {
let (po, ch) = stream();
let ch = SharedChan::new(ch);
for _ in range(0, num_tasks) {
let ch = ch.clone();
let mut t = task::task();
do t.spawn { // linked
ch.send(());
let (p, _c) = stream::<()>();
p.recv(); // block forever
}
}
error!("Grandchild group getting started");
for _ in range(0, num_tasks) {
// Make sure all above children are fully spawned; i.e., enlisted in
// their ancestor groups.
po.recv();
}
error!("Grandchild group ready to go.");
// Master grandchild task exits early.
}
fn spawn_supervised_blocking(myname: &str, f: proc()) {
let mut builder = task::task();
let res = builder.future_result();
builder.supervised();
builder.spawn(f);
error!("{} group waiting", myname);
let x = res.recv();
assert!(x.is_ok());
}
fn main()
|
// When grandchild group is ready to go, make the middle group exit.
error!("Middle group wakes up and exits");
}
// Grandparent group waits for middle group to be gone, then fails
error!("Grandparent group wakes up and fails");
fail!();
}
;
assert!(x.is_err());
}
|
{
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000"]
} else if args.len() <= 1u {
~[~"", ~"100"]
} else {
args.clone()
};
let num_tasks = from_str::<uint>(args[1]).unwrap();
// Main group #0 waits for unsupervised group #1.
// Grandparent group #1 waits for middle group #2, then fails, killing #3.
// Middle group #2 creates grandchild_group #3, waits for it to be ready, exits.
let x: result::Result<(), ~Any> = do task::try { // unlinked
do spawn_supervised_blocking("grandparent") {
do spawn_supervised_blocking("middle") {
grandchild_group(num_tasks);
}
|
identifier_body
|
task-perf-linked-failure.rs
|
// xfail-pretty
// xfail-test linked failure
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/**
* Test performance of killing many tasks in a taskgroup.
* Along the way, tests various edge cases of ancestor group management.
* In particular, this tries to get each grandchild task to hit the
* "nobe_is_dead" case in each_ancestor only during task exit, but not during
* task spawn. This makes sure that defunct ancestor groups are handled correctly
* w.r.t. possibly leaving stale *rust_tasks lying around.
*/
// Creates in the background 'num_tasks' tasks, all blocked forever.
// Doesn't return until all such tasks are ready, but doesn't block forever itself.
use std::comm::{stream, SharedChan};
use std::os;
use std::result;
use std::task;
use std::uint;
fn
|
(num_tasks: uint) {
let (po, ch) = stream();
let ch = SharedChan::new(ch);
for _ in range(0, num_tasks) {
let ch = ch.clone();
let mut t = task::task();
do t.spawn { // linked
ch.send(());
let (p, _c) = stream::<()>();
p.recv(); // block forever
}
}
error!("Grandchild group getting started");
for _ in range(0, num_tasks) {
// Make sure all above children are fully spawned; i.e., enlisted in
// their ancestor groups.
po.recv();
}
error!("Grandchild group ready to go.");
// Master grandchild task exits early.
}
fn spawn_supervised_blocking(myname: &str, f: proc()) {
let mut builder = task::task();
let res = builder.future_result();
builder.supervised();
builder.spawn(f);
error!("{} group waiting", myname);
let x = res.recv();
assert!(x.is_ok());
}
fn main() {
let args = os::args();
let args = if os::getenv("RUST_BENCH").is_some() {
~[~"", ~"100000"]
} else if args.len() <= 1u {
~[~"", ~"100"]
} else {
args.clone()
};
let num_tasks = from_str::<uint>(args[1]).unwrap();
// Main group #0 waits for unsupervised group #1.
// Grandparent group #1 waits for middle group #2, then fails, killing #3.
// Middle group #2 creates grandchild_group #3, waits for it to be ready, exits.
let x: result::Result<(), ~Any> = do task::try { // unlinked
do spawn_supervised_blocking("grandparent") {
do spawn_supervised_blocking("middle") {
grandchild_group(num_tasks);
}
// When grandchild group is ready to go, make the middle group exit.
error!("Middle group wakes up and exits");
}
// Grandparent group waits for middle group to be gone, then fails
error!("Grandparent group wakes up and fails");
fail!();
};
assert!(x.is_err());
}
|
grandchild_group
|
identifier_name
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::node::window_from_node;
use string_cache::Atom;
use util::str::{DOMString, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
fn attribute(&self) -> Option<Root<Attr>> {
self.element.get_attribute(&ns!(), &self.local_name)
}
fn check_token_exceptions(&self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Error::Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(Error::InvalidCharacter),
slice => Ok(Atom::from(slice)),
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl DOMTokenListMethods for DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(&self) -> u32 {
self.attribute().map_or(0, |attr| {
let attr = attr.r();
attr.value().as_tokens().len()
}) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(&self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
// FIXME(ajeffrey): Convert directly from Atom to DOMString
attr.value().as_tokens().get(index as usize).map(|token| DOMString::from(&**token))
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(&self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map_or(false, |attr| {
let attr = attr.r();
attr.value()
.as_tokens()
.iter()
.any(|atom: &Atom| *atom == token)
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| atoms.remove(index));
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(&self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
|
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ => {
atoms.push(token);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
},
}
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn Value(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn SetValue(&self, value: DOMString) {
self.element.set_tokenlist_attribute(&self.local_name, value);
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-replace
fn Replace(&self, token: DOMString, new_token: DOMString) -> ErrorResult {
if token.is_empty() || new_token.is_empty() {
// Step 1.
return Err(Error::Syntax);
}
if token.contains(HTML_SPACE_CHARACTERS) || new_token.contains(HTML_SPACE_CHARACTERS) {
// Step 2.
return Err(Error::InvalidCharacter);
}
// Steps 3-4.
let token = Atom::from(token);
let new_token = Atom::from(new_token);
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
if let Some(pos) = atoms.iter().position(|atom| *atom == token) {
if!atoms.contains(&new_token) {
atoms[pos] = new_token;
} else {
atoms.remove(pos);
}
}
// Step 5.
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#concept-dtl-serialize
fn Stringifier(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
random_line_split
|
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::node::window_from_node;
use string_cache::Atom;
use util::str::{DOMString, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
fn attribute(&self) -> Option<Root<Attr>> {
self.element.get_attribute(&ns!(), &self.local_name)
}
fn check_token_exceptions(&self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Error::Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(Error::InvalidCharacter),
slice => Ok(Atom::from(slice)),
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl DOMTokenListMethods for DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(&self) -> u32 {
self.attribute().map_or(0, |attr| {
let attr = attr.r();
attr.value().as_tokens().len()
}) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(&self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
// FIXME(ajeffrey): Convert directly from Atom to DOMString
attr.value().as_tokens().get(index as usize).map(|token| DOMString::from(&**token))
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(&self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map_or(false, |attr| {
let attr = attr.r();
attr.value()
.as_tokens()
.iter()
.any(|atom: &Atom| *atom == token)
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| atoms.remove(index));
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(&self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ => {
atoms.push(token);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
},
}
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn Value(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn SetValue(&self, value: DOMString) {
self.element.set_tokenlist_attribute(&self.local_name, value);
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-replace
fn Replace(&self, token: DOMString, new_token: DOMString) -> ErrorResult {
if token.is_empty() || new_token.is_empty() {
// Step 1.
return Err(Error::Syntax);
}
if token.contains(HTML_SPACE_CHARACTERS) || new_token.contains(HTML_SPACE_CHARACTERS) {
// Step 2.
return Err(Error::InvalidCharacter);
}
// Steps 3-4.
let token = Atom::from(token);
let new_token = Atom::from(new_token);
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
if let Some(pos) = atoms.iter().position(|atom| *atom == token) {
if!atoms.contains(&new_token) {
atoms[pos] = new_token;
} else {
atoms.remove(pos);
}
}
// Step 5.
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#concept-dtl-serialize
fn Stringifier(&self) -> DOMString
|
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
{
self.element.get_string_attribute(&self.local_name)
}
|
identifier_body
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::node::window_from_node;
use string_cache::Atom;
use util::str::{DOMString, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
fn
|
(&self) -> Option<Root<Attr>> {
self.element.get_attribute(&ns!(), &self.local_name)
}
fn check_token_exceptions(&self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Error::Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(Error::InvalidCharacter),
slice => Ok(Atom::from(slice)),
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl DOMTokenListMethods for DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(&self) -> u32 {
self.attribute().map_or(0, |attr| {
let attr = attr.r();
attr.value().as_tokens().len()
}) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(&self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
// FIXME(ajeffrey): Convert directly from Atom to DOMString
attr.value().as_tokens().get(index as usize).map(|token| DOMString::from(&**token))
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(&self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map_or(false, |attr| {
let attr = attr.r();
attr.value()
.as_tokens()
.iter()
.any(|atom: &Atom| *atom == token)
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| atoms.remove(index));
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(&self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ => {
atoms.push(token);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
},
}
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn Value(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn SetValue(&self, value: DOMString) {
self.element.set_tokenlist_attribute(&self.local_name, value);
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-replace
fn Replace(&self, token: DOMString, new_token: DOMString) -> ErrorResult {
if token.is_empty() || new_token.is_empty() {
// Step 1.
return Err(Error::Syntax);
}
if token.contains(HTML_SPACE_CHARACTERS) || new_token.contains(HTML_SPACE_CHARACTERS) {
// Step 2.
return Err(Error::InvalidCharacter);
}
// Steps 3-4.
let token = Atom::from(token);
let new_token = Atom::from(new_token);
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
if let Some(pos) = atoms.iter().position(|atom| *atom == token) {
if!atoms.contains(&new_token) {
atoms[pos] = new_token;
} else {
atoms.remove(pos);
}
}
// Step 5.
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#concept-dtl-serialize
fn Stringifier(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
attribute
|
identifier_name
|
domtokenlist.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::attr::Attr;
use dom::bindings::codegen::Bindings::DOMTokenListBinding;
use dom::bindings::codegen::Bindings::DOMTokenListBinding::DOMTokenListMethods;
use dom::bindings::error::{Error, ErrorResult, Fallible};
use dom::bindings::global::GlobalRef;
use dom::bindings::js::{JS, Root};
use dom::bindings::reflector::{Reflector, reflect_dom_object};
use dom::element::Element;
use dom::node::window_from_node;
use string_cache::Atom;
use util::str::{DOMString, HTML_SPACE_CHARACTERS};
#[dom_struct]
pub struct DOMTokenList {
reflector_: Reflector,
element: JS<Element>,
local_name: Atom,
}
impl DOMTokenList {
pub fn new_inherited(element: &Element, local_name: Atom) -> DOMTokenList {
DOMTokenList {
reflector_: Reflector::new(),
element: JS::from_ref(element),
local_name: local_name,
}
}
pub fn new(element: &Element, local_name: &Atom) -> Root<DOMTokenList> {
let window = window_from_node(element);
reflect_dom_object(box DOMTokenList::new_inherited(element, local_name.clone()),
GlobalRef::Window(window.r()),
DOMTokenListBinding::Wrap)
}
fn attribute(&self) -> Option<Root<Attr>> {
self.element.get_attribute(&ns!(), &self.local_name)
}
fn check_token_exceptions(&self, token: &str) -> Fallible<Atom> {
match token {
"" => Err(Error::Syntax),
slice if slice.find(HTML_SPACE_CHARACTERS).is_some() => Err(Error::InvalidCharacter),
slice => Ok(Atom::from(slice)),
}
}
}
// https://dom.spec.whatwg.org/#domtokenlist
impl DOMTokenListMethods for DOMTokenList {
// https://dom.spec.whatwg.org/#dom-domtokenlist-length
fn Length(&self) -> u32 {
self.attribute().map_or(0, |attr| {
let attr = attr.r();
attr.value().as_tokens().len()
}) as u32
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-item
fn Item(&self, index: u32) -> Option<DOMString> {
self.attribute().and_then(|attr| {
// FIXME(ajeffrey): Convert directly from Atom to DOMString
attr.value().as_tokens().get(index as usize).map(|token| DOMString::from(&**token))
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-contains
fn Contains(&self, token: DOMString) -> Fallible<bool> {
self.check_token_exceptions(&token).map(|token| {
self.attribute().map_or(false, |attr| {
let attr = attr.r();
attr.value()
.as_tokens()
.iter()
.any(|atom: &Atom| *atom == token)
})
})
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-add
fn Add(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
if!atoms.iter().any(|atom| *atom == token) {
atoms.push(token);
}
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-remove
fn Remove(&self, tokens: Vec<DOMString>) -> ErrorResult {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
for token in &tokens {
let token = try!(self.check_token_exceptions(&token));
atoms.iter().position(|atom| *atom == token).map(|index| atoms.remove(index));
}
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-toggle
fn Toggle(&self, token: DOMString, force: Option<bool>) -> Fallible<bool> {
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
let token = try!(self.check_token_exceptions(&token));
match atoms.iter().position(|atom| *atom == token) {
Some(index) => match force {
Some(true) => Ok(true),
_ => {
atoms.remove(index);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(false)
}
},
None => match force {
Some(false) => Ok(false),
_ =>
|
},
}
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn Value(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-value
fn SetValue(&self, value: DOMString) {
self.element.set_tokenlist_attribute(&self.local_name, value);
}
// https://dom.spec.whatwg.org/#dom-domtokenlist-replace
fn Replace(&self, token: DOMString, new_token: DOMString) -> ErrorResult {
if token.is_empty() || new_token.is_empty() {
// Step 1.
return Err(Error::Syntax);
}
if token.contains(HTML_SPACE_CHARACTERS) || new_token.contains(HTML_SPACE_CHARACTERS) {
// Step 2.
return Err(Error::InvalidCharacter);
}
// Steps 3-4.
let token = Atom::from(token);
let new_token = Atom::from(new_token);
let mut atoms = self.element.get_tokenlist_attribute(&self.local_name);
if let Some(pos) = atoms.iter().position(|atom| *atom == token) {
if!atoms.contains(&new_token) {
atoms[pos] = new_token;
} else {
atoms.remove(pos);
}
}
// Step 5.
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(())
}
// https://dom.spec.whatwg.org/#concept-dtl-serialize
fn Stringifier(&self) -> DOMString {
self.element.get_string_attribute(&self.local_name)
}
// check-tidy: no specs after this line
fn IndexedGetter(&self, index: u32, found: &mut bool) -> Option<DOMString> {
let item = self.Item(index);
*found = item.is_some();
item
}
}
|
{
atoms.push(token);
self.element.set_atomic_tokenlist_attribute(&self.local_name, atoms);
Ok(true)
}
|
conditional_block
|
shlobj.rs
|
// Copyright © 2015, Peter Atashian, skdltmxn
// Licensed under the MIT License <LICENSE.md>
pub const INVALID_HANDLE_VALUE: ::HANDLE = -1isize as ::HANDLE;
pub type GPFIDL_FLAGS = ::c_int;
ENUM!{enum KNOWN_FOLDER_FLAG {
KF_FLAG_DEFAULT = 0x00000000,
KF_FLAG_NO_APPCONTAINER_REDIRECTION = 0x00010000,
KF_FLAG_CREATE = 0x00008000,
KF_FLAG_DONT_VERIFY = 0x00004000,
KF_FLAG_DONT_UNEXPAND = 0x00002000,
KF_FLAG_NO_ALIAS = 0x00001000,
KF_FLAG_INIT = 0x00000800,
KF_FLAG_DEFAULT_PATH = 0x00000400,
KF_FLAG_NOT_PARENT_RELATIVE = 0x00000200,
KF_FLAG_SIMPLE_IDLIST = 0x00000100,
KF_FLAG_ALIAS_ONLY = 0x80000000,
}}
pub const IDO_SHGIOI_SHARE: ::c_int = 0x0FFFFFFF;
pub const IDO_SHGIOI_LINK: ::c_int = 0x0FFFFFFE;
// Yes, these values are supposed to overflow. Blame Microsoft.
pub const IDO_SHGIOI_SLOWFILE: ::c_int = 0xFFFFFFFDu32 as ::c_int;
pub const IDO_SHGIOI_DEFAULT: ::c_int = 0xFFFFFFFCu32 as ::c_int;
pub const GPFIDL_DEFAULT: GPFIDL_FLAGS = 0x0000;
pub const GPFIDL_ALTNAME: GPFIDL_FLAGS = 0x0001;
pub const GPFIDL_UNCPRINTER: GPFIDL_FLAGS = 0x0002;
pub const OFASI_EDIT: ::DWORD = 0x0001;
pub const OFASI_OPENDESKTOP: ::DWORD = 0x0002;
// 1204
pub const CSIDL_DESKTOP: ::c_int = 0x0000;
pub const CSIDL_INTERNET: ::c_int = 0x0001;
pub const CSIDL_PROGRAMS: ::c_int = 0x0002;
pub const CSIDL_CONTROLS: ::c_int = 0x0003;
pub const CSIDL_PRINTERS: ::c_int = 0x0004;
pub const CSIDL_PERSONAL: ::c_int = 0x0005;
pub const CSIDL_FAVORITES: ::c_int = 0x0006;
pub const CSIDL_STARTUP: ::c_int = 0x0007;
pub const CSIDL_RECENT: ::c_int = 0x0008;
pub const CSIDL_SENDTO: ::c_int = 0x0009;
pub const CSIDL_BITBUCKET: ::c_int = 0x000a;
pub const CSIDL_STARTMENU: ::c_int = 0x000b;
pub const CSIDL_MYDOCUMENTS: ::c_int = CSIDL_PERSONAL;
pub const CSIDL_MYMUSIC: ::c_int = 0x000d;
pub const CSIDL_MYVIDEO: ::c_int = 0x000e;
pub const CSIDL_DESKTOPDIRECTORY: ::c_int = 0x0010;
pub const CSIDL_DRIVES: ::c_int = 0x0011;
pub const CSIDL_NETWORK: ::c_int = 0x0012;
pub const CSIDL_NETHOOD: ::c_int = 0x0013;
pub const CSIDL_FONTS: ::c_int = 0x0014;
pub const CSIDL_TEMPLATES: ::c_int = 0x0015;
pub const CSIDL_COMMON_STARTMENU: ::c_int = 0x0016;
|
pub const CSIDL_COMMON_PROGRAMS: ::c_int = 0x0017;
pub const CSIDL_COMMON_STARTUP: ::c_int = 0x0018;
pub const CSIDL_COMMON_DESKTOPDIRECTORY: ::c_int = 0x0019;
pub const CSIDL_APPDATA: ::c_int = 0x001a;
pub const CSIDL_PRINTHOOD: ::c_int = 0x001b;
pub const CSIDL_LOCAL_APPDATA: ::c_int = 0x001c;
pub const CSIDL_ALTSTARTUP: ::c_int = 0x001d;
pub const CSIDL_COMMON_ALTSTARTUP: ::c_int = 0x001e;
pub const CSIDL_COMMON_FAVORITES: ::c_int = 0x001f;
pub const CSIDL_INTERNET_CACHE: ::c_int = 0x0020;
pub const CSIDL_COOKIES: ::c_int = 0x0021;
pub const CSIDL_HISTORY: ::c_int = 0x0022;
pub const CSIDL_COMMON_APPDATA: ::c_int = 0x0023;
pub const CSIDL_WINDOWS: ::c_int = 0x0024;
pub const CSIDL_SYSTEM: ::c_int = 0x0025;
pub const CSIDL_PROGRAM_FILES: ::c_int = 0x0026;
pub const CSIDL_MYPICTURES: ::c_int = 0x0027;
pub const CSIDL_PROFILE: ::c_int = 0x0028;
pub const CSIDL_SYSTEMX86: ::c_int = 0x0029;
pub const CSIDL_PROGRAM_FILESX86: ::c_int = 0x002a;
pub const CSIDL_PROGRAM_FILES_COMMON: ::c_int = 0x002b;
pub const CSIDL_PROGRAM_FILES_COMMONX86: ::c_int = 0x002c;
pub const CSIDL_COMMON_TEMPLATES: ::c_int = 0x002d;
pub const CSIDL_COMMON_DOCUMENTS: ::c_int = 0x002e;
pub const CSIDL_COMMON_ADMINTOOLS: ::c_int = 0x002f;
pub const CSIDL_ADMINTOOLS: ::c_int = 0x0030;
pub const CSIDL_CONNECTIONS: ::c_int = 0x0031;
pub const CSIDL_COMMON_MUSIC: ::c_int = 0x0035;
pub const CSIDL_COMMON_PICTURES: ::c_int = 0x0036;
pub const CSIDL_COMMON_VIDEO: ::c_int = 0x0037;
pub const CSIDL_RESOURCES: ::c_int = 0x0038;
pub const CSIDL_RESOURCES_LOCALIZED: ::c_int = 0x0039;
pub const CSIDL_COMMON_OEM_LINKS: ::c_int = 0x003a;
pub const CSIDL_CDBURN_AREA: ::c_int = 0x003b;
pub const CSIDL_COMPUTERSNEARME: ::c_int = 0x003d;
pub const CSIDL_FLAG_CREATE: ::c_int = 0x8000;
pub const CSIDL_FLAG_DONT_VERIFY: ::c_int = 0x4000;
pub const CSIDL_FLAG_DONT_UNEXPAND: ::c_int = 0x2000;
pub const CSIDL_FLAG_NO_ALIAS: ::c_int = 0x1000;
pub const CSIDL_FLAG_PER_USER_INIT: ::c_int = 0x0800;
pub const CSIDL_FLAG_MASK: ::c_int = 0xff00;
//1312
pub const SHGFP_TYPE_CURRENT: ::DWORD = 0;
pub const SHGFP_TYPE_DEFAULT: ::DWORD = 1;
|
random_line_split
|
|
datetime_expressions.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! DateTime expressions
use std::sync::Arc;
use crate::error::{DataFusionError, Result};
use arrow::{
array::{Array, ArrayData, ArrayRef, StringArray, TimestampNanosecondArray},
buffer::Buffer,
datatypes::{DataType, TimeUnit, ToByteSlice},
};
use chrono::{prelude::*, LocalResult};
#[inline]
/// Accepts a string in RFC3339 / ISO8601 standard format and some
/// variants and converts it to a nanosecond precision timestamp.
///
/// Implements the `to_timestamp` function to convert a string to a
/// timestamp, following the model of spark SQL’s to_`timestamp`.
///
/// In addition to RFC3339 / ISO8601 standard timestamps, it also
/// accepts strings that use a space ` ` to separate the date and time
/// as well as strings that have no explicit timezone offset.
///
/// Examples of accepted inputs:
/// * `1997-01-31T09:26:56.123Z` # RCF3339
/// * `1997-01-31T09:26:56.123-05:00` # RCF3339
/// * `1997-01-31 09:26:56.123-05:00` # close to RCF3339 but with a space rather than T
/// * `1997-01-31T09:26:56.123` # close to RCF3339 but no timezone offset specified
/// * `1997-01-31 09:26:56.123` # close to RCF3339 but uses a space and no timezone offset
/// * `1997-01-31 09:26:56` # close to RCF3339, no fractional seconds
//
/// Internally, this function uses the `chrono` library for the
/// datetime parsing
///
/// We hope to extend this function in the future with a second
/// parameter to specifying the format string.
///
/// ## Timestamp Precision
///
/// DataFusion uses the maximum precision timestamps supported by
/// Arrow (nanoseconds stored as a 64-bit integer) timestamps. This
/// means the range of dates that timestamps can represent is ~1677 AD
/// to 2262 AM
///
///
/// ## Timezone / Offset Handling
///
/// By using the Arrow format, DataFusion inherits Arrow’s handling of
/// timestamp values. Specifically, the stored numerical values of
/// timestamps are stored compared to offset UTC.
///
/// This function intertprets strings without an explicit time zone as
/// timestamps with offsets of the local time on the machine that ran
/// the datafusion query
///
/// For example, `1997-01-31 09:26:56.123Z` is interpreted as UTC, as
/// it has an explicit timezone specifier (“Z” for Zulu/UTC)
///
/// `1997-01-31T09:26:56.123` is interpreted as a local timestamp in
/// the timezone of the machine that ran DataFusion. For example, if
/// the system timezone is set to Americas/New_York (UTC-5) the
/// timestamp will be interpreted as though it were
/// `1997-01-31T09:26:56.123-05:00`
fn string_to_timestamp_nanos(s: &str) -> Result<i64> {
// Fast path: RFC3339 timestamp (with a T)
// Example: 2020-09-08T13:42:29.190855Z
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
return Ok(ts.timestamp_nanos());
}
// Implement quasi-RFC3339 support by trying to parse the
// timestamp with various other format specifiers to to support
// separating the date and time with a space'' rather than 'T' to be
// (more) compatible with Apache Spark SQL
// timezone offset, using'' as a separator
// Example: 2020-09-08 13:42:29.190855-05:00
if let Ok(ts) = DateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f%:z") {
return Ok(ts.timestamp_nanos());
}
// with an explicit Z, using'' as a separator
// Example: 2020-09-08 13:42:29Z
if let Ok(ts) = Utc.datetime_from_str(s, "%Y-%m-%d %H:%M:%S%.fZ") {
return Ok(ts.timestamp_nanos());
}
// Support timestamps without an explicit timezone offset, again
// to be compatible with what Apache Spark SQL does.
// without a timezone specifier as a local time, using T as a separator
// Example: 2020-09-08T13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using T as a
// separator, no fractional seconds
// Example: 2020-09-08T13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a separator
// Example: 2020-09-08 13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a
// separator, no fractional seconds
// Example: 2020-09-08 13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// Note we don't pass along the error message from the underlying
// chrono parsing because we tried several different format
// strings and we don't know which the user was trying to
// match. Ths any of the specific error messages is likely to be
// be more confusing than helpful
Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp",
s
)))
}
/// Converts the naive datetime (which has no specific timezone) to a
/// nanosecond epoch timestamp relative to UTC.
fn naive_datetime_to_timestamp(s: &str, datetime: NaiveDateTime) -> Result<i64> {
let l = Local {};
match l.from_local_datetime(&datetime) {
LocalResult::None => Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp: local time representation is invalid",
s
))),
LocalResult::Single(local_datetime) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
// Ambiguous times can happen if the timestamp is exactly when
// a daylight savings time transition occurs, for example, and
// so the datetime could validly be said to be in two
// potential offsets. However, since we are about to convert
// to UTC anyways, we can pick one arbitrarily
LocalResult::Ambiguous(local_datetime, _) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
}
}
/// convert an array of strings into `Timestamp(Nanosecond, None)`
pub fn to_timestamp(args: &[ArrayRef]) -> Result<TimestampNanosecondArray> {
let num_rows = args[0].len();
let string_args =
&args[0]
.as_any()
.downcast_ref::<StringArray>()
.ok_or_else(|| {
DataFusionError::Internal(format!(
"could not cast to_timestamp input to StringArray"
))
})?;
let result = (0..num_rows)
.map(|i| {
if string_args.is_null(i) {
// NB: Since we use the same null bitset as the input,
// the output for this value will be ignored, but we
// need some value in the array we are building.
Ok(0)
} else {
string_to_timestamp_nanos(string_args.value(i))
}
})
.collect::<Result<Vec<_>>>()?;
let data = ArrayData::new(
DataType::Timestamp(TimeUnit::Nanosecond, None),
num_rows,
Some(string_args.null_count()),
string_args.data().null_buffer().cloned(),
0,
vec![Buffer::from(result.to_byte_slice())],
vec![],
);
Ok(TimestampNanosecondArray::from(Arc::new(data)))
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow::array::{Int64Array, StringBuilder};
use super::*;
#[test]
fn string_to_timestamp_timezone() -> Result<()> {
// Explicit timezone
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08T13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08T13:42:29.190855-05:00")?
);
Ok(())
}
#[test]
fn string_to_timestamp_timezone_space() -> Result<()> {
// Ensure space rather than T between time and date is accepted
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08 13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08 13:42:29.190855-05:00")?
);
Ok(())
}
/// Interprets a naive_datetime (with no explicit timzone offset)
/// using the local timezone and returns the timestamp in UTC (0
/// offset)
fn naive_da
|
atetime: &NaiveDateTime) -> i64 {
// Note: Use chrono APIs that are different than
// naive_datetime_to_timestamp to compute the utc offset to
// try and double check the logic
let utc_offset_secs = match Local.offset_from_local_datetime(&naive_datetime) {
LocalResult::Single(local_offset) => {
local_offset.fix().local_minus_utc() as i64
}
_ => panic!("Unexpected failure converting to local datetime"),
};
let utc_offset_nanos = utc_offset_secs * 1_000_000_000;
naive_datetime.timestamp_nanos() - utc_offset_nanos
}
#[test]
fn string_to_timestamp_no_timezone() -> Result<()> {
// This test is designed to succeed in regardless of the local
// timezone the test machine is running. Thus it is still
// somewhat suceptable to bugs in the use of chrono
let naive_datetime = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms_nano(13, 42, 29, 190855),
);
// Ensure both T and'' variants work
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08T13:42:29.190855")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08 13:42:29.190855")?
);
// Also ensure that parsing timestamps with no fractional
// second part works as well
let naive_datetime_whole_secs = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms(13, 42, 29),
);
// Ensure both T and'' variants work
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08T13:42:29")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08 13:42:29")?
);
Ok(())
}
#[test]
fn string_to_timestamp_invalid() -> Result<()> {
// Test parsing invalid formats
// It would be nice to make these messages better
expect_timestamp_parse_error("", "Error parsing '' as timestamp");
expect_timestamp_parse_error("SS", "Error parsing 'SS' as timestamp");
expect_timestamp_parse_error(
"Wed, 18 Feb 2015 23:16:09 GMT",
"Error parsing 'Wed, 18 Feb 2015 23:16:09 GMT' as timestamp",
);
Ok(())
}
// Parse a timestamp to timestamp int with a useful human readable error message
fn parse_timestamp(s: &str) -> Result<i64> {
let result = string_to_timestamp_nanos(s);
if let Err(e) = &result {
eprintln!("Error parsing timestamp '{}': {:?}", s, e);
}
result
}
fn expect_timestamp_parse_error(s: &str, expected_err: &str) {
match string_to_timestamp_nanos(s) {
Ok(v) => assert!(
false,
"Expected error '{}' while parsing '{}', but parsed {} instead",
expected_err, s, v
),
Err(e) => {
assert!(e.to_string().contains(expected_err),
"Can not find expected error '{}' while parsing '{}'. Actual error '{}'",
expected_err, s, e);
}
}
}
#[test]
fn to_timestamp_arrays_and_nulls() -> Result<()> {
// ensure that arrow array implementation is wired up and handles nulls correctly
let mut string_builder = StringBuilder::new(2);
let mut ts_builder = TimestampNanosecondArray::builder(2);
string_builder.append_value("2020-09-08T13:42:29.190855Z")?;
ts_builder.append_value(1599572549190855000)?;
string_builder.append_null()?;
ts_builder.append_null()?;
let string_array = Arc::new(string_builder.finish());
let parsed_timestamps = to_timestamp(&[string_array])
.expect("that to_timestamp parsed values without error");
let expected_timestamps = ts_builder.finish();
assert_eq!(parsed_timestamps.len(), 2);
assert_eq!(expected_timestamps, parsed_timestamps);
Ok(())
}
#[test]
fn to_timestamp_invalid_input_type() -> Result<()> {
// pass the wrong type of input array to to_timestamp and test
// that we get an error.
let mut builder = Int64Array::builder(1);
builder.append_value(1)?;
let int64array = Arc::new(builder.finish());
let expected_err =
"Internal error: could not cast to_timestamp input to StringArray";
match to_timestamp(&[int64array]) {
Ok(_) => panic!("Expected error but got success"),
Err(e) => {
assert!(
e.to_string().contains(expected_err),
"Can not find expected error '{}'. Actual error '{}'",
expected_err,
e
);
}
}
Ok(())
}
}
|
tetime_to_timestamp(naive_d
|
identifier_name
|
datetime_expressions.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! DateTime expressions
use std::sync::Arc;
use crate::error::{DataFusionError, Result};
use arrow::{
array::{Array, ArrayData, ArrayRef, StringArray, TimestampNanosecondArray},
buffer::Buffer,
datatypes::{DataType, TimeUnit, ToByteSlice},
};
use chrono::{prelude::*, LocalResult};
#[inline]
/// Accepts a string in RFC3339 / ISO8601 standard format and some
/// variants and converts it to a nanosecond precision timestamp.
///
/// Implements the `to_timestamp` function to convert a string to a
/// timestamp, following the model of spark SQL’s to_`timestamp`.
///
/// In addition to RFC3339 / ISO8601 standard timestamps, it also
/// accepts strings that use a space ` ` to separate the date and time
/// as well as strings that have no explicit timezone offset.
///
/// Examples of accepted inputs:
/// * `1997-01-31T09:26:56.123Z` # RCF3339
/// * `1997-01-31T09:26:56.123-05:00` # RCF3339
/// * `1997-01-31 09:26:56.123-05:00` # close to RCF3339 but with a space rather than T
/// * `1997-01-31T09:26:56.123` # close to RCF3339 but no timezone offset specified
/// * `1997-01-31 09:26:56.123` # close to RCF3339 but uses a space and no timezone offset
/// * `1997-01-31 09:26:56` # close to RCF3339, no fractional seconds
//
/// Internally, this function uses the `chrono` library for the
/// datetime parsing
///
/// We hope to extend this function in the future with a second
/// parameter to specifying the format string.
///
/// ## Timestamp Precision
///
/// DataFusion uses the maximum precision timestamps supported by
/// Arrow (nanoseconds stored as a 64-bit integer) timestamps. This
/// means the range of dates that timestamps can represent is ~1677 AD
/// to 2262 AM
///
///
/// ## Timezone / Offset Handling
///
/// By using the Arrow format, DataFusion inherits Arrow’s handling of
/// timestamp values. Specifically, the stored numerical values of
/// timestamps are stored compared to offset UTC.
///
/// This function intertprets strings without an explicit time zone as
/// timestamps with offsets of the local time on the machine that ran
/// the datafusion query
///
/// For example, `1997-01-31 09:26:56.123Z` is interpreted as UTC, as
/// it has an explicit timezone specifier (“Z” for Zulu/UTC)
///
/// `1997-01-31T09:26:56.123` is interpreted as a local timestamp in
/// the timezone of the machine that ran DataFusion. For example, if
/// the system timezone is set to Americas/New_York (UTC-5) the
/// timestamp will be interpreted as though it were
/// `1997-01-31T09:26:56.123-05:00`
fn string_to_timestamp_nanos(s: &str) -> Result<i64> {
// Fast path: RFC3339 timestamp (with a T)
// Example: 2020-09-08T13:42:29.190855Z
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
return Ok(ts.timestamp_nanos());
}
// Implement quasi-RFC3339 support by trying to parse the
// timestamp with various other format specifiers to to support
// separating the date and time with a space'' rather than 'T' to be
// (more) compatible with Apache Spark SQL
// timezone offset, using'' as a separator
// Example: 2020-09-08 13:42:29.190855-05:00
if let Ok(ts) = DateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f%:z") {
return Ok(ts.timestamp_nanos());
}
// with an explicit Z, using'' as a separator
// Example: 2020-09-08 13:42:29Z
if let Ok(ts) = Utc.datetime_from_str(s, "%Y-%m-%d %H:%M:%S%.fZ") {
return Ok(ts.timestamp_nanos());
}
// Support timestamps without an explicit timezone offset, again
// to be compatible with what Apache Spark SQL does.
// without a timezone specifier as a local time, using T as a separator
// Example: 2020-09-08T13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using T as a
// separator, no fractional seconds
// Example: 2020-09-08T13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a separator
// Example: 2020-09-08 13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a
// separator, no fractional seconds
// Example: 2020-09-08 13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// Note we don't pass along the error message from the underlying
// chrono parsing because we tried several different format
// strings and we don't know which the user was trying to
// match. Ths any of the specific error messages is likely to be
// be more confusing than helpful
Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp",
s
)))
}
/// Converts the naive datetime (which has no specific timezone) to a
/// nanosecond epoch timestamp relative to UTC.
fn naive_datetime_to_timestamp(s: &str, datetime: NaiveDateTime) -> Result<i64> {
let l = Local {};
match l.from_local_datetime(&datetime) {
LocalResult::None => Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp: local time representation is invalid",
s
))),
LocalResult::Single(local_datetime) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
// Ambiguous times can happen if the timestamp is exactly when
// a daylight savings time transition occurs, for example, and
// so the datetime could validly be said to be in two
// potential offsets. However, since we are about to convert
// to UTC anyways, we can pick one arbitrarily
LocalResult::Ambiguous(local_datetime, _) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
}
}
/// convert an array of strings into `Timestamp(Nanosecond, None)`
pub fn to_timestamp(args: &[ArrayRef]) -> Result<TimestampNanosecondArray> {
let num_rows = args[0].len();
let string_args =
&args[0]
.as_any()
.downcast_ref::<StringArray>()
.ok_or_else(|| {
DataFusionError::Internal(format!(
"could not cast to_timestamp input to StringArray"
))
})?;
let result = (0..num_rows)
.map(|i| {
if string_args.is_null(i) {
// NB: Since we use the same null bitset as the input,
// the output for this value will be ignored, but we
// need some value in the array we are building.
Ok(0)
} else {
string_to_timestamp_nanos(string_args.value(i))
}
})
.collect::<Result<Vec<_>>>()?;
let data = ArrayData::new(
DataType::Timestamp(TimeUnit::Nanosecond, None),
num_rows,
Some(string_args.null_count()),
string_args.data().null_buffer().cloned(),
0,
vec![Buffer::from(result.to_byte_slice())],
vec![],
);
Ok(TimestampNanosecondArray::from(Arc::new(data)))
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow::array::{Int64Array, StringBuilder};
use super::*;
#[test]
fn string_to_timestamp_timezone() -> Result<()> {
// Explicit timezone
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08T13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08T13:42:29.190855-05:00")?
);
Ok(())
}
#[test]
fn string_to_timestamp_timezone_space() -> Result<()> {
// Ensure space rather than T between time and date is accepted
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08 13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08 13:42:29.190855-05:00")?
);
Ok(())
}
/// Interprets a naive_datetime (with no explicit timzone offset)
/// using the local timezone and returns the timestamp in UTC (0
/// offset)
fn naive_datetime_to_timestamp(naive_datetime: &NaiveDateTime) -> i64 {
// Note: Use chrono APIs that are different than
// naive_datetime_to_timestamp to compute the utc offset to
// try and double check the logic
let utc_offset_secs = match Local.offset_from_local_datetime(&naive_datetime) {
LocalResult::Single(local_offset) => {
local_offset.fix().local_minus_utc() as i64
}
_ => panic!("Unexpected failure converting to local datetime"),
};
let utc_offset_nanos = utc_offset_secs * 1_000_000_000;
naive_datetime.timestamp_nanos() - utc_offset_nanos
}
#[test]
fn string_to_timestamp_no_timezone() -> Result<()> {
// This test is designed to succeed in regardless of the local
// timezone the test machine is running. Thus it is still
// somewhat suceptable to bugs in the use of chrono
let naive_datetime = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms_nano(13, 42, 29, 190855),
);
// Ensure both T and'' variants work
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08T13:42:29.190855")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08 13:42:29.190855")?
);
// Also ensure that parsing timestamps with no fractional
// second part works as well
let naive_datetime_whole_secs = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms(13, 42, 29),
);
|
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08T13:42:29")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08 13:42:29")?
);
Ok(())
}
#[test]
fn string_to_timestamp_invalid() -> Result<()> {
// Test parsing invalid formats
// It would be nice to make these messages better
expect_timestamp_parse_error("", "Error parsing '' as timestamp");
expect_timestamp_parse_error("SS", "Error parsing 'SS' as timestamp");
expect_timestamp_parse_error(
"Wed, 18 Feb 2015 23:16:09 GMT",
"Error parsing 'Wed, 18 Feb 2015 23:16:09 GMT' as timestamp",
);
Ok(())
}
// Parse a timestamp to timestamp int with a useful human readable error message
fn parse_timestamp(s: &str) -> Result<i64> {
let result = string_to_timestamp_nanos(s);
if let Err(e) = &result {
eprintln!("Error parsing timestamp '{}': {:?}", s, e);
}
result
}
fn expect_timestamp_parse_error(s: &str, expected_err: &str) {
match string_to_timestamp_nanos(s) {
Ok(v) => assert!(
false,
"Expected error '{}' while parsing '{}', but parsed {} instead",
expected_err, s, v
),
Err(e) => {
assert!(e.to_string().contains(expected_err),
"Can not find expected error '{}' while parsing '{}'. Actual error '{}'",
expected_err, s, e);
}
}
}
#[test]
fn to_timestamp_arrays_and_nulls() -> Result<()> {
// ensure that arrow array implementation is wired up and handles nulls correctly
let mut string_builder = StringBuilder::new(2);
let mut ts_builder = TimestampNanosecondArray::builder(2);
string_builder.append_value("2020-09-08T13:42:29.190855Z")?;
ts_builder.append_value(1599572549190855000)?;
string_builder.append_null()?;
ts_builder.append_null()?;
let string_array = Arc::new(string_builder.finish());
let parsed_timestamps = to_timestamp(&[string_array])
.expect("that to_timestamp parsed values without error");
let expected_timestamps = ts_builder.finish();
assert_eq!(parsed_timestamps.len(), 2);
assert_eq!(expected_timestamps, parsed_timestamps);
Ok(())
}
#[test]
fn to_timestamp_invalid_input_type() -> Result<()> {
// pass the wrong type of input array to to_timestamp and test
// that we get an error.
let mut builder = Int64Array::builder(1);
builder.append_value(1)?;
let int64array = Arc::new(builder.finish());
let expected_err =
"Internal error: could not cast to_timestamp input to StringArray";
match to_timestamp(&[int64array]) {
Ok(_) => panic!("Expected error but got success"),
Err(e) => {
assert!(
e.to_string().contains(expected_err),
"Can not find expected error '{}'. Actual error '{}'",
expected_err,
e
);
}
}
Ok(())
}
}
|
// Ensure both T and ' ' variants work
assert_eq!(
|
random_line_split
|
datetime_expressions.rs
|
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
//! DateTime expressions
use std::sync::Arc;
use crate::error::{DataFusionError, Result};
use arrow::{
array::{Array, ArrayData, ArrayRef, StringArray, TimestampNanosecondArray},
buffer::Buffer,
datatypes::{DataType, TimeUnit, ToByteSlice},
};
use chrono::{prelude::*, LocalResult};
#[inline]
/// Accepts a string in RFC3339 / ISO8601 standard format and some
/// variants and converts it to a nanosecond precision timestamp.
///
/// Implements the `to_timestamp` function to convert a string to a
/// timestamp, following the model of spark SQL’s to_`timestamp`.
///
/// In addition to RFC3339 / ISO8601 standard timestamps, it also
/// accepts strings that use a space ` ` to separate the date and time
/// as well as strings that have no explicit timezone offset.
///
/// Examples of accepted inputs:
/// * `1997-01-31T09:26:56.123Z` # RCF3339
/// * `1997-01-31T09:26:56.123-05:00` # RCF3339
/// * `1997-01-31 09:26:56.123-05:00` # close to RCF3339 but with a space rather than T
/// * `1997-01-31T09:26:56.123` # close to RCF3339 but no timezone offset specified
/// * `1997-01-31 09:26:56.123` # close to RCF3339 but uses a space and no timezone offset
/// * `1997-01-31 09:26:56` # close to RCF3339, no fractional seconds
//
/// Internally, this function uses the `chrono` library for the
/// datetime parsing
///
/// We hope to extend this function in the future with a second
/// parameter to specifying the format string.
///
/// ## Timestamp Precision
///
/// DataFusion uses the maximum precision timestamps supported by
/// Arrow (nanoseconds stored as a 64-bit integer) timestamps. This
/// means the range of dates that timestamps can represent is ~1677 AD
/// to 2262 AM
///
///
/// ## Timezone / Offset Handling
///
/// By using the Arrow format, DataFusion inherits Arrow’s handling of
/// timestamp values. Specifically, the stored numerical values of
/// timestamps are stored compared to offset UTC.
///
/// This function intertprets strings without an explicit time zone as
/// timestamps with offsets of the local time on the machine that ran
/// the datafusion query
///
/// For example, `1997-01-31 09:26:56.123Z` is interpreted as UTC, as
/// it has an explicit timezone specifier (“Z” for Zulu/UTC)
///
/// `1997-01-31T09:26:56.123` is interpreted as a local timestamp in
/// the timezone of the machine that ran DataFusion. For example, if
/// the system timezone is set to Americas/New_York (UTC-5) the
/// timestamp will be interpreted as though it were
/// `1997-01-31T09:26:56.123-05:00`
fn string_to_timestamp_nanos(s: &str) -> Result<i64> {
// Fast path: RFC3339 timestamp (with a T)
// Example: 2020-09-08T13:42:29.190855Z
if let Ok(ts) = DateTime::parse_from_rfc3339(s) {
return Ok(ts.timestamp_nanos());
}
// Implement quasi-RFC3339 support by trying to parse the
// timestamp with various other format specifiers to to support
// separating the date and time with a space'' rather than 'T' to be
// (more) compatible with Apache Spark SQL
// timezone offset, using'' as a separator
// Example: 2020-09-08 13:42:29.190855-05:00
if let Ok(ts) = DateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S%.f%:z") {
return Ok(ts.timestamp_nanos());
}
// with an explicit Z, using'' as a separator
// Example: 2020-09-08 13:42:29Z
if let Ok(ts) = Utc.datetime_from_str(s, "%Y-%m-%d %H:%M:%S%.fZ") {
return Ok(ts.timestamp_nanos());
}
// Support timestamps without an explicit timezone offset, again
// to be compatible with what Apache Spark SQL does.
// without a timezone specifier as a local time, using T as a separator
// Example: 2020-09-08T13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using T as a
// separator, no fractional seconds
// Example: 2020-09-08T13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a separator
// Example: 2020-09-08 13:42:29.190855
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S.%f") {
return naive_datetime_to_timestamp(s, ts);
}
// without a timezone specifier as a local time, using'' as a
// separator, no fractional seconds
// Example: 2020-09-08 13:42:29
if let Ok(ts) = NaiveDateTime::parse_from_str(s, "%Y-%m-%d %H:%M:%S") {
return naive_datetime_to_timestamp(s, ts);
}
// Note we don't pass along the error message from the underlying
// chrono parsing because we tried several different format
// strings and we don't know which the user was trying to
// match. Ths any of the specific error messages is likely to be
// be more confusing than helpful
Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp",
s
)))
}
/// Converts the naive datetime (which has no specific timezone) to a
/// nanosecond epoch timestamp relative to UTC.
fn naive_datetime_to_timestamp(s: &str, datetime: NaiveDateTime) -> Result<i64> {
let l = Local {};
match l.from_local_datetime(&datetime) {
LocalResult::None => Err(DataFusionError::Execution(format!(
"Error parsing '{}' as timestamp: local time representation is invalid",
s
))),
LocalResult::Single(local_datetime) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
// Ambiguous times can happen if the timestamp is exactly when
// a daylight savings time transition occurs, for example, and
// so the datetime could validly be said to be in two
// potential offsets. However, since we are about to convert
// to UTC anyways, we can pick one arbitrarily
LocalResult::Ambiguous(local_datetime, _) => {
Ok(local_datetime.with_timezone(&Utc).timestamp_nanos())
}
}
}
/// convert an array of strings into `Timestamp(Nanosecond, None)`
pub fn to_timestamp(args: &[ArrayRef]) -> Result<TimestampNanosecondArray> {
let num_rows = args[0].len();
let string_args =
&args[0]
.as_any()
.downcast_ref::<StringArray>()
.ok_or_else(|| {
DataFusionError::Internal(format!(
"could not cast to_timestamp input to StringArray"
))
})?;
let result = (0..num_rows)
.map(|i| {
if string_args.is_null(i) {
// NB: Since we use the same null bitset as the input,
// the output for this value will be ignored, but we
// need some value in the array we are building.
Ok(0)
} else {
string_to_timestamp_nanos(string_args.value(i))
}
})
.collect::<Result<Vec<_>>>()?;
let data = ArrayData::new(
DataType::Timestamp(TimeUnit::Nanosecond, None),
num_rows,
Some(string_args.null_count()),
string_args.data().null_buffer().cloned(),
0,
vec![Buffer::from(result.to_byte_slice())],
vec![],
);
Ok(TimestampNanosecondArray::from(Arc::new(data)))
}
#[cfg(test)]
mod tests {
use std::sync::Arc;
use arrow::array::{Int64Array, StringBuilder};
use super::*;
#[test]
fn string_to_timestamp_timezone() -> Result<()> {
// Explicit timezone
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08T13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08T13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08T13:42:29.190855-05:00")?
);
Ok(())
}
#[test]
fn string_to_timestamp_timezone_space() -> Result<()> {
// Ensure space rather than T between time and date is accepted
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855+00:00")?
);
assert_eq!(
1599572549190855000,
parse_timestamp("2020-09-08 13:42:29.190855Z")?
);
assert_eq!(
1599572549000000000,
parse_timestamp("2020-09-08 13:42:29Z")?
); // no fractional part
assert_eq!(
1599590549190855000,
parse_timestamp("2020-09-08 13:42:29.190855-05:00")?
);
Ok(())
}
/// Interprets a naive_datetime (with no explicit timzone offset)
/// using the local timezone and returns the timestamp in UTC (0
/// offset)
fn naive_datetime_to_timestamp(naive_datetime: &NaiveDateTime) -> i64 {
|
test]
fn string_to_timestamp_no_timezone() -> Result<()> {
// This test is designed to succeed in regardless of the local
// timezone the test machine is running. Thus it is still
// somewhat suceptable to bugs in the use of chrono
let naive_datetime = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms_nano(13, 42, 29, 190855),
);
// Ensure both T and'' variants work
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08T13:42:29.190855")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime),
parse_timestamp("2020-09-08 13:42:29.190855")?
);
// Also ensure that parsing timestamps with no fractional
// second part works as well
let naive_datetime_whole_secs = NaiveDateTime::new(
NaiveDate::from_ymd(2020, 09, 08),
NaiveTime::from_hms(13, 42, 29),
);
// Ensure both T and'' variants work
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08T13:42:29")?
);
assert_eq!(
naive_datetime_to_timestamp(&naive_datetime_whole_secs),
parse_timestamp("2020-09-08 13:42:29")?
);
Ok(())
}
#[test]
fn string_to_timestamp_invalid() -> Result<()> {
// Test parsing invalid formats
// It would be nice to make these messages better
expect_timestamp_parse_error("", "Error parsing '' as timestamp");
expect_timestamp_parse_error("SS", "Error parsing 'SS' as timestamp");
expect_timestamp_parse_error(
"Wed, 18 Feb 2015 23:16:09 GMT",
"Error parsing 'Wed, 18 Feb 2015 23:16:09 GMT' as timestamp",
);
Ok(())
}
// Parse a timestamp to timestamp int with a useful human readable error message
fn parse_timestamp(s: &str) -> Result<i64> {
let result = string_to_timestamp_nanos(s);
if let Err(e) = &result {
eprintln!("Error parsing timestamp '{}': {:?}", s, e);
}
result
}
fn expect_timestamp_parse_error(s: &str, expected_err: &str) {
match string_to_timestamp_nanos(s) {
Ok(v) => assert!(
false,
"Expected error '{}' while parsing '{}', but parsed {} instead",
expected_err, s, v
),
Err(e) => {
assert!(e.to_string().contains(expected_err),
"Can not find expected error '{}' while parsing '{}'. Actual error '{}'",
expected_err, s, e);
}
}
}
#[test]
fn to_timestamp_arrays_and_nulls() -> Result<()> {
// ensure that arrow array implementation is wired up and handles nulls correctly
let mut string_builder = StringBuilder::new(2);
let mut ts_builder = TimestampNanosecondArray::builder(2);
string_builder.append_value("2020-09-08T13:42:29.190855Z")?;
ts_builder.append_value(1599572549190855000)?;
string_builder.append_null()?;
ts_builder.append_null()?;
let string_array = Arc::new(string_builder.finish());
let parsed_timestamps = to_timestamp(&[string_array])
.expect("that to_timestamp parsed values without error");
let expected_timestamps = ts_builder.finish();
assert_eq!(parsed_timestamps.len(), 2);
assert_eq!(expected_timestamps, parsed_timestamps);
Ok(())
}
#[test]
fn to_timestamp_invalid_input_type() -> Result<()> {
// pass the wrong type of input array to to_timestamp and test
// that we get an error.
let mut builder = Int64Array::builder(1);
builder.append_value(1)?;
let int64array = Arc::new(builder.finish());
let expected_err =
"Internal error: could not cast to_timestamp input to StringArray";
match to_timestamp(&[int64array]) {
Ok(_) => panic!("Expected error but got success"),
Err(e) => {
assert!(
e.to_string().contains(expected_err),
"Can not find expected error '{}'. Actual error '{}'",
expected_err,
e
);
}
}
Ok(())
}
}
|
// Note: Use chrono APIs that are different than
// naive_datetime_to_timestamp to compute the utc offset to
// try and double check the logic
let utc_offset_secs = match Local.offset_from_local_datetime(&naive_datetime) {
LocalResult::Single(local_offset) => {
local_offset.fix().local_minus_utc() as i64
}
_ => panic!("Unexpected failure converting to local datetime"),
};
let utc_offset_nanos = utc_offset_secs * 1_000_000_000;
naive_datetime.timestamp_nanos() - utc_offset_nanos
}
#[
|
identifier_body
|
expand.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 created `expand` module to eliminate most allocs during setup
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::char::from_u32;
use std::cmp::min;
use std::iter::Peekable;
use std::ops::Range;
#[inline]
fn unescape_char(c: char) -> char {
match c {
'a' => 0x07u8 as char,
'b' => 0x08u8 as char,
'f' => 0x0cu8 as char,
'v' => 0x0bu8 as char,
'n' => '\n',
'r' => '\r',
't' => '\t',
_ => c,
}
}
struct Unescape<'a> {
string: &'a str,
}
impl<'a> Iterator for Unescape<'a> {
type Item = char;
#[inline]
fn size_hint(&self) -> (usize, Option<usize>)
|
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.string.len() == 0 {
return None;
}
// is the next character an escape?
let (ret, idx) = match self.string.chars().next().unwrap() {
'\\' if self.string.len() > 1 => {
// yes---it's \ and it's not the last char in a string
// we know that \ is 1 byte long so we can index into the string safely
let c = self.string[1..].chars().next().unwrap();
(Some(unescape_char(c)), 1 + c.len_utf8())
},
c => (Some(c), c.len_utf8()), // not an escape char
};
self.string = &self.string[idx..]; // advance the pointer to the next char
ret
}
}
pub struct ExpandSet<'a> {
range: Range<u32>,
unesc: Peekable<Unescape<'a>>,
}
impl<'a> Iterator for ExpandSet<'a> {
type Item = char;
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.unesc.size_hint()
}
#[inline]
fn next(&mut self) -> Option<Self::Item> {
// while the Range has elements, try to return chars from it
// but make sure that they actually turn out to be Chars!
while let Some(n) = self.range.next() {
if let Some(c) = from_u32(n) {
return Some(c);
}
}
if let Some(first) = self.unesc.next() {
// peek ahead
if self.unesc.peek() == Some(&'-') && match self.unesc.size_hint() {
(x, _) if x > 1 => true, // there's a range here; record it in our internal Range struct
_ => false,
} {
self.unesc.next(); // this is the '-'
let last = self.unesc.next().unwrap(); // this is the end of the range
self.range = first as u32 + 1.. last as u32 + 1;
}
return Some(first); // in any case, return the next char
}
None
}
}
impl<'a> ExpandSet<'a> {
#[inline]
pub fn new(s: &'a str) -> ExpandSet<'a> {
ExpandSet {
range: 0.. 0,
unesc: Unescape { string: s }.peekable(),
}
}
}
|
{
let slen = self.string.len();
(min(slen, 1), None)
}
|
identifier_body
|
expand.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 created `expand` module to eliminate most allocs during setup
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::char::from_u32;
use std::cmp::min;
use std::iter::Peekable;
use std::ops::Range;
#[inline]
fn unescape_char(c: char) -> char {
match c {
|
'v' => 0x0bu8 as char,
'n' => '\n',
'r' => '\r',
't' => '\t',
_ => c,
}
}
struct Unescape<'a> {
string: &'a str,
}
impl<'a> Iterator for Unescape<'a> {
type Item = char;
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let slen = self.string.len();
(min(slen, 1), None)
}
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.string.len() == 0 {
return None;
}
// is the next character an escape?
let (ret, idx) = match self.string.chars().next().unwrap() {
'\\' if self.string.len() > 1 => {
// yes---it's \ and it's not the last char in a string
// we know that \ is 1 byte long so we can index into the string safely
let c = self.string[1..].chars().next().unwrap();
(Some(unescape_char(c)), 1 + c.len_utf8())
},
c => (Some(c), c.len_utf8()), // not an escape char
};
self.string = &self.string[idx..]; // advance the pointer to the next char
ret
}
}
pub struct ExpandSet<'a> {
range: Range<u32>,
unesc: Peekable<Unescape<'a>>,
}
impl<'a> Iterator for ExpandSet<'a> {
type Item = char;
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
self.unesc.size_hint()
}
#[inline]
fn next(&mut self) -> Option<Self::Item> {
// while the Range has elements, try to return chars from it
// but make sure that they actually turn out to be Chars!
while let Some(n) = self.range.next() {
if let Some(c) = from_u32(n) {
return Some(c);
}
}
if let Some(first) = self.unesc.next() {
// peek ahead
if self.unesc.peek() == Some(&'-') && match self.unesc.size_hint() {
(x, _) if x > 1 => true, // there's a range here; record it in our internal Range struct
_ => false,
} {
self.unesc.next(); // this is the '-'
let last = self.unesc.next().unwrap(); // this is the end of the range
self.range = first as u32 + 1.. last as u32 + 1;
}
return Some(first); // in any case, return the next char
}
None
}
}
impl<'a> ExpandSet<'a> {
#[inline]
pub fn new(s: &'a str) -> ExpandSet<'a> {
ExpandSet {
range: 0.. 0,
unesc: Unescape { string: s }.peekable(),
}
}
}
|
'a' => 0x07u8 as char,
'b' => 0x08u8 as char,
'f' => 0x0cu8 as char,
|
random_line_split
|
expand.rs
|
/*
* This file is part of the uutils coreutils package.
*
* (c) Michael Gehring <[email protected]>
* (c) kwantam <[email protected]>
* 20150428 created `expand` module to eliminate most allocs during setup
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use std::char::from_u32;
use std::cmp::min;
use std::iter::Peekable;
use std::ops::Range;
#[inline]
fn unescape_char(c: char) -> char {
match c {
'a' => 0x07u8 as char,
'b' => 0x08u8 as char,
'f' => 0x0cu8 as char,
'v' => 0x0bu8 as char,
'n' => '\n',
'r' => '\r',
't' => '\t',
_ => c,
}
}
struct Unescape<'a> {
string: &'a str,
}
impl<'a> Iterator for Unescape<'a> {
type Item = char;
#[inline]
fn
|
(&self) -> (usize, Option<usize>) {
let slen = self.string.len();
(min(slen, 1), None)
}
#[inline]
fn next(&mut self) -> Option<Self::Item> {
if self.string.len() == 0 {
return None;
}
// is the next character an escape?
let (ret, idx) = match self.string.chars().next().unwrap() {
'\\' if self.string.len() > 1 => {
// yes---it's \ and it's not the last char in a string
// we know that \ is 1 byte long so we can index into the string safely
let c = self.string[1..].chars().next().unwrap();
(Some(unescape_char(c)), 1 + c.len_utf8())
},
c => (Some(c), c.len_utf8()), // not an escape char
};
self.string = &self.string[idx..]; // advance the pointer to the next char
ret
}
}
/// Iterator over the expansion of a `tr`-style set string: escapes are
/// resolved and `a-z` range expressions are expanded char by char.
pub struct ExpandSet<'a> {
    // Code points still pending from a range being expanded (end exclusive).
    range: Range<u32>,
    // Escape-resolving, peekable view of the remaining set string.
    unesc: Peekable<Unescape<'a>>,
}
impl<'a> Iterator for ExpandSet<'a> {
    type Item = char;

    /// Delegates to the underlying unescaper; pending range elements are
    /// not counted, so this is only a lower-bound-style hint.
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.unesc.size_hint()
    }

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        // while the Range has elements, try to return chars from it
        // but make sure that they actually turn out to be Chars!
        // (from_u32 returns None for invalid code points, e.g. surrogates,
        // which are silently skipped)
        while let Some(n) = self.range.next() {
            if let Some(c) = from_u32(n) {
                return Some(c);
            }
        }
        if let Some(first) = self.unesc.next() {
            // peek ahead: a '-' followed by at least one more char means a
            // range expression like `a-z`; a trailing lone '-' is literal
            if self.unesc.peek() == Some(&'-') && match self.unesc.size_hint() {
                (x, _) if x > 1 => true, // there's a range here; record it in our internal Range struct
                _ => false,
            } {
                self.unesc.next(); // this is the '-'
                let last = self.unesc.next().unwrap(); // this is the end of the range
                // store first+1 ..= last; `first` itself is returned below
                self.range = first as u32 + 1.. last as u32 + 1;
            }
            return Some(first); // in any case, return the next char
        }
        None
    }
}
impl<'a> ExpandSet<'a> {
    /// Create an `ExpandSet` iterating over the expansion of the set
    /// description `s`.
    #[inline]
    pub fn new(s: &'a str) -> ExpandSet<'a> {
        // No range expansion is pending yet, so begin with an empty range.
        let pending = 0..0;
        ExpandSet {
            unesc: Unescape { string: s }.peekable(),
            range: pending,
        }
    }
}
|
size_hint
|
identifier_name
|
htmlolistelement.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use dom::bindings::codegen::Bindings::HTMLOListElementBinding;
use dom::bindings::codegen::InheritTypes::HTMLOListElementDerived;
use dom::bindings::js::{JSRef, Temporary};
use dom::bindings::utils::{Reflectable, Reflector};
use dom::document::Document;
use dom::element::HTMLOListElementTypeId;
use dom::eventtarget::{EventTarget, NodeTargetTypeId};
use dom::htmlelement::HTMLElement;
use dom::node::{Node, ElementNodeTypeId};
use servo_util::str::DOMString;
// DOM node backing an HTML `<ol>` (ordered list) element; shared element
// behaviour is delegated to the embedded `HTMLElement`.
#[deriving(Encodable)]
pub struct HTMLOListElement {
    pub htmlelement: HTMLElement,
}
impl HTMLOListElementDerived for EventTarget {
    /// Downcast type test: true iff this event target is an element node
    /// whose element type id is `HTMLOListElementTypeId`.
    fn is_htmlolistelement(&self) -> bool {
        self.type_id == NodeTargetTypeId(ElementNodeTypeId(HTMLOListElementTypeId))
    }
}
impl HTMLOListElement {
    /// Build the plain (not yet JS-reflected) representation of an `<ol>`.
    pub fn new_inherited(localName: DOMString, document: &JSRef<Document>) -> HTMLOListElement {
        let inherited = HTMLElement::new_inherited(HTMLOListElementTypeId, localName, document);
        HTMLOListElement { htmlelement: inherited }
    }

    /// Build an `<ol>` and reflect it into the JS heap.
    pub fn new(localName: DOMString, document: &JSRef<Document>) -> Temporary<HTMLOListElement> {
        Node::reflect_node(box HTMLOListElement::new_inherited(localName, document),
                           document,
                           HTMLOListElementBinding::Wrap)
    }
}
// Spec-defined methods of `<ol>`; none are implemented yet.
pub trait HTMLOListElementMethods {
}
impl Reflectable for HTMLOListElement {
    // Delegate reflector access to the embedded `HTMLElement`.
    fn reflector<'a>(&'a self) -> &'a Reflector {
        self.htmlelement.reflector()
    }
}
|
random_line_split
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.