| file_name (large_string, lengths 4–69) | prefix (large_string, lengths 0–26.7k) | suffix (large_string, lengths 0–24.8k) | middle (large_string, lengths 0–2.12k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
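Each row is a fill-in-the-middle (FIM) sample: the source file is split into a `prefix`, a held-out `middle`, and a `suffix`, and `fim_type` records how the held-out span was chosen (`identifier_name`, `identifier_body`, `conditional_block`, or `random_line_split`). The minimal Rust sketch below shows how a row could be reassembled into the original source text; the `FimExample` struct and `reassemble` helper are hypothetical names introduced only for illustration, not part of any dataset tooling.

```rust
// Illustrative only: the fields mirror the dataset columns above.
struct FimExample {
    file_name: String,
    prefix: String,
    middle: String,
    suffix: String,
    fim_type: String,
}

impl FimExample {
    // Splice the held-out `middle` back between `prefix` and `suffix`
    // to recover the original file contents.
    fn reassemble(&self) -> String {
        format!("{}{}{}", self.prefix, self.middle, self.suffix)
    }
}

fn main() {
    // Values abridged from the `udp.rs` row whose fim_type is `identifier_name`.
    let row = FimExample {
        file_name: "udp.rs".to_string(),
        prefix: "pub fn ".to_string(),
        middle: "bind".to_string(),
        suffix: "(addr: SocketAddr) -> io::Result<net::UdpSocket> { /* ... */ }".to_string(),
        fim_type: "identifier_name".to_string(),
    };
    assert!(row.reassemble().starts_with("pub fn bind(addr: SocketAddr)"));
    println!("{} ({})", row.file_name, row.fim_type);
}
```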
udp.rs
|
use crate::sys::unix::net::{new_ip_socket, socket_addr};
use std::io;
use std::mem;
use std::net::{self, SocketAddr};
use std::os::unix::io::{AsRawFd, FromRawFd};
pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket>
|
pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
let mut optval: libc::c_int = 0;
let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
syscall!(getsockopt(
socket.as_raw_fd(),
libc::IPPROTO_IPV6,
libc::IPV6_V6ONLY,
&mut optval as *mut _ as *mut _,
&mut optlen,
))?;
Ok(optval != 0)
}
|
{
// Gives a warning for non-Apple platforms.
#[allow(clippy::let_and_return)]
let socket = new_ip_socket(addr, libc::SOCK_DGRAM);
socket.and_then(|socket| {
let (raw_addr, raw_addr_length) = socket_addr(&addr);
syscall!(bind(socket, raw_addr.as_ptr(), raw_addr_length))
.map_err(|err| {
// Close the socket if we hit an error, ignoring the error
// from closing since we can't pass back two errors.
let _ = unsafe { libc::close(socket) };
err
})
.map(|_| unsafe { net::UdpSocket::from_raw_fd(socket) })
})
}
|
identifier_body
|
udp.rs
|
use crate::sys::unix::net::{new_ip_socket, socket_addr};
use std::io;
use std::mem;
use std::net::{self, SocketAddr};
use std::os::unix::io::{AsRawFd, FromRawFd};
pub fn
|
(addr: SocketAddr) -> io::Result<net::UdpSocket> {
// Gives a warning for non-Apple platforms.
#[allow(clippy::let_and_return)]
let socket = new_ip_socket(addr, libc::SOCK_DGRAM);
socket.and_then(|socket| {
let (raw_addr, raw_addr_length) = socket_addr(&addr);
syscall!(bind(socket, raw_addr.as_ptr(), raw_addr_length))
.map_err(|err| {
// Close the socket if we hit an error, ignoring the error
// from closing since we can't pass back two errors.
let _ = unsafe { libc::close(socket) };
err
})
.map(|_| unsafe { net::UdpSocket::from_raw_fd(socket) })
})
}
pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
let mut optval: libc::c_int = 0;
let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
syscall!(getsockopt(
socket.as_raw_fd(),
libc::IPPROTO_IPV6,
libc::IPV6_V6ONLY,
&mut optval as *mut _ as *mut _,
&mut optlen,
))?;
Ok(optval != 0)
}
|
bind
|
identifier_name
|
udp.rs
|
use crate::sys::unix::net::{new_ip_socket, socket_addr};
|
use std::io;
use std::mem;
use std::net::{self, SocketAddr};
use std::os::unix::io::{AsRawFd, FromRawFd};
pub fn bind(addr: SocketAddr) -> io::Result<net::UdpSocket> {
// Gives a warning for non-Apple platforms.
#[allow(clippy::let_and_return)]
let socket = new_ip_socket(addr, libc::SOCK_DGRAM);
socket.and_then(|socket| {
let (raw_addr, raw_addr_length) = socket_addr(&addr);
syscall!(bind(socket, raw_addr.as_ptr(), raw_addr_length))
.map_err(|err| {
// Close the socket if we hit an error, ignoring the error
// from closing since we can't pass back two errors.
let _ = unsafe { libc::close(socket) };
err
})
.map(|_| unsafe { net::UdpSocket::from_raw_fd(socket) })
})
}
pub(crate) fn only_v6(socket: &net::UdpSocket) -> io::Result<bool> {
let mut optval: libc::c_int = 0;
let mut optlen = mem::size_of::<libc::c_int>() as libc::socklen_t;
syscall!(getsockopt(
socket.as_raw_fd(),
libc::IPPROTO_IPV6,
libc::IPV6_V6ONLY,
&mut optval as *mut _ as *mut _,
&mut optlen,
))?;
Ok(optval != 0)
}
|
random_line_split
|
|
reference-to-tuple.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-win32 Broken because of LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=16249
|
// compile-flags:-Z extra-debug-info
// debugger:break zzz
// debugger:run
// debugger:finish
// debugger:print *stack_val_ref
// check:$1 = {-14, -19}
// debugger:print *ref_to_unnamed
// check:$2 = {-15, -20}
// debugger:print *managed_val_ref
// check:$3 = {-16, -21}
// debugger:print *unique_val_ref
// check:$4 = {-17, -22}
fn main() {
let stack_val: (i16, f32) = (-14, -19f32);
let stack_val_ref : &(i16, f32) = &stack_val;
let ref_to_unnamed : &(i16, f32) = &(-15, -20f32);
let managed_val : @(i16, f32) = @(-16, -21f32);
let managed_val_ref : &(i16, f32) = managed_val;
let unique_val: ~(i16, f32) = ~(-17, -22f32);
let unique_val_ref : &(i16, f32) = unique_val;
zzz();
}
fn zzz() {()}
|
// GDB doesn't know about UTF-32 character encoding and will print a rust char as only its numerical
// value.
|
random_line_split
|
reference-to-tuple.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-win32 Broken because of LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=16249
// GDB doesn't know about UTF-32 character encoding and will print a rust char as only its numerical
// value.
// compile-flags:-Z extra-debug-info
// debugger:break zzz
// debugger:run
// debugger:finish
// debugger:print *stack_val_ref
// check:$1 = {-14, -19}
// debugger:print *ref_to_unnamed
// check:$2 = {-15, -20}
// debugger:print *managed_val_ref
// check:$3 = {-16, -21}
// debugger:print *unique_val_ref
// check:$4 = {-17, -22}
fn main() {
let stack_val: (i16, f32) = (-14, -19f32);
let stack_val_ref : &(i16, f32) = &stack_val;
let ref_to_unnamed : &(i16, f32) = &(-15, -20f32);
let managed_val : @(i16, f32) = @(-16, -21f32);
let managed_val_ref : &(i16, f32) = managed_val;
let unique_val: ~(i16, f32) = ~(-17, -22f32);
let unique_val_ref : &(i16, f32) = unique_val;
zzz();
}
fn
|
() {()}
|
zzz
|
identifier_name
|
reference-to-tuple.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// xfail-win32 Broken because of LLVM bug: http://llvm.org/bugs/show_bug.cgi?id=16249
// GDB doesn't know about UTF-32 character encoding and will print a rust char as only its numerical
// value.
// compile-flags:-Z extra-debug-info
// debugger:break zzz
// debugger:run
// debugger:finish
// debugger:print *stack_val_ref
// check:$1 = {-14, -19}
// debugger:print *ref_to_unnamed
// check:$2 = {-15, -20}
// debugger:print *managed_val_ref
// check:$3 = {-16, -21}
// debugger:print *unique_val_ref
// check:$4 = {-17, -22}
fn main()
|
fn zzz() {()}
|
{
let stack_val: (i16, f32) = (-14, -19f32);
let stack_val_ref : &(i16, f32) = &stack_val;
let ref_to_unnamed : &(i16, f32) = &(-15, -20f32);
let managed_val : @(i16, f32) = @(-16, -21f32);
let managed_val_ref : &(i16, f32) = managed_val;
let unique_val: ~(i16, f32) = ~(-17, -22f32);
let unique_val_ref : &(i16, f32) = unique_val;
zzz();
}
|
identifier_body
|
cast.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Unsafe casting functions
use mem;
use intrinsics;
use ptr::copy_nonoverlapping_memory;
/**
* Transform a value of one type into a value of another type.
* Both types must have the same size and alignment.
*
* # Example
*
* ```rust
* use std::cast;
*
* let v: &[u8] = unsafe { cast::transmute("L") };
* assert!(v == [76u8]);
* ```
*/
#[inline]
pub unsafe fn transmute<T, U>(thing: T) -> U {
intrinsics::transmute(thing)
}
/**
* Move a thing into the void
*
* The forget function will take ownership of the provided value but neglect
* to run any required cleanup or memory-management operations on it.
*/
#[inline]
pub unsafe fn forget<T>(thing: T) { intrinsics::forget(thing); }
/// Casts the value at `src` to U. The two types must have the same length.
#[inline]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
let mut dest: U = mem::uninit();
let dest_ptr: *mut u8 = transmute(&mut dest);
let src_ptr: *u8 = transmute(src);
copy_nonoverlapping_memory(dest_ptr, src_ptr, mem::size_of::<U>());
dest
}
/// Coerce an immutable reference to be mutable.
#[inline]
#[deprecated="casting &T to &mut T is undefined behaviour: use Cell<T>, RefCell<T> or Unsafe<T>"]
pub unsafe fn transmute_mut<'a,T>(ptr: &'a T) -> &'a mut T { transmute(ptr) }
/// Coerce a reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_lifetime<'a,'b,T>(ptr: &'a T) -> &'b T {
transmute(ptr)
}
/// Coerce an immutable reference to be mutable.
#[inline]
pub unsafe fn transmute_mut_unsafe<T>(ptr: *T) -> *mut T {
transmute(ptr)
}
/// Coerce a mutable reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_mut_lifetime<'a,'b,T>(ptr: &'a mut T) -> &'b mut T {
transmute(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime<'a,S,T>(_ptr: &'a S, ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_mut_lifetime<'a,S,T>(_ptr: &'a mut S, ptr: &mut T) -> &'a mut T
|
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime_vec<'a,S,T>(_ptr: &'a [S], ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
use cast::transmute;
use raw;
use realstd::str::StrAllocating;
#[test]
fn test_transmute_copy() {
assert_eq!(1u, unsafe { ::cast::transmute_copy(&1) });
}
#[test]
fn test_transmute() {
unsafe {
let x = @100u8;
let x: *raw::Box<u8> = transmute(x);
assert!((*x).data == 100);
let _x: @int = transmute(x);
}
}
#[test]
fn test_transmute2() {
unsafe {
assert_eq!(box [76u8], transmute("L".to_owned()));
}
}
}
|
{
transmute_mut_lifetime(ptr)
}
|
identifier_body
|
cast.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Unsafe casting functions
use mem;
use intrinsics;
use ptr::copy_nonoverlapping_memory;
/**
* Transform a value of one type into a value of another type.
* Both types must have the same size and alignment.
*
* # Example
*
* ```rust
* use std::cast;
*
* let v: &[u8] = unsafe { cast::transmute("L") };
* assert!(v == [76u8]);
* ```
*/
#[inline]
pub unsafe fn transmute<T, U>(thing: T) -> U {
intrinsics::transmute(thing)
}
/**
* Move a thing into the void
*
* The forget function will take ownership of the provided value but neglect
* to run any required cleanup or memory-management operations on it.
*/
#[inline]
pub unsafe fn forget<T>(thing: T) { intrinsics::forget(thing); }
/// Casts the value at `src` to U. The two types must have the same length.
#[inline]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
let mut dest: U = mem::uninit();
let dest_ptr: *mut u8 = transmute(&mut dest);
let src_ptr: *u8 = transmute(src);
copy_nonoverlapping_memory(dest_ptr, src_ptr, mem::size_of::<U>());
dest
}
/// Coerce an immutable reference to be mutable.
#[inline]
#[deprecated="casting &T to &mut T is undefined behaviour: use Cell<T>, RefCell<T> or Unsafe<T>"]
pub unsafe fn transmute_mut<'a,T>(ptr: &'a T) -> &'a mut T { transmute(ptr) }
/// Coerce a reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_lifetime<'a,'b,T>(ptr: &'a T) -> &'b T {
transmute(ptr)
}
/// Coerce an immutable reference to be mutable.
#[inline]
pub unsafe fn transmute_mut_unsafe<T>(ptr: *T) -> *mut T {
transmute(ptr)
}
/// Coerce a mutable reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_mut_lifetime<'a,'b,T>(ptr: &'a mut T) -> &'b mut T {
transmute(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime<'a,S,T>(_ptr: &'a S, ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_mut_lifetime<'a,S,T>(_ptr: &'a mut S, ptr: &mut T) -> &'a mut T {
transmute_mut_lifetime(ptr)
|
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime_vec<'a,S,T>(_ptr: &'a [S], ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
use cast::transmute;
use raw;
use realstd::str::StrAllocating;
#[test]
fn test_transmute_copy() {
assert_eq!(1u, unsafe { ::cast::transmute_copy(&1) });
}
#[test]
fn test_transmute() {
unsafe {
let x = @100u8;
let x: *raw::Box<u8> = transmute(x);
assert!((*x).data == 100);
let _x: @int = transmute(x);
}
}
#[test]
fn test_transmute2() {
unsafe {
assert_eq!(box [76u8], transmute("L".to_owned()));
}
}
}
|
}
|
random_line_split
|
cast.rs
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Unsafe casting functions
use mem;
use intrinsics;
use ptr::copy_nonoverlapping_memory;
/**
* Transform a value of one type into a value of another type.
* Both types must have the same size and alignment.
*
* # Example
*
* ```rust
* use std::cast;
*
* let v: &[u8] = unsafe { cast::transmute("L") };
* assert!(v == [76u8]);
* ```
*/
#[inline]
pub unsafe fn transmute<T, U>(thing: T) -> U {
intrinsics::transmute(thing)
}
/**
* Move a thing into the void
*
* The forget function will take ownership of the provided value but neglect
* to run any required cleanup or memory-management operations on it.
*/
#[inline]
pub unsafe fn forget<T>(thing: T) { intrinsics::forget(thing); }
/// Casts the value at `src` to U. The two types must have the same length.
#[inline]
pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
let mut dest: U = mem::uninit();
let dest_ptr: *mut u8 = transmute(&mut dest);
let src_ptr: *u8 = transmute(src);
copy_nonoverlapping_memory(dest_ptr, src_ptr, mem::size_of::<U>());
dest
}
/// Coerce an immutable reference to be mutable.
#[inline]
#[deprecated="casting &T to &mut T is undefined behaviour: use Cell<T>, RefCell<T> or Unsafe<T>"]
pub unsafe fn transmute_mut<'a,T>(ptr: &'a T) -> &'a mut T { transmute(ptr) }
/// Coerce a reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_lifetime<'a,'b,T>(ptr: &'a T) -> &'b T {
transmute(ptr)
}
/// Coerce an immutable reference to be mutable.
#[inline]
pub unsafe fn
|
<T>(ptr: *T) -> *mut T {
transmute(ptr)
}
/// Coerce a mutable reference to have an arbitrary associated lifetime.
#[inline]
pub unsafe fn transmute_mut_lifetime<'a,'b,T>(ptr: &'a mut T) -> &'b mut T {
transmute(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime<'a,S,T>(_ptr: &'a S, ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_mut_lifetime<'a,S,T>(_ptr: &'a mut S, ptr: &mut T) -> &'a mut T {
transmute_mut_lifetime(ptr)
}
/// Transforms lifetime of the second pointer to match the first.
#[inline]
pub unsafe fn copy_lifetime_vec<'a,S,T>(_ptr: &'a [S], ptr: &T) -> &'a T {
transmute_lifetime(ptr)
}
/****************************************************************************
* Tests
****************************************************************************/
#[cfg(test)]
mod tests {
use cast::transmute;
use raw;
use realstd::str::StrAllocating;
#[test]
fn test_transmute_copy() {
assert_eq!(1u, unsafe { ::cast::transmute_copy(&1) });
}
#[test]
fn test_transmute() {
unsafe {
let x = @100u8;
let x: *raw::Box<u8> = transmute(x);
assert!((*x).data == 100);
let _x: @int = transmute(x);
}
}
#[test]
fn test_transmute2() {
unsafe {
assert_eq!(box [76u8], transmute("L".to_owned()));
}
}
}
|
transmute_mut_unsafe
|
identifier_name
|
feature-gate-arbitrary-self-types.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::rc::Rc;
trait Foo {
fn foo(self: Rc<Box<Self>>); //~ ERROR arbitrary `self` types are unstable
}
struct Bar;
impl Foo for Bar {
fn foo(self: Rc<Box<Self>>)
|
//~ ERROR arbitrary `self` types are unstable
}
impl Bar {
fn bar(self: Box<Rc<Self>>) {} //~ ERROR arbitrary `self` types are unstable
}
fn main() {}
|
{}
|
identifier_body
|
feature-gate-arbitrary-self-types.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::rc::Rc;
trait Foo {
fn foo(self: Rc<Box<Self>>); //~ ERROR arbitrary `self` types are unstable
}
struct Bar;
impl Foo for Bar {
fn foo(self: Rc<Box<Self>>) {} //~ ERROR arbitrary `self` types are unstable
}
impl Bar {
fn bar(self: Box<Rc<Self>>) {} //~ ERROR arbitrary `self` types are unstable
}
|
fn main() {}
|
random_line_split
|
|
feature-gate-arbitrary-self-types.rs
|
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::rc::Rc;
trait Foo {
fn foo(self: Rc<Box<Self>>); //~ ERROR arbitrary `self` types are unstable
}
struct Bar;
impl Foo for Bar {
fn foo(self: Rc<Box<Self>>) {} //~ ERROR arbitrary `self` types are unstable
}
impl Bar {
fn
|
(self: Box<Rc<Self>>) {} //~ ERROR arbitrary `self` types are unstable
}
fn main() {}
|
bar
|
identifier_name
|
read_file_specific_line.rs
|
// http://rosettacode.org/wiki/Read_a_specific_line_from_a_file
use std::fs::File;
use std::io::{BufReader, BufRead};
use std::env::args;
use std::borrow::ToOwned;
fn main()
|
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
match reader.lines().skip(line_number-1).next() {
None => panic!("No such line (file is too short)"),
Some(result) => match result {
// Handle any errors that may arise
Ok(ln) => print!("{}", ln),
Err(error) => print!("{}", error)
}
}
}
|
{
let mut args = args();
let filename = {
if let Some(o_s) = args.nth(1) {
o_s.to_owned()
} else {
panic!("You must enter a filename to read line by line")
}
};
let line_number = {
if let Some(o_s) = args.next() {
o_s.to_owned()
.parse::<usize>().ok()
.expect("You must enter an integer as the line number")
} else {
panic!("You must enter a filename to read line by line")
}
};
|
identifier_body
|
read_file_specific_line.rs
|
// http://rosettacode.org/wiki/Read_a_specific_line_from_a_file
use std::fs::File;
use std::io::{BufReader, BufRead};
use std::env::args;
use std::borrow::ToOwned;
fn
|
() {
let mut args = args();
let filename = {
if let Some(o_s) = args.nth(1) {
o_s.to_owned()
} else {
panic!("You must enter a filename to read line by line")
}
};
let line_number = {
if let Some(o_s) = args.next() {
o_s.to_owned()
.parse::<usize>().ok()
.expect("You must enter an integer as the line number")
} else {
panic!("You must enter a filename to read line by line")
}
};
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
match reader.lines().skip(line_number-1).next() {
None => panic!("No such line (file is too short)"),
Some(result) => match result {
// Handle any errors that may arise
Ok(ln) => print!("{}", ln),
Err(error) => print!("{}", error)
}
}
}
|
main
|
identifier_name
|
read_file_specific_line.rs
|
// http://rosettacode.org/wiki/Read_a_specific_line_from_a_file
use std::fs::File;
use std::io::{BufReader, BufRead};
use std::env::args;
use std::borrow::ToOwned;
fn main() {
let mut args = args();
let filename = {
if let Some(o_s) = args.nth(1) {
o_s.to_owned()
} else {
panic!("You must enter a filename to read line by line")
}
};
let line_number = {
if let Some(o_s) = args.next() {
o_s.to_owned()
.parse::<usize>().ok()
.expect("You must enter an integer as the line number")
} else {
panic!("You must enter a filename to read line by line")
}
};
let file = File::open(filename).unwrap();
let reader = BufReader::new(file);
|
match reader.lines().skip(line_number-1).next() {
None => panic!("No such line (file is too short)"),
Some(result) => match result {
// Handle any errors that may arise
Ok(ln) => print!("{}", ln),
Err(error) => print!("{}", error)
}
}
}
|
random_line_split
|
|
kernel_set.rs
|
use std::collections::VecDeque;
use std::fmt::Debug;
use std::hash::Hash;
use util::{map, Map};
pub struct KernelSet<K: Kernel> {
counter: usize,
kernels: VecDeque<K>,
map: Map<K, K::Index>,
}
pub trait Kernel: Clone + Debug + Hash + Eq {
type Index: Copy + Debug;
fn index(c: usize) -> Self::Index;
}
impl<K:Kernel> KernelSet<K> {
pub fn new() -> KernelSet<K> {
KernelSet { kernels: VecDeque::new(), map: map(), counter: 0 }
}
pub fn
|
(&mut self, kernel: K) -> K::Index {
let kernels = &mut self.kernels;
let counter = &mut self.counter;
*self.map.entry(kernel.clone()).or_insert_with(|| {
let index = *counter;
*counter += 1;
kernels.push_back(kernel);
K::index(index)
})
}
pub fn next(&mut self) -> Option<K> {
self.kernels.pop_front()
}
}
|
add_state
|
identifier_name
|
kernel_set.rs
|
use std::collections::VecDeque;
use std::fmt::Debug;
use std::hash::Hash;
use util::{map, Map};
pub struct KernelSet<K: Kernel> {
counter: usize,
kernels: VecDeque<K>,
map: Map<K, K::Index>,
}
pub trait Kernel: Clone + Debug + Hash + Eq {
type Index: Copy + Debug;
fn index(c: usize) -> Self::Index;
}
impl<K:Kernel> KernelSet<K> {
pub fn new() -> KernelSet<K>
|
pub fn add_state(&mut self, kernel: K) -> K::Index {
let kernels = &mut self.kernels;
let counter = &mut self.counter;
*self.map.entry(kernel.clone()).or_insert_with(|| {
let index = *counter;
*counter += 1;
kernels.push_back(kernel);
K::index(index)
})
}
pub fn next(&mut self) -> Option<K> {
self.kernels.pop_front()
}
}
|
{
KernelSet { kernels: VecDeque::new(), map: map(), counter: 0 }
}
|
identifier_body
|
kernel_set.rs
|
use std::collections::VecDeque;
|
pub struct KernelSet<K: Kernel> {
counter: usize,
kernels: VecDeque<K>,
map: Map<K, K::Index>,
}
pub trait Kernel: Clone + Debug + Hash + Eq {
type Index: Copy + Debug;
fn index(c: usize) -> Self::Index;
}
impl<K:Kernel> KernelSet<K> {
pub fn new() -> KernelSet<K> {
KernelSet { kernels: VecDeque::new(), map: map(), counter: 0 }
}
pub fn add_state(&mut self, kernel: K) -> K::Index {
let kernels = &mut self.kernels;
let counter = &mut self.counter;
*self.map.entry(kernel.clone()).or_insert_with(|| {
let index = *counter;
*counter += 1;
kernels.push_back(kernel);
K::index(index)
})
}
pub fn next(&mut self) -> Option<K> {
self.kernels.pop_front()
}
}
|
use std::fmt::Debug;
use std::hash::Hash;
use util::{map, Map};
|
random_line_split
|
mod.rs
|
/*!
* Normalization processes a parse tree until it is in suitable form to
* be converted to the more canonical form. This is done as a series of
* passes, each contained in their own module below.
*/
use grammar::parse_tree as pt;
use grammar::repr as r;
pub type NormResult<T> = Result<T, NormError>;
#[derive(Clone, Debug)]
pub struct NormError {
pub message: String,
pub span: pt::Span,
}
macro_rules! return_err {
($span: expr, $($args:expr),+) => {
return Err(NormError {
message: format!($($args),+),
span: $span
});
}
}
pub fn normalize(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, true)
}
/// for unit tests, it is convenient to skip the validation step
#[cfg(test)]
pub fn normalize_without_validating(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, false)
}
fn
|
(grammar: pt::Grammar, validate: bool) -> NormResult<r::Grammar> {
if validate { try!(prevalidate::validate(&grammar)); }
let grammar = try!(resolve::resolve(grammar));
let grammar = try!(macro_expand::expand_macros(grammar));
if validate { try!(postvalidate::validate(&grammar)); }
let types = try!(tyinfer::infer_types(&grammar));
lower::lower(grammar, types)
}
// These are executed *IN ORDER*:
// Check most safety conditions.
mod prevalidate;
// Resolve identifiers into terminals/nonterminals etc.
mod resolve;
// Expands macros and expressions
//
// X =...1 Comma<X> (X Y Z)...2
//
// to
//
// X =...1 `Comma<X>` `(X Y Z)`...2
// `Comma_X`: Vec<<X>> =...;
// `(X Y Z)` = X Y Z;
//
// AFTER THIS POINT: No more macros, macro references, guarded
// alternatives, repeats, or expr symbols, though type indirections
// may occur.
mod macro_expand;
// Check some safety conditions that can only be tested
// after macro expansion.
mod postvalidate;
// Computes types where the user omitted them (or from macro
// byproducts).
//
// AFTER THIS POINT: there is a separate `repr::Types` table
// providing all nonterminals with an explicit type.
mod tyinfer;
// Lowers the parse tree to the repr notation.
mod lower;
///////////////////////////////////////////////////////////////////////////
// Shared routines
mod norm_util;
|
normalize_helper
|
identifier_name
|
mod.rs
|
/*!
* Normalization processes a parse tree until it is in suitable form to
* be converted to the more canonical form. This is done as a series of
* passes, each contained in their own module below.
*/
use grammar::parse_tree as pt;
use grammar::repr as r;
pub type NormResult<T> = Result<T, NormError>;
#[derive(Clone, Debug)]
pub struct NormError {
pub message: String,
pub span: pt::Span,
}
macro_rules! return_err {
($span: expr, $($args:expr),+) => {
return Err(NormError {
message: format!($($args),+),
span: $span
});
}
}
pub fn normalize(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, true)
}
/// for unit tests, it is convenient to skip the validation step
#[cfg(test)]
pub fn normalize_without_validating(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, false)
}
fn normalize_helper(grammar: pt::Grammar, validate: bool) -> NormResult<r::Grammar> {
if validate
|
let grammar = try!(resolve::resolve(grammar));
let grammar = try!(macro_expand::expand_macros(grammar));
if validate { try!(postvalidate::validate(&grammar)); }
let types = try!(tyinfer::infer_types(&grammar));
lower::lower(grammar, types)
}
// These are executed *IN ORDER*:
// Check most safety conditions.
mod prevalidate;
// Resolve identifiers into terminals/nonterminals etc.
mod resolve;
// Expands macros and expressions
//
// X =...1 Comma<X> (X Y Z)...2
//
// to
//
// X =...1 `Comma<X>` `(X Y Z)`...2
// `Comma_X`: Vec<<X>> =...;
// `(X Y Z)` = X Y Z;
//
// AFTER THIS POINT: No more macros, macro references, guarded
// alternatives, repeats, or expr symbols, though type indirections
// may occur.
mod macro_expand;
// Check some safety conditions that can only be tested
// after macro expansion.
mod postvalidate;
// Computes types where the user omitted them (or from macro
// byproducts).
//
// AFTER THIS POINT: there is a separate `repr::Types` table
// providing all nonterminals with an explicit type.
mod tyinfer;
// Lowers the parse tree to the repr notation.
mod lower;
///////////////////////////////////////////////////////////////////////////
// Shared routines
mod norm_util;
|
{ try!(prevalidate::validate(&grammar)); }
|
conditional_block
|
mod.rs
|
/*!
* Normalization processes a parse tree until it is in suitable form to
* be converted to the more canonical form. This is done as a series of
* passes, each contained in their own module below.
*/
use grammar::parse_tree as pt;
use grammar::repr as r;
pub type NormResult<T> = Result<T, NormError>;
#[derive(Clone, Debug)]
|
}
macro_rules! return_err {
($span: expr, $($args:expr),+) => {
return Err(NormError {
message: format!($($args),+),
span: $span
});
}
}
pub fn normalize(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, true)
}
/// for unit tests, it is convenient to skip the validation step
#[cfg(test)]
pub fn normalize_without_validating(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, false)
}
fn normalize_helper(grammar: pt::Grammar, validate: bool) -> NormResult<r::Grammar> {
if validate { try!(prevalidate::validate(&grammar)); }
let grammar = try!(resolve::resolve(grammar));
let grammar = try!(macro_expand::expand_macros(grammar));
if validate { try!(postvalidate::validate(&grammar)); }
let types = try!(tyinfer::infer_types(&grammar));
lower::lower(grammar, types)
}
// These are executed *IN ORDER*:
// Check most safety conditions.
mod prevalidate;
// Resolve identifiers into terminals/nonterminals etc.
mod resolve;
// Expands macros and expressions
//
// X =...1 Comma<X> (X Y Z)...2
//
// to
//
// X =...1 `Comma<X>` `(X Y Z)`...2
// `Comma_X`: Vec<<X>> =...;
// `(X Y Z)` = X Y Z;
//
// AFTER THIS POINT: No more macros, macro references, guarded
// alternatives, repeats, or expr symbols, though type indirections
// may occur.
mod macro_expand;
// Check some safety conditions that can only be tested
// after macro expansion.
mod postvalidate;
// Computes types where the user omitted them (or from macro
// byproducts).
//
// AFTER THIS POINT: there is a separate `repr::Types` table
// providing all nonterminals with an explicit type.
mod tyinfer;
// Lowers the parse tree to the repr notation.
mod lower;
///////////////////////////////////////////////////////////////////////////
// Shared routines
mod norm_util;
|
pub struct NormError {
pub message: String,
pub span: pt::Span,
|
random_line_split
|
mod.rs
|
/*!
* Normalization processes a parse tree until it is in suitable form to
* be converted to the more canonical form. This is done as a series of
* passes, each contained in their own module below.
*/
use grammar::parse_tree as pt;
use grammar::repr as r;
pub type NormResult<T> = Result<T, NormError>;
#[derive(Clone, Debug)]
pub struct NormError {
pub message: String,
pub span: pt::Span,
}
macro_rules! return_err {
($span: expr, $($args:expr),+) => {
return Err(NormError {
message: format!($($args),+),
span: $span
});
}
}
pub fn normalize(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, true)
}
/// for unit tests, it is convenient to skip the validation step
#[cfg(test)]
pub fn normalize_without_validating(grammar: pt::Grammar) -> NormResult<r::Grammar> {
normalize_helper(grammar, false)
}
fn normalize_helper(grammar: pt::Grammar, validate: bool) -> NormResult<r::Grammar>
|
// These are executed *IN ORDER*:
// Check most safety conditions.
mod prevalidate;
// Resolve identifiers into terminals/nonterminals etc.
mod resolve;
// Expands macros and expressions
//
// X =...1 Comma<X> (X Y Z)...2
//
// to
//
// X =...1 `Comma<X>` `(X Y Z)`...2
// `Comma_X`: Vec<<X>> =...;
// `(X Y Z)` = X Y Z;
//
// AFTER THIS POINT: No more macros, macro references, guarded
// alternatives, repeats, or expr symbols, though type indirections
// may occur.
mod macro_expand;
// Check some safety conditions that can only be tested
// after macro expansion.
mod postvalidate;
// Computes types where the user omitted them (or from macro
// byproducts).
//
// AFTER THIS POINT: there is a separate `repr::Types` table
// providing all nonterminals with an explicit type.
mod tyinfer;
// Lowers the parse tree to the repr notation.
mod lower;
///////////////////////////////////////////////////////////////////////////
// Shared routines
mod norm_util;
|
{
if validate { try!(prevalidate::validate(&grammar)); }
let grammar = try!(resolve::resolve(grammar));
let grammar = try!(macro_expand::expand_macros(grammar));
if validate { try!(postvalidate::validate(&grammar)); }
let types = try!(tyinfer::infer_types(&grammar));
lower::lower(grammar, types)
}
|
identifier_body
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNT {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
|
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct CNTR {
bits: u16,
}
impl CNTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _CNTW<'a> {
w: &'a mut W,
}
impl<'a> _CNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
pub fn cnt(&self) -> CNTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
CNTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
pub fn cnt(&mut self) -> _CNTW {
_CNTW { w: self }
}
}
|
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
|
identifier_body
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNT {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn
|
(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct CNTR {
bits: u16,
}
impl CNTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _CNTW<'a> {
w: &'a mut W,
}
impl<'a> _CNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
pub fn cnt(&self) -> CNTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
CNTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
pub fn cnt(&mut self) -> _CNTW {
_CNTW { w: self }
}
}
|
read
|
identifier_name
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::CNT {
#[doc = r" Modifies the contents of the register"]
#[inline(always)]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline(always)]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline(always)]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline(always)]
pub fn reset(&self) {
self.write(|w| w)
}
}
#[doc = r" Value of the field"]
pub struct CNTR {
bits: u16,
}
impl CNTR {
#[doc = r" Value of the field as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u16 {
self.bits
}
}
#[doc = r" Proxy"]
pub struct _CNTW<'a> {
w: &'a mut W,
}
impl<'a> _CNTW<'a> {
#[doc = r" Writes raw bits to the field"]
#[inline(always)]
pub fn bits(self, value: u16) -> &'a mut W {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline(always)]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
pub fn cnt(&self) -> CNTR {
let bits = {
const MASK: u16 = 65535;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u16
};
CNTR { bits }
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline(always)]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:15 - counter value"]
#[inline(always)]
|
_CNTW { w: self }
}
}
|
pub fn cnt(&mut self) -> _CNTW {
|
random_line_split
|
find_exec_tests.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
///! This file contains what would normally be unit tests for find::find_main
///! related to -exec[dir] and ok[dir] clauses.
///! But as the tests require running an external executable, they need to be run
///! as integration tests so we can ensure that our testing-commandline binary
///! has been built.
extern crate findutils;
extern crate tempdir;
extern crate walkdir;
use std::env;
use std::fs::File;
use std::io::Read;
use tempdir::TempDir;
use findutils::find::find_main;
use common::test_helpers::*;
mod common;
#[test]
fn find_exec() {
let temp_dir = TempDir::new("find_exec").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
"-exec",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
"(",
"{}",
"-o",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}\nargs=\n(\n./test_data/simple/subdir/ABBBC\n-o\n",
env::current_dir().unwrap().to_string_lossy())));
}
#[test]
fn find_execdir() {
let temp_dir = TempDir::new("find_execdir").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
// only look at files because the "size" of a directory is a system (and filesystem)
// dependent thing and we want these tests to be universal.
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
|
"{}",
",",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}/test_data/simple/subdir\nargs=\n)\n./ABBBC\n,\n",
env::current_dir().unwrap().to_string_lossy())));
}
|
"-execdir",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
")",
|
random_line_split
|
find_exec_tests.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
///! This file contains what would normally be unit tests for find::find_main
///! related to -exec[dir] and ok[dir] clauses.
///! But as the tests require running an external executable, they need to be run
///! as integration tests so we can ensure that our testing-commandline binary
///! has been built.
extern crate findutils;
extern crate tempdir;
extern crate walkdir;
use std::env;
use std::fs::File;
use std::io::Read;
use tempdir::TempDir;
use findutils::find::find_main;
use common::test_helpers::*;
mod common;
#[test]
fn find_exec() {
let temp_dir = TempDir::new("find_exec").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
"-exec",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
"(",
"{}",
"-o",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}\nargs=\n(\n./test_data/simple/subdir/ABBBC\n-o\n",
env::current_dir().unwrap().to_string_lossy())));
}
#[test]
fn
|
() {
let temp_dir = TempDir::new("find_execdir").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
// only look at files because the "size" of a directory is a system (and filesystem)
// dependent thing and we want these tests to be universal.
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
"-execdir",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
")",
"{}",
",",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}/test_data/simple/subdir\nargs=\n)\n./ABBBC\n,\n",
env::current_dir().unwrap().to_string_lossy())));
}
|
find_execdir
|
identifier_name
|
find_exec_tests.rs
|
// Copyright 2017 Google Inc.
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT.
///! This file contains what would normally be unit tests for find::find_main
///! related to -exec[dir] and ok[dir] clauses.
///! But as the tests require running an external executable, they need to be run
///! as integration tests so we can ensure that our testing-commandline binary
///! has been built.
extern crate findutils;
extern crate tempdir;
extern crate walkdir;
use std::env;
use std::fs::File;
use std::io::Read;
use tempdir::TempDir;
use findutils::find::find_main;
use common::test_helpers::*;
mod common;
#[test]
fn find_exec()
|
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}\nargs=\n(\n./test_data/simple/subdir/ABBBC\n-o\n",
env::current_dir().unwrap().to_string_lossy())));
}
#[test]
fn find_execdir() {
let temp_dir = TempDir::new("find_execdir").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
// only look at files because the "size" of a directory is a system (and filesystem)
// dependent thing and we want these tests to be universal.
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
"-execdir",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
")",
"{}",
",",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
// explicitly passed in.
assert_eq!(deps.get_output_as_string(), "");
// check the executable ran as expected
let mut f = File::open(temp_dir.path().join("1.txt")).expect("Failed to open output file");
let mut s = String::new();
f.read_to_string(&mut s).expect("failed to read output file");
assert_eq!(s,
fix_up_slashes(&format!("cwd={}/test_data/simple/subdir\nargs=\n)\n./ABBBC\n,\n",
env::current_dir().unwrap().to_string_lossy())));
}
|
{
let temp_dir = TempDir::new("find_exec").unwrap();
let temp_dir_path = temp_dir.path().to_string_lossy();
let deps = FakeDependencies::new();
let rc = find_main(&["find",
&fix_up_slashes("./test_data/simple/subdir"),
"-type",
"f",
"-exec",
&path_to_testing_commandline(),
temp_dir_path.as_ref(),
"(",
"{}",
"-o",
";"],
&deps);
assert_eq!(rc, 0);
// exec has side effects, so we won't output anything unless -print is
|
identifier_body
|
main.rs
|
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::io::BufReader;
use std::io::Read;
extern crate modfile;
use modfile::ptmf;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
static USAGE: &'static str = "
mod2abc.
Usage:
mod2abc (-h | --help)
mod2abc (-V | --version)
mod2abc --in= --out= [--format-jbe]
Options:
-V, --version Show version info.
-h, --help Show this text.
--in= Name of inputfile
--out= Name of outputfile
--format-jbe Use format for jbe replay
";
#[derive(Debug, Deserialize)]
struct Args {
flag_help: bool,
flag_version: bool,
flag_in: String,
flag_out: String,
flag_format_jbe: bool
}
fn note_from_period(period: u16) -> String {
// Find the position in PERIODS with the
// smallest difference
let mut found:i32 = -1;
let mut min_diff = 65536;
let key = period as i32;
for i in 0..ptmf::PERIODS.len() {
let diff = (key as i32 - ptmf::PERIODS[i] as i32).abs();
if diff < min_diff {
min_diff = diff;
found = i as i32;
}
}
let note = if found == -1 {
println!("Failed to find note name");
String::new()
} else {
let octave = found / 12;
let name = ptmf::NOTE_NAMES[(found % 12) as usize];
if octave == 1 {
// Lower case
name.to_lowercase()
} else if octave == 2 {
// Upper case
name.to_uppercase()
} else {
format!("O{}{}",octave,name)
}
};
note
}
fn is_pattern_break(channels:&Vec<ptmf::Channel>) -> bool {
// Check for pattern break
let effect = channels[0].effect | channels[1].effect | channels[2].effect;
if effect & 0x0f00 == 0x0d00 {
true
} else {
false
}
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
if args.flag_version {
println!("Version: {}", VERSION);
return;
}
if args.flag_in == "" {
println!("No inputfile specificed");
return
}
if args.flag_out == "" {
println!("No outputfile specificed");
return
}
let ref input_filename = args.flag_in;
let file = match File::open(input_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{}'", input_filename, e);
return
}
};
let read_fn:fn (&mut Read, bool) -> Result<ptmf::PTModule, ptmf::PTMFError> = ptmf::read_mod;
let mut reader = BufReader::new(&file);
let module = match read_fn(&mut reader, true) {
Ok(module) => module,
Err(e) => {
println!("Failed to parse file: '{}' Error: '{:?}'", input_filename, e);
return;
}
};
let ref output_filename = args.flag_out;
let file = match File::create(&output_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{:?}'", output_filename, e);
return
}
};
let mut writer = BufWriter::new(&file);
if args.flag_format_jbe {
// Write in which order to play patterns
write!(writer, "song\n").unwrap();
write!(writer, "{{\n\t").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
if i == 0 {
write!(writer, "{}", position).unwrap();
} else {
write!(writer, ",{}", position).unwrap();
}
}
write!(writer, "\n}}\n").unwrap();
// Write patterns
write!(writer, "patterns\n").unwrap();
write!(writer, "{{\n").unwrap();
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"\t{}:", pattern_no).unwrap();
// Loop through the pattern three times
// 0 = just sample number
// 1,2 = just pitch
let mut row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[0];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.sample_number > 0 {
let number = channel.sample_number as i8;
write!(writer,"S{}", number).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[1];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
|
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[2];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\n").unwrap();
pattern_no += 1;
}
write!(writer, "}}\n").unwrap();
} else {
// Write in which order to play patterns
write!(writer, "song_pattern_list:\n").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
write!(writer, "\tdefw song_pattern_{}\n", position).unwrap();
}
// Terminate list
write!(writer, "\tdefw 0\n").unwrap();
write!(writer, "\n").unwrap();
// Write pattern data
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"song_pattern_{}:\n", pattern_no).unwrap();
pattern_no += 1;
for row in pattern.rows {
let mut number = -1;
for channel in row.channels {
if channel.sample_number > 0 {
number = channel.sample_number as i8 - 1;
break;
}
}
write!(writer,"\tdefb {}\n", number).unwrap();
}
write!(writer,"\n").unwrap();
}
}
println!("Done!");
}
|
write!(writer,"\t{}:", pattern_no).unwrap();
|
random_line_split
|
main.rs
|
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::io::BufReader;
use std::io::Read;
extern crate modfile;
use modfile::ptmf;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
static USAGE: &'static str = "
mod2abc.
Usage:
mod2abc (-h | --help)
mod2abc (-V | --version)
mod2abc --in= --out= [--format-jbe]
Options:
-V, --version Show version info.
-h, --help Show this text.
--in= Name of inputfile
--out= Name of outputfile
--format-jbe Use format for jbe replay
";
#[derive(Debug, Deserialize)]
struct Args {
flag_help: bool,
flag_version: bool,
flag_in: String,
flag_out: String,
flag_format_jbe: bool
}
fn note_from_period(period: u16) -> String {
// Find the position in PERIODS with the
// smallest difference
let mut found:i32 = -1;
let mut min_diff = 65536;
let key = period as i32;
for i in 0..ptmf::PERIODS.len() {
let diff = (key as i32 - ptmf::PERIODS[i] as i32).abs();
if diff < min_diff {
min_diff = diff;
found = i as i32;
}
}
let note = if found == -1 {
println!("Failed to find note name");
String::new()
} else {
let octave = found / 12;
let name = ptmf::NOTE_NAMES[(found % 12) as usize];
if octave == 1 {
// Lower case
name.to_lowercase()
} else if octave == 2 {
// Upper case
name.to_uppercase()
} else {
format!("O{}{}",octave,name)
}
};
note
}
fn is_pattern_break(channels:&Vec<ptmf::Channel>) -> bool
|
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
if args.flag_version {
println!("Version: {}", VERSION);
return;
}
if args.flag_in == "" {
println!("No inputfile specificed");
return
}
if args.flag_out == "" {
println!("No outputfile specificed");
return
}
let ref input_filename = args.flag_in;
let file = match File::open(input_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{}'", input_filename, e);
return
}
};
let read_fn:fn (&mut Read, bool) -> Result<ptmf::PTModule, ptmf::PTMFError> = ptmf::read_mod;
let mut reader = BufReader::new(&file);
let module = match read_fn(&mut reader, true) {
Ok(module) => module,
Err(e) => {
println!("Failed to parse file: '{}' Error: '{:?}'", input_filename, e);
return;
}
};
let ref output_filename = args.flag_out;
let file = match File::create(&output_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{:?}'", output_filename, e);
return
}
};
let mut writer = BufWriter::new(&file);
if args.flag_format_jbe {
// Write in which order to play patterns
write!(writer, "song\n").unwrap();
write!(writer, "{{\n\t").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
if i == 0 {
write!(writer, "{}", position).unwrap();
} else {
write!(writer, ",{}", position).unwrap();
}
}
write!(writer, "\n}}\n").unwrap();
// Write patterns
write!(writer, "patterns\n").unwrap();
write!(writer, "{{\n").unwrap();
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"\t{}:", pattern_no).unwrap();
// Loop through the pattern three times
// 0 = just sample number
// 1,2 = just pitch
let mut row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[0];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.sample_number > 0 {
let number = channel.sample_number as i8;
write!(writer,"S{}", number).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[1];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[2];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\n").unwrap();
pattern_no += 1;
}
write!(writer, "}}\n").unwrap();
} else {
// Write in which order to play patterns
write!(writer, "song_pattern_list:\n").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
write!(writer, "\tdefw song_pattern_{}\n", position).unwrap();
}
// Terminate list
write!(writer, "\tdefw 0\n").unwrap();
write!(writer, "\n").unwrap();
// Write pattern data
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"song_pattern_{}:\n", pattern_no).unwrap();
pattern_no += 1;
for row in pattern.rows {
let mut number = -1;
for channel in row.channels {
if channel.sample_number > 0 {
number = channel.sample_number as i8 - 1;
break;
}
}
write!(writer,"\tdefb {}\n", number).unwrap();
}
write!(writer,"\n").unwrap();
}
}
println!("Done!");
}
|
{
// Check for pattern break
let effect = channels[0].effect | channels[1].effect | channels[2].effect;
if effect & 0x0f00 == 0x0d00 {
true
} else {
false
}
}
|
identifier_body
|
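Aside on the mod2abc record above: `note_from_period` is a nearest-match lookup. It scans a period table, keeps the index with the smallest absolute difference, and derives octave and note name from that index. A minimal self-contained sketch of the same idea follows; the two tables are illustrative stand-ins covering one octave only, not the real `ptmf::PERIODS`/`ptmf::NOTE_NAMES` data.

```rust
// Sketch only: one octave of ProTracker-style periods (illustrative values).
const PERIODS: [u16; 12] = [856, 808, 762, 720, 678, 640, 604, 570, 538, 508, 480, 453];
const NOTE_NAMES: [&str; 12] = [
    "C-", "C#", "D-", "D#", "E-", "F-", "F#", "G-", "G#", "A-", "A#", "B-",
];

/// Return the note name whose period is closest to `period`.
fn nearest_note(period: u16) -> &'static str {
    let mut best = 0;
    let mut best_diff = i32::MAX;
    for (i, &p) in PERIODS.iter().enumerate() {
        let diff = (period as i32 - p as i32).abs();
        if diff < best_diff {
            best_diff = diff;
            best = i;
        }
    }
    NOTE_NAMES[best]
}

fn main() {
    assert_eq!(nearest_note(856), "C-"); // exact hit
    assert_eq!(nearest_note(715), "D#"); // closest table entry is 720
    println!("ok");
}
```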
main.rs
|
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::io::BufReader;
use std::io::Read;
extern crate modfile;
use modfile::ptmf;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
static USAGE: &'static str = "
mod2abc.
Usage:
mod2abc (-h | --help)
mod2abc (-V | --version)
mod2abc --in=<infile> --out=<outfile> [--format-jbe]
Options:
-V, --version Show version info.
-h, --help Show this text.
--in=<infile>  Name of inputfile
--out=<outfile>  Name of outputfile
--format-jbe Use format for jbe replay
";
#[derive(Debug, Deserialize)]
struct Args {
flag_help: bool,
flag_version: bool,
flag_in: String,
flag_out: String,
flag_format_jbe: bool
}
fn note_from_period(period: u16) -> String {
// Find the position in PERIODS with the
// smallest difference
let mut found:i32 = -1;
let mut min_diff = 65536;
let key = period as i32;
for i in 0..ptmf::PERIODS.len() {
let diff = (key as i32 - ptmf::PERIODS[i] as i32).abs();
if diff < min_diff {
min_diff = diff;
found = i as i32;
}
}
let note = if found == -1 {
println!("Failed to find note name");
String::new()
} else {
let octave = found / 12;
let name = ptmf::NOTE_NAMES[(found % 12) as usize];
if octave == 1 {
// Lower case
name.to_lowercase()
} else if octave == 2 {
// Upper case
name.to_uppercase()
} else {
format!("O{}{}",octave,name)
}
};
note
}
fn is_pattern_break(channels:&Vec<ptmf::Channel>) -> bool {
// Check for pattern break
let effect = channels[0].effect | channels[1].effect | channels[2].effect;
if effect & 0x0f00 == 0x0d00 {
true
} else {
false
}
}
fn
|
() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
if args.flag_version {
println!("Version: {}", VERSION);
return;
}
if args.flag_in == "" {
println!("No inputfile specificed");
return
}
if args.flag_out == "" {
println!("No outputfile specificed");
return
}
let ref input_filename = args.flag_in;
let file = match File::open(input_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{}'", input_filename, e);
return
}
};
let read_fn:fn (&mut Read, bool) -> Result<ptmf::PTModule, ptmf::PTMFError> = ptmf::read_mod;
let mut reader = BufReader::new(&file);
let module = match read_fn(&mut reader, true) {
Ok(module) => module,
Err(e) => {
println!("Failed to parse file: '{}' Error: '{:?}'", input_filename, e);
return;
}
};
let ref output_filename = args.flag_out;
let file = match File::create(&output_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{:?}'", output_filename, e);
return
}
};
let mut writer = BufWriter::new(&file);
if args.flag_format_jbe {
// Write in which order to play patterns
write!(writer, "song\n").unwrap();
write!(writer, "{{\n\t").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
if i == 0 {
write!(writer, "{}", position).unwrap();
} else {
write!(writer, ",{}", position).unwrap();
}
}
write!(writer, "\n}}\n").unwrap();
// Write patterns
write!(writer, "patterns\n").unwrap();
write!(writer, "{{\n").unwrap();
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"\t{}:", pattern_no).unwrap();
// Loop through the pattern three times
// 0 = just sample number
// 1,2 = just pitch
let mut row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[0];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.sample_number > 0 {
let number = channel.sample_number as i8;
write!(writer,"S{}", number).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[1];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[2];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\n").unwrap();
pattern_no += 1;
}
write!(writer, "}}\n").unwrap();
} else {
// Write in which order to play patterns
write!(writer, "song_pattern_list:\n").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
write!(writer, "\tdefw song_pattern_{}\n", position).unwrap();
}
// Terminate list
write!(writer, "\tdefw 0\n").unwrap();
write!(writer, "\n").unwrap();
// Write pattern data
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"song_pattern_{}:\n", pattern_no).unwrap();
pattern_no += 1;
for row in pattern.rows {
let mut number = -1;
for channel in row.channels {
if channel.sample_number > 0 {
number = channel.sample_number as i8 - 1;
break;
}
}
write!(writer,"\tdefb {}\n", number).unwrap();
}
write!(writer,"\n").unwrap();
}
}
println!("Done!");
}
|
main
|
identifier_name
|
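For context on the `is_pattern_break` helper in these records: a ProTracker effect word keeps the command in bits 8..11 and its parameter in the low byte, and command 0xD is "pattern break", so masking with 0x0f00 and comparing to 0x0d00 detects it. (The original code ORs the three channels' effect words first, which in principle can combine two different commands into 0xD; the sketch below checks a single effect word and is not tied to the `ptmf` types.)

```rust
/// True if a single effect word encodes ProTracker command 0xD ("pattern break").
fn is_pattern_break_effect(effect: u16) -> bool {
    // The command nibble lives in bits 8..11; 0xD?? is "pattern break".
    effect & 0x0f00 == 0x0d00
}

fn main() {
    assert!(is_pattern_break_effect(0x0d00));  // D00: break to row 0
    assert!(is_pattern_break_effect(0x0d12));  // Dxx with a parameter
    assert!(!is_pattern_break_effect(0x0c40)); // Cxx (set volume) is not a break
    println!("ok");
}
```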
main.rs
|
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::io::BufReader;
use std::io::Read;
extern crate modfile;
use modfile::ptmf;
#[macro_use]
extern crate serde_derive;
extern crate docopt;
use docopt::Docopt;
const VERSION: &'static str = env!("CARGO_PKG_VERSION");
static USAGE: &'static str = "
mod2abc.
Usage:
mod2abc (-h | --help)
mod2abc (-V | --version)
mod2abc --in=<infile> --out=<outfile> [--format-jbe]
Options:
-V, --version Show version info.
-h, --help Show this text.
--in=<infile>  Name of inputfile
--out=<outfile>  Name of outputfile
--format-jbe Use format for jbe replay
";
#[derive(Debug, Deserialize)]
struct Args {
flag_help: bool,
flag_version: bool,
flag_in: String,
flag_out: String,
flag_format_jbe: bool
}
fn note_from_period(period: u16) -> String {
// Find the position in PERIODS with the
// smallest difference
let mut found:i32 = -1;
let mut min_diff = 65536;
let key = period as i32;
for i in 0..ptmf::PERIODS.len() {
let diff = (key as i32 - ptmf::PERIODS[i] as i32).abs();
if diff < min_diff {
min_diff = diff;
found = i as i32;
}
}
let note = if found == -1 {
println!("Failed to find note name");
String::new()
} else {
let octave = found / 12;
let name = ptmf::NOTE_NAMES[(found % 12) as usize];
if octave == 1 {
// Lower case
name.to_lowercase()
} else if octave == 2 {
// Upper case
name.to_uppercase()
} else {
format!("O{}{}",octave,name)
}
};
note
}
fn is_pattern_break(channels:&Vec<ptmf::Channel>) -> bool {
// Check for pattern break
let effect = channels[0].effect | channels[1].effect | channels[2].effect;
if effect & 0x0f00 == 0x0d00 {
true
} else {
false
}
}
fn main() {
let args: Args = Docopt::new(USAGE)
.and_then(|d| d.deserialize())
.unwrap_or_else(|e| e.exit());
// println!("{:?}", args);
if args.flag_version {
println!("Version: {}", VERSION);
return;
}
if args.flag_in == "" {
println!("No inputfile specificed");
return
}
if args.flag_out == "" {
println!("No outputfile specificed");
return
}
let ref input_filename = args.flag_in;
let file = match File::open(input_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{}'", input_filename, e);
return
}
};
let read_fn:fn (&mut Read, bool) -> Result<ptmf::PTModule, ptmf::PTMFError> = ptmf::read_mod;
let mut reader = BufReader::new(&file);
let module = match read_fn(&mut reader, true) {
Ok(module) => module,
Err(e) => {
println!("Failed to parse file: '{}' Error: '{:?}'", input_filename, e);
return;
}
};
let ref output_filename = args.flag_out;
let file = match File::create(&output_filename) {
Ok(file) => file,
Err(e) => {
println!("Failed to open file: '{}' Error: '{:?}'", output_filename, e);
return
}
};
let mut writer = BufWriter::new(&file);
if args.flag_format_jbe {
// Write in which order to play patterns
write!(writer, "song\n").unwrap();
write!(writer, "{{\n\t").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
if i == 0 {
write!(writer, "{}", position).unwrap();
} else {
write!(writer, ",{}", position).unwrap();
}
}
write!(writer, "\n}}\n").unwrap();
// Write patterns
write!(writer, "patterns\n").unwrap();
write!(writer, "{{\n").unwrap();
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"\t{}:", pattern_no).unwrap();
// Loop through the pattern three times
// 0 = just sample number
// 1,2 = just pitch
let mut row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[0];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.sample_number > 0 {
let number = channel.sample_number as i8;
write!(writer,"S{}", number).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[1];
if row_no != 0 {
write!(writer,",").unwrap();
}
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\t{}:", pattern_no).unwrap();
row_no = 0;
for row in &pattern.rows {
let ref channel = row.channels[2];
if row_no != 0
|
if channel.period > 0 {
let note = note_from_period(channel.period);
write!(writer,"{}", note).unwrap();
}
row_no += 1;
if is_pattern_break(&row.channels) {
break;
}
}
write!(writer,"\n").unwrap();
write!(writer,"\n").unwrap();
pattern_no += 1;
}
write!(writer, "}}\n").unwrap();
} else {
// Write in which order to play patterns
write!(writer, "song_pattern_list:\n").unwrap();
for i in 0..module.length {
let position = module.positions.data[i as usize];
write!(writer, "\tdefw song_pattern_{}\n", position).unwrap();
}
// Terminate list
write!(writer, "\tdefw 0\n").unwrap();
write!(writer, "\n").unwrap();
// Write pattern data
let mut pattern_no = 0;
for pattern in module.patterns {
write!(writer,"song_pattern_{}:\n", pattern_no).unwrap();
pattern_no += 1;
for row in pattern.rows {
let mut number = -1;
for channel in row.channels {
if channel.sample_number > 0 {
number = channel.sample_number as i8 - 1;
break;
}
}
write!(writer,"\tdefb {}\n", number).unwrap();
}
write!(writer,"\n").unwrap();
}
}
println!("Done!");
}
|
{
write!(writer,",").unwrap();
}
|
conditional_block
|
lib.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate proc_macro;
use proc_macro::TokenStream;
mod applayerevent;
/// The `AppLayerEvent` derive macro generates an `AppLayerEvent` trait
/// implementation for enums that define AppLayerEvents.
///
/// Example usage (DNS app-layer events):
///
/// #[derive(AppLayerEvent)]
/// enum {
/// MalformedData,
/// NotRequest,
/// NotResponse,
/// ZFlagSet,
/// }
///
/// The enum variants must follow the naming convention of OneTwoThree
/// for proper conversion to the name used in rules (one_two_three).
#[proc_macro_derive(AppLayerEvent)]
pub fn derive_app_layer_event(input: TokenStream) -> TokenStream
|
{
applayerevent::derive_app_layer_event(input)
}
|
identifier_body
|
|
lib.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate proc_macro;
use proc_macro::TokenStream;
mod applayerevent;
/// The `AppLayerEvent` derive macro generates an `AppLayerEvent` trait
/// implementation for enums that define AppLayerEvents.
///
/// Example usage (DNS app-layer events):
///
/// #[derive(AppLayerEvent)]
/// enum {
/// MalformedData,
/// NotRequest,
/// NotResponse,
/// ZFlagSet,
/// }
///
/// The enum variants must follow the naming convention of OneTwoThree
/// for proper conversion to the name used in rules (one_two_three).
#[proc_macro_derive(AppLayerEvent)]
pub fn
|
(input: TokenStream) -> TokenStream {
applayerevent::derive_app_layer_event(input)
}
|
derive_app_layer_event
|
identifier_name
|
lib.rs
|
/* Copyright (C) 2020 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
* Software Foundation.
*
* This program is distributed in the hope that it will be useful,
|
* version 2 along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*/
extern crate proc_macro;
use proc_macro::TokenStream;
mod applayerevent;
/// The `AppLayerEvent` derive macro generates an `AppLayerEvent` trait
/// implementation for enums that define AppLayerEvents.
///
/// Example usage (DNS app-layer events):
///
/// #[derive(AppLayerEvent)]
/// enum {
/// MalformedData,
/// NotRequest,
/// NotResponse,
/// ZFlagSet,
/// }
///
/// The enum variants must follow the naming convention of OneTwoThree
/// for proper conversion to the name used in rules (one_two_three).
#[proc_macro_derive(AppLayerEvent)]
pub fn derive_app_layer_event(input: TokenStream) -> TokenStream {
applayerevent::derive_app_layer_event(input)
}
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
|
random_line_split
|
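On the naming convention mentioned in the `AppLayerEvent` doc comment (a CamelCase variant such as `MalformedData` maps to the snake_case rule name `malformed_data`): the conversion is a plain case split. The sketch below only illustrates that convention; it is not the macro's actual implementation.

```rust
/// Convert a CamelCase variant name to the snake_case form used in rules.
fn to_rule_name(variant: &str) -> String {
    let mut out = String::new();
    for (i, c) in variant.chars().enumerate() {
        if c.is_uppercase() {
            if i > 0 {
                out.push('_'); // word boundary before every non-leading uppercase
            }
            out.extend(c.to_lowercase());
        } else {
            out.push(c);
        }
    }
    out
}

fn main() {
    assert_eq!(to_rule_name("MalformedData"), "malformed_data");
    assert_eq!(to_rule_name("ZFlagSet"), "z_flag_set");
    println!("ok");
}
```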
count.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
use core::iter::Skip;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
None::<Self::Item>
}
}
// fn skip(self, n: usize) -> Skip<Self> where Self: Sized {
// Skip{iter: self, n: n}
// }
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
// impl<I> Iterator for Skip<I> where I: Iterator {
// type Item = <I as Iterator>::Item;
//
// #[inline]
// fn next(&mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.next()
// } else {
// let old_n = self.n;
// self.n = 0;
// self.iter.nth(old_n)
// }
// }
//
// #[inline]
// fn nth(&mut self, n: usize) -> Option<I::Item> {
// // Can't just add n + self.n due to overflow.
// if self.n == 0 {
// self.iter.nth(n)
// } else {
// let to_skip = self.n;
// self.n = 0;
// // nth(n) skips n+1
// if self.iter.nth(to_skip-1).is_none() {
// return None;
// }
// self.iter.nth(n)
// }
// }
//
// #[inline]
// fn count(self) -> usize {
// self.iter.count().saturating_sub(self.n)
// }
//
// #[inline]
// fn last(mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.last()
// } else {
// let next = self.next();
// if next.is_some() {
// // recurse. n should be 0.
// self.last().or(next)
// } else {
// None
// }
// }
// }
//
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) {
// let (lower, upper) = self.iter.size_hint();
//
|
// }
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let n: usize = 3;
let skip: Skip<A<T>> = a.skip(n);
let count: usize = skip.count();
assert_eq!(count, 10 - n);
}
}
|
// let lower = lower.saturating_sub(self.n);
// let upper = upper.map(|x| x.saturating_sub(self.n));
//
// (lower, upper)
// }
|
random_line_split
|
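The commented-out `Skip` code in the record above expects `count` to behave like `iter.count().saturating_sub(n)`. That behaviour can be confirmed with the standard library alone; nothing below depends on the custom `A<T>` iterator.

```rust
fn main() {
    // Skipping fewer elements than the iterator yields: the count drops by n.
    assert_eq!((0..10).skip(3).count(), 7);
    // Skipping more elements than the iterator yields: the count saturates
    // at zero instead of underflowing.
    assert_eq!((0..2).skip(5).count(), 0);
    println!("ok");
}
```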
count.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
use core::iter::Skip;
struct
|
<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
None::<Self::Item>
}
}
// fn skip(self, n: usize) -> Skip<Self> where Self: Sized {
// Skip{iter: self, n: n}
// }
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
// impl<I> Iterator for Skip<I> where I: Iterator {
// type Item = <I as Iterator>::Item;
//
// #[inline]
// fn next(&mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.next()
// } else {
// let old_n = self.n;
// self.n = 0;
// self.iter.nth(old_n)
// }
// }
//
// #[inline]
// fn nth(&mut self, n: usize) -> Option<I::Item> {
// // Can't just add n + self.n due to overflow.
// if self.n == 0 {
// self.iter.nth(n)
// } else {
// let to_skip = self.n;
// self.n = 0;
// // nth(n) skips n+1
// if self.iter.nth(to_skip-1).is_none() {
// return None;
// }
// self.iter.nth(n)
// }
// }
//
// #[inline]
// fn count(self) -> usize {
// self.iter.count().saturating_sub(self.n)
// }
//
// #[inline]
// fn last(mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.last()
// } else {
// let next = self.next();
// if next.is_some() {
// // recurse. n should be 0.
// self.last().or(next)
// } else {
// None
// }
// }
// }
//
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) {
// let (lower, upper) = self.iter.size_hint();
//
// let lower = lower.saturating_sub(self.n);
// let upper = upper.map(|x| x.saturating_sub(self.n));
//
// (lower, upper)
// }
// }
#[test]
fn count_test1() {
let a: A<T> = A { begin: 0, end: 10 };
let n: usize = 3;
let skip: Skip<A<T>> = a.skip(n);
let count: usize = skip.count();
assert_eq!(count, 10 - n);
}
}
|
A
|
identifier_name
|
count.rs
|
#![feature(core)]
extern crate core;
#[cfg(test)]
mod tests {
use core::iter::Iterator;
use core::iter::Skip;
struct A<T> {
begin: T,
end: T
}
macro_rules! Iterator_impl {
($T:ty) => {
impl Iterator for A<$T> {
type Item = $T;
fn next(&mut self) -> Option<Self::Item> {
if self.begin < self.end {
let result = self.begin;
self.begin = self.begin.wrapping_add(1);
Some::<Self::Item>(result)
} else {
None::<Self::Item>
}
}
// fn skip(self, n: usize) -> Skip<Self> where Self: Sized {
// Skip{iter: self, n: n}
// }
// fn count(self) -> usize where Self: Sized {
// // Might overflow.
// self.fold(0, |cnt, _| cnt + 1)
// }
}
}
}
type T = i32;
Iterator_impl!(T);
// impl<I> Iterator for Skip<I> where I: Iterator {
// type Item = <I as Iterator>::Item;
//
// #[inline]
// fn next(&mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.next()
// } else {
// let old_n = self.n;
// self.n = 0;
// self.iter.nth(old_n)
// }
// }
//
// #[inline]
// fn nth(&mut self, n: usize) -> Option<I::Item> {
// // Can't just add n + self.n due to overflow.
// if self.n == 0 {
// self.iter.nth(n)
// } else {
// let to_skip = self.n;
// self.n = 0;
// // nth(n) skips n+1
// if self.iter.nth(to_skip-1).is_none() {
// return None;
// }
// self.iter.nth(n)
// }
// }
//
// #[inline]
// fn count(self) -> usize {
// self.iter.count().saturating_sub(self.n)
// }
//
// #[inline]
// fn last(mut self) -> Option<I::Item> {
// if self.n == 0 {
// self.iter.last()
// } else {
// let next = self.next();
// if next.is_some() {
// // recurse. n should be 0.
// self.last().or(next)
// } else {
// None
// }
// }
// }
//
// #[inline]
// fn size_hint(&self) -> (usize, Option<usize>) {
// let (lower, upper) = self.iter.size_hint();
//
// let lower = lower.saturating_sub(self.n);
// let upper = upper.map(|x| x.saturating_sub(self.n));
//
// (lower, upper)
// }
// }
#[test]
fn count_test1()
|
}
|
{
let a: A<T> = A { begin: 0, end: 10 };
let n: usize = 3;
let skip: Skip<A<T>> = a.skip(n);
let count: usize = skip.count();
assert_eq!(count, 10 - n);
}
|
identifier_body
|
udp_client_connection.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! UDP based DNS client connection for Client impls
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use crate::proto::udp::{UdpClientConnect, UdpClientStream};
use crate::proto::xfer::DnsRequestSender;
use crate::client::ClientConnection;
use crate::error::*;
use crate::rr::dnssec::Signer;
use tokio::net::UdpSocket;
/// UDP based DNS Client connection
///
/// Use with `trust_dns_client::client::Client` impls
#[derive(Clone)]
pub struct UdpClientConnection {
name_server: SocketAddr,
timeout: Duration,
}
impl UdpClientConnection {
/// Creates a new client connection, with a default timeout of 5 seconds.
///
/// # Arguments
///
/// * `name_server` - address of the name server to use for queries
pub fn new(name_server: SocketAddr) -> ClientResult<Self> {
Self::with_timeout(name_server, Duration::from_secs(5))
|
/// Allows a custom timeout
pub fn with_timeout(name_server: SocketAddr, timeout: Duration) -> ClientResult<Self> {
Ok(UdpClientConnection {
name_server,
timeout,
})
}
}
impl ClientConnection for UdpClientConnection {
type Sender = UdpClientStream<UdpSocket, Signer>;
type Response = <Self::Sender as DnsRequestSender>::DnsResponseFuture;
type SenderFuture = UdpClientConnect<UdpSocket, Signer>;
fn new_stream(&self, signer: Option<Arc<Signer>>) -> Self::SenderFuture {
UdpClientStream::with_timeout_and_signer(self.name_server, self.timeout, signer)
}
}
|
}
|
random_line_split
|
udp_client_connection.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! UDP based DNS client connection for Client impls
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use crate::proto::udp::{UdpClientConnect, UdpClientStream};
use crate::proto::xfer::DnsRequestSender;
use crate::client::ClientConnection;
use crate::error::*;
use crate::rr::dnssec::Signer;
use tokio::net::UdpSocket;
/// UDP based DNS Client connection
///
/// Use with `trust_dns_client::client::Client` impls
#[derive(Clone)]
pub struct UdpClientConnection {
name_server: SocketAddr,
timeout: Duration,
}
impl UdpClientConnection {
/// Creates a new client connection, with a default timeout of 5 seconds.
///
/// # Arguments
///
/// * `name_server` - address of the name server to use for queries
pub fn new(name_server: SocketAddr) -> ClientResult<Self> {
Self::with_timeout(name_server, Duration::from_secs(5))
}
/// Allows a custom timeout
pub fn with_timeout(name_server: SocketAddr, timeout: Duration) -> ClientResult<Self>
|
}
impl ClientConnection for UdpClientConnection {
type Sender = UdpClientStream<UdpSocket, Signer>;
type Response = <Self::Sender as DnsRequestSender>::DnsResponseFuture;
type SenderFuture = UdpClientConnect<UdpSocket, Signer>;
fn new_stream(&self, signer: Option<Arc<Signer>>) -> Self::SenderFuture {
UdpClientStream::with_timeout_and_signer(self.name_server, self.timeout, signer)
}
}
|
{
Ok(UdpClientConnection {
name_server,
timeout,
})
}
|
identifier_body
|
udp_client_connection.rs
|
// Copyright 2015-2018 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! UDP based DNS client connection for Client impls
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use crate::proto::udp::{UdpClientConnect, UdpClientStream};
use crate::proto::xfer::DnsRequestSender;
use crate::client::ClientConnection;
use crate::error::*;
use crate::rr::dnssec::Signer;
use tokio::net::UdpSocket;
/// UDP based DNS Client connection
///
/// Use with `trust_dns_client::client::Client` impls
#[derive(Clone)]
pub struct UdpClientConnection {
name_server: SocketAddr,
timeout: Duration,
}
impl UdpClientConnection {
/// Creates a new client connection, with a default timeout of 5 seconds.
///
/// # Arguments
///
/// * `name_server` - address of the name server to use for queries
pub fn new(name_server: SocketAddr) -> ClientResult<Self> {
Self::with_timeout(name_server, Duration::from_secs(5))
}
/// Allows a custom timeout
pub fn
|
(name_server: SocketAddr, timeout: Duration) -> ClientResult<Self> {
Ok(UdpClientConnection {
name_server,
timeout,
})
}
}
impl ClientConnection for UdpClientConnection {
type Sender = UdpClientStream<UdpSocket, Signer>;
type Response = <Self::Sender as DnsRequestSender>::DnsResponseFuture;
type SenderFuture = UdpClientConnect<UdpSocket, Signer>;
fn new_stream(&self, signer: Option<Arc<Signer>>) -> Self::SenderFuture {
UdpClientStream::with_timeout_and_signer(self.name_server, self.timeout, signer)
}
}
|
with_timeout
|
identifier_name
|
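A usage sketch for the connection type in the records above, relying only on the two constructors defined in that file; the import path is an assumption based on the module layout shown, and error handling is kept minimal.

```rust
use std::time::Duration;

// Assumed path: the file lives under the client crate's `udp` module.
use trust_dns_client::udp::UdpClientConnection;

fn main() {
    let addr = "127.0.0.1:53".parse().unwrap();

    // Default 5 second timeout.
    let _conn = UdpClientConnection::new(addr).expect("connection config");

    // Or pick a custom timeout explicitly.
    let _conn = UdpClientConnection::with_timeout(addr, Duration::from_secs(2))
        .expect("connection config");
}
```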
exhaustive_ordered_unique_vecs_length_range.rs
|
use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::exhaustive_ordered_unique_vecs_length_range;
use std::fmt::Debug;
fn exhaustive_ordered_unique_vecs_length_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
{
let xss = exhaustive_ordered_unique_vecs_length_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
#[test]
fn test_exhaustive_ordered_unique_vecs_length_range() {
exhaustive_ordered_unique_vecs_length_range_small_helper(0, 5, nevers(), 1, &[&[]]);
exhaustive_ordered_unique_vecs_length_range_small_helper(6, 10, nevers(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
5,
exhaustive_units(),
2,
&[&[], &[()]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 1, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
2,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
2,
4,
exhaustive_bools(),
|
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
1,
2,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
}
|
1,
&[&[false, true]],
|
random_line_split
|
exhaustive_ordered_unique_vecs_length_range.rs
|
use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::exhaustive_ordered_unique_vecs_length_range;
use std::fmt::Debug;
fn exhaustive_ordered_unique_vecs_length_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
|
#[test]
fn test_exhaustive_ordered_unique_vecs_length_range() {
exhaustive_ordered_unique_vecs_length_range_small_helper(0, 5, nevers(), 1, &[&[]]);
exhaustive_ordered_unique_vecs_length_range_small_helper(6, 10, nevers(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
5,
exhaustive_units(),
2,
&[&[], &[()]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 1, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
2,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
2,
4,
exhaustive_bools(),
1,
&[&[false, true]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
1,
2,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
}
|
{
let xss = exhaustive_ordered_unique_vecs_length_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
|
identifier_body
|
exhaustive_ordered_unique_vecs_length_range.rs
|
use itertools::Itertools;
use malachite_base::bools::exhaustive::exhaustive_bools;
use malachite_base::nevers::nevers;
use malachite_base::tuples::exhaustive::exhaustive_units;
use malachite_base::vecs::exhaustive::exhaustive_ordered_unique_vecs_length_range;
use std::fmt::Debug;
fn exhaustive_ordered_unique_vecs_length_range_small_helper<I: Clone + Iterator>(
a: u64,
b: u64,
xs: I,
out_len: usize,
out: &[&[I::Item]],
) where
I::Item: Clone + Debug + Eq,
{
let xss = exhaustive_ordered_unique_vecs_length_range(a, b, xs);
let xss_prefix = xss.clone().take(20).collect_vec();
assert_eq!(
xss_prefix
.iter()
.map(Vec::as_slice)
.collect_vec()
.as_slice(),
out
);
assert_eq!(xss.count(), out_len);
}
#[test]
fn
|
() {
exhaustive_ordered_unique_vecs_length_range_small_helper(0, 5, nevers(), 1, &[&[]]);
exhaustive_ordered_unique_vecs_length_range_small_helper(6, 10, nevers(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
5,
exhaustive_units(),
2,
&[&[], &[()]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 0, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(1, 1, exhaustive_bools(), 0, &[]);
exhaustive_ordered_unique_vecs_length_range_small_helper(
0,
2,
exhaustive_bools(),
3,
&[&[], &[false], &[true]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
2,
4,
exhaustive_bools(),
1,
&[&[false, true]],
);
exhaustive_ordered_unique_vecs_length_range_small_helper(
1,
2,
'a'..='c',
3,
&[&['a'], &['b'], &['c']],
);
}
|
test_exhaustive_ordered_unique_vecs_length_range
|
identifier_name
|
fmt.rs
|
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for formatting and printing strings
//!
//! This module contains the runtime support for the `format!` syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings and streams.
//!
//! # Usage
//!
//! The `format!` macro is intended to be familiar to those coming from C's
//! printf/fprintf functions or Python's `str.format` function. In its current
//! revision, the `format!` macro returns a `String` type which is the result of
//! the formatting. In the future it will also be able to pass in a stream to
//! format arguments directly while performing minimal allocations.
//!
//! Some examples of the `format!` extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string, although it must always be referred to with the same type.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the `format!` macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following `format!` expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is illegal to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is illegal
//! to provide named parameters that are unused by the format string.
//!
//! ## Argument types
//!
//! Each argument's type is dictated by the format string. It is a requirement
//! that every argument is only ever referred to by one type. For example, this
//! is an invalid format string:
//!
//! ```text
//! {0:x} {0:o}
//! ```
//!
//! This is invalid because the first argument is both referred to as a
//! hexadecimal as well as an
//! octal.
//!
//! There are various parameters which do require a particular type, however. Namely, the `{:.*}`
//! syntax, which sets the number of numbers after the decimal in floating-point types:
//!
//! ```
//! let formatted_number = format!("{:.*}", 2, 1.234567);
//!
//! assert_eq!("1.23", formatted_number)
//! ```
//!
//! If this syntax is used, then the number of characters to print precedes the actual object being
//! formatted, and the number of characters must have the type `usize`. Although a `usize` can be
//! printed with `{}`, it is illegal to reference an argument as such. For example this is another
//! invalid format string:
//!
//! ```text
//! {:.*} {0}
//! ```
//!
//! ## Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
//! well as `isize`). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ `Display`
//! * `?` ⇒ `Debug`
//! * `o` ⇒ `Octal`
//! * `x` ⇒ `LowerHex`
//! * `X` ⇒ `UpperHex`
//! * `p` ⇒ `Pointer`
//! * `b` ⇒ `Binary`
//! * `e` ⇒ `LowerExp`
//! * `E` ⇒ `UpperExp`
//!
//! What this means is that any type of argument which implements the
//! `fmt::Binary` trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the `Display` trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! `Formatter` struct. In order to help with this, the `Formatter` struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is `fmt::Result` which is a
//! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
//! implementations should ensure that they return errors from `write!`
//! correctly (propagating errors upward).
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! # #![feature(core, std_misc)]
//! use std::fmt;
//! use std::f64;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### fmt::Display vs fmt::Debug
//!
//! These two formatting traits have distinct purposes:
//!
//! - `fmt::Display` implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the `Display` trait.
//! - `fmt::Debug` implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the `Debug` trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! ## Related macros
//!
//! There are a number of related macros in the `format!` family. The ones that
//! are currently implemented are:
//!
//! ```ignore
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and `writeln` are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the `write` function defined in this module.
//! Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and `println` emit their output to stdout. Similarly to the `write!`
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! fmt::format(format_args!("this returns {}", "String"));
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!("or a {} too", "function"));
//! ```
//!
//! The result of the `format_args!` macro is a value of type `fmt::Arguments`.
//! This structure can then be passed to the `write` and `format` functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! # Syntax
//!
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ format <text> ] *
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | ''
//! count := parameter | integer
//! parameter := integer '$'
//! ```
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted. This
//! syntax draws heavily from Python's, so it may seem a bit familiar.
//!
//! ## Fill/Alignment
//!
//! The fill character is provided normally in conjunction with the `width`
//! parameter. This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it. The extra
//! characters are specified by `fill`, and the alignment can be one of two
//! options:
//!
//! * `<` - the argument is left-aligned in `width` columns
//! * `^` - the argument is center-aligned in `width` columns
//! * `>` - the argument is right-aligned in `width` columns
//!
//! Note that alignment may not be implemented by some types. A good way
//! to ensure padding is applied is to format your input, then use this
//! resulting string to pad your output.
//!
//! ## Sign/#/0
//!
//! These can all be interpreted as flags for a particular formatter.
//!
//! * '+' - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (+ or -)
//! should always be printed.
//! * '-' - Currently not used
//! * '#' - This flag indicates that the "alternate" form of printing should
//! be used. For array slices, the alternate form omits the brackets.
//! For the integer formatting traits, the alternate forms are:
//! * `#x` - precedes the argument with a "0x"
//! * `#X` - precedes the argument with a "0x"
//! * `#t` - precedes the argument with a "0b"
//! * `#o` - precedes the argument with a "0o"
//! * '0' - This is used to indicate for integer formats that the padding should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//!
//! ## Width
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space.
//!
//! The default fill/alignment for non-numerics is a space and left-aligned. The
//! default for numeric formatters is also a space but with right-alignment. If
//! the '0' flag is specified for numerics, then the implicit fill character is
//! '0'.
//!
//! The value for the width can also be provided as a `usize` in the list of
//! parameters by using the `2$` syntax indicating that the second argument is a
//! `usize` specifying the width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and only those are
//! emitted.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`,
//! 2. an integer followed by dollar sign `.N$`, or
//! 3. an asterisk `.*`.
//!
//! The first specification, `.N`, means the integer `N` itself is the precision.
//!
//! The second, `.N$`, means use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! Finally, `.*` means that this `{...}` is associated with *two* format inputs rather than one:
//! the first input holds the `usize` precision, and the second holds the value to print. Note
//! that in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part
//! refers to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, these:
//!
//! ```
//! // Hello {arg 0 (x)} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 (x)} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 (x)} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//! ```
//!
//! All print the same thing:
//!
//! ```text
//! Hello x is 0.01000
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
#![stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write, rt};
pub use core::fmt::{Octal, Binary};
pub use core::fmt::{Display, Debug};
pub use core::fmt::{LowerHex, UpperHex, Pointer};
pub use core::fmt::{LowerExp, UpperExp};
pub use core::fmt::Error;
pub use core::fmt::{ArgumentV1, Arguments, write, radix, Radix, RadixFmt};
use string;
/// The format function takes a precompiled format string and a list of
/// arguments, to return the resulting formatted string.
///
/// # Arguments
///
/// * args - a structure of arguments generated via the `format_args!` macro.
///
/// # Examples
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Argum
|
-> string::String {
let mut output = string::String::new();
let _ = output.write_fmt(args);
output
}
|
ents)
|
identifier_name
|
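The fill/alignment, sign-aware zero padding, and precision parameters described in the module docs above can be exercised with a few assertions; these are plain `format!` calls and do not depend on the 2015-era internals quoted here.

```rust
fn main() {
    // Width 6 with right, left and center alignment, plus a custom fill char.
    assert_eq!(format!("{:>6}", "x"), "     x");
    assert_eq!(format!("{:<6}", "x"), "x     ");
    assert_eq!(format!("{:^6}", "x"), "  x   ");
    assert_eq!(format!("{:*^6}", "x"), "**x***");
    // Sign-aware zero padding for integers.
    assert_eq!(format!("{:08}", 1), "00000001");
    assert_eq!(format!("{:08}", -1), "-0000001");
    // Precision: digits after the decimal point, supplied via `.*`.
    assert_eq!(format!("{:.*}", 2, 1.234567), "1.23");
    println!("ok");
}
```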
fmt.rs
|
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for formatting and printing strings
//!
//! This module contains the runtime support for the `format!` syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings and streams.
//!
//! # Usage
//!
//! The `format!` macro is intended to be familiar to those coming from C's
//! printf/fprintf functions or Python's `str.format` function. In its current
//! revision, the `format!` macro returns a `String` type which is the result of
//! the formatting. In the future it will also be able to pass in a stream to
//! format arguments directly while performing minimal allocations.
//!
//! Some examples of the `format!` extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string, although it must always be referred to with the same type.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the `format!` macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following `format!` expressions all use named argument:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is illegal to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is illegal
//! to provide named parameters that are unused by the format string.
//!
//! ## Argument types
//!
//! Each argument's type is dictated by the format string. It is a requirement
//! that every argument is only ever referred to by one type. For example, this
//! is an invalid format string:
//!
//! ```text
//! {0:x} {0:o}
//! ```
//!
//! This is invalid because the first argument is referred to both as a hexadecimal and
//! as an octal.
//!
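//! Referring to the same argument more than once with the same trait is fine, however; a
//! minimal sketch:
//!
//! ```
//! format!("{0} {0}", 10); // => "10 10"
//! ```
//!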
//! There are various parameters which do require a particular type, however. Namely, the `{:.*}`
//! syntax, which sets the number of digits after the decimal point in floating-point types:
//!
//! ```
//! let formatted_number = format!("{:.*}", 2, 1.234567);
//!
//! assert_eq!("1.23", formatted_number)
//! ```
//!
//! If this syntax is used, then the number of characters to print precedes the actual object being
//! formatted, and the number of characters must have the type `usize`. Although a `usize` can be
//! printed with `{}`, it is illegal to reference an argument as such. For example this is another
//! invalid format string:
//!
//! ```text
//! {:.*} {0}
//! ```
//!
//! ## Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
//! well as `isize`). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ `Display`
//! * `?` ⇒ `Debug`
//! * `o` ⇒ `Octal`
//! * `x` ⇒ `LowerHex`
//! * `X` ⇒ `UpperHex`
//! * `p` ⇒ `Pointer`
//! * `b` ⇒ `Binary`
//! * `e` ⇒ `LowerExp`
//! * `E` ⇒ `UpperExp`
//!
//! What this means is that any type of argument which implements the
//! `fmt::Binary` trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the `Display` trait.
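//!
//! A brief illustrative sketch of this mapping:
//!
//! ```
//! format!("{:b}", 5);   // => "101"
//! format!("{:o}", 9);   // => "11"
//! format!("{:x}", 255); // => "ff"
//! ```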
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! `Formatter` struct. In order to help with this, the `Formatter` struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is `fmt::Result` which is a
//! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
//! implementations should ensure that they return errors from `write!`
//! correctly (propagating errors upward).
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! # #![feature(core, std_misc)]
//! use std::fmt;
//! use std::f64;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//!     println!("{:?}", myvector);     // => "Vector2D { x: 3, y: 4 }"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### fmt::Display vs fmt::Debug
//!
//! These two formatting traits have distinct purposes:
//!
//! - `fmt::Display` implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the `Display` trait.
//! - `fmt::Debug` implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the `Debug` trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! ## Related macros
//!
//! There are a number of related macros in the `format!` family. The ones that
//! are currently implemented are:
//!
//! ```ignore
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and `writeln` are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the `write` function defined in this module.
//! Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and `println` emit their output to stdout. Similarly to the `write!`
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! fmt::format(format_args!("this returns {}", "String"));
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!("or a {} too", "function"));
//! ```
//!
//! The result of the `format_args!` macro is a value of type `fmt::Arguments`.
//! This structure can then be passed to the `write` and `format` functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! # Syntax
//!
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ format <text> ] *
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := identifier | ''
//! count := parameter | integer
//! parameter := integer '$'
//! ```
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted. This
//! syntax draws heavily from Python's, so it may seem a bit familiar.
//!
//! ## Fill/Alignment
//!
//! The fill character is provided normally in conjunction with the `width`
//! parameter. This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it. The extra
//! characters are specified by `fill`, and the alignment can be one of three
//! options:
//!
//! * `<` - the argument is left-aligned in `width` columns
//! * `^` - the argument is center-aligned in `width` columns
//! * `>` - the argument is right-aligned in `width` columns
//!
//! Note that alignment may not be implemented by some types. A good way
//! to ensure padding is applied is to format your input, then use this
//! resulting string to pad your output.
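//!
//! A short illustrative sketch of fill and alignment:
//!
//! ```
//! format!("{:<5}", "x");  // => "x    "
//! format!("{:^5}", "x");  // => "  x  "
//! format!("{:->5}", "x"); // => "----x"
//! ```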
//!
//! ## Sign/#/0
//!
//! These can all be interpreted as flags for a particular formatter; a short example
//! follows the list below.
//!
//! * '+' - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (+ or -)
//! should always be printed.
//! * '-' - Currently not used
//! * '#' - This flag indicates that the "alternate" form of printing should
//! be used. For array slices, the alternate form omits the brackets.
//! For the integer formatting traits, the alternate forms are:
//! * `#x` - precedes the argument with a "0x"
//! * `#X` - precedes the argument with a "0x"
//!   * `#b` - precedes the argument with a "0b"
//! * `#o` - precedes the argument with a "0o"
//! * '0' - This is used to indicate for integer formats that the padding should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
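//!
//! For example, a minimal sketch of these flags:
//!
//! ```
//! format!("{:+}", 5);    // => "+5"
//! format!("{:#x}", 255); // => "0xff"
//! format!("{:#b}", 2);   // => "0b10"
//! format!("{:08}", -1);  // => "-0000001"
//! ```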
//!
//! ## Width
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space.
//!
//! The default fill/alignment for non-numerics is a space and left-aligned. The
//! default for numeric formatters is also a space but with right-alignment. If
//! the '0' flag is specified for numerics, then the implicit fill character is
//! '0'.
//!
//! The value for the width can also be provided as a `usize` in the list of
//! parameters by using the `2$` syntax indicating that the second argument is a
//! `usize` specifying the width.
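//!
//! A minimal sketch of both forms:
//!
//! ```
//! format!("{:5}", "x");     // => "x    "
//! format!("{:1$}", "x", 5); // => "x    "
//! format!("{:5}", 42);      // => "   42"
//! ```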
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and only those are
//! emitted.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`,
//! 2. an integer followed by dollar sign `.N$`, or
//! 3. an asterisk `.*`.
//!
//! The first specification, `.N`, means the integer `N` itself is the precision.
//!
//! The second, `.N$`, means use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! Finally, `.*` means that this `{...}` is associated with *two* format inputs rather than one:
//! the first input holds the `usize` precision, and the second holds the value to print. Note
//! that in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part
//! refers to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, these:
//!
//! ```
//! // Hello {arg 0 (x)} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 (x)} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 (x)} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//! ```
//!
//! All print the same thing:
//!
//! ```text
//! Hello x is 0.01000
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
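//!
//! For example, an illustrative sketch:
//!
//! ```
//! assert_eq!(format!("{{}}"), "{}");
//! ```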
#![stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write, rt};
pub use core::fmt::{Octal, Binary};
pub use core::fmt::{Display, Debug};
pub use core::fmt::{LowerHex, UpperHex, Pointer};
pub use core::fmt::{LowerExp, UpperExp};
pub use core::fmt::Error;
pub use core::fmt::{ArgumentV1, Arguments, write, radix, Radix, RadixFmt};
use string;
/// The format function takes a precompiled format string and a list of
/// arguments, to return the resulting formatted string.
///
/// # Arguments
///
/// * args - a structure of arguments generated via the `format_args!` macro.
///
/// # Examples
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments) -> string::String {
let mut outp
|
ut = string::String::new();
let _ = output.write_fmt(args);
output
}
|
identifier_body
|
|
fmt.rs
|
// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Utilities for formatting and printing strings
//!
//! This module contains the runtime support for the `format!` syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings and streams.
//!
//! # Usage
//!
//! The `format!` macro is intended to be familiar to those coming from C's
//! printf/fprintf functions or Python's `str.format` function. In its current
//! revision, the `format!` macro returns a `String` type which is the result of
//! the formatting. In the future it will also be able to pass in a stream to
//! format arguments directly while performing minimal allocations.
//!
//! Some examples of the `format!` extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! format!("{} {}", 1, 2); // => "1 2"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters which explicitly name their argument do not affect
//! parameters which do not name an argument in terms of positional specifiers.
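//!
//! One more small illustrative sketch of this interaction:
//!
//! ```
//! format!("{0} {} {}", 1, 2); // => "1 1 2"
//! ```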
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string, although it must always be referred to with the same type.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the `format!` macro is a syntax extension which allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following `format!` expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! It is illegal to put positional parameters (those without names) after
//! arguments which have names. Like with positional parameters, it is illegal
//! to provide named parameters that are unused by the format string.
//!
//! ## Argument types
//!
//! Each argument's type is dictated by the format string. It is a requirement
//! that every argument is only ever referred to by one type. For example, this
//! is an invalid format string:
//!
//! ```text
//! {0:x} {0:o}
//! ```
//!
//! This is invalid because the first argument is referred to both as a hexadecimal and
//! as an octal.
//!
//! There are various parameters which do require a particular type, however. Namely, the `{:.*}`
//! syntax, which sets the number of digits after the decimal point in floating-point types:
//!
//! ```
//! let formatted_number = format!("{:.*}", 2, 1.234567);
//!
//! assert_eq!("1.23", formatted_number)
//! ```
//!
//! If this syntax is used, then the number of characters to print precedes the actual object being
//! formatted, and the number of characters must have the type `usize`. Although a `usize` can be
//! printed with `{}`, it is illegal to reference an argument as such. For example this is another
//! invalid format string:
//!
//! ```text
//! {:.*} {0}
//! ```
//!
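//! For comparison, a valid arrangement passes the precision as a separate `usize` argument;
//! a minimal sketch:
//!
//! ```
//! format!("{:.1$}", 1.234567, 2); // => "1.23"
//! ```
//!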
//! ## Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like `i8` as
//! well as `isize`). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ `Display`
//! * `?` ⇒ `Debug`
//! * `o` ⇒ `Octal`
//! * `x` ⇒ `LowerHex`
//! * `X` ⇒ `UpperHex`
//! * `p` ⇒ `Pointer`
//! * `b` ⇒ `Binary`
//! * `e` ⇒ `LowerExp`
//! * `E` ⇒ `UpperExp`
//!
//! What this means is that any type of argument which implements the
//! `fmt::Binary` trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the `Display` trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! `Formatter` struct. In order to help with this, the `Formatter` struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is `fmt::Result` which is a
//! typedef to `Result<(), IoError>` (also known as `IoResult<()>`). Formatting
//! implementations should ensure that they return errors from `write!`
//! correctly (propagating errors upward).
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! # #![feature(core, std_misc)]
//! use std::fmt;
//! use std::f64;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//!     println!("{:?}", myvector);     // => "Vector2D { x: 3, y: 4 }"
//! println!("{:10.3b}", myvector); // => " 5.000"
//! }
//! ```
//!
//! ### fmt::Display vs fmt::Debug
//!
//! These two formatting traits have distinct purposes:
//!
//! - `fmt::Display` implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the `Display` trait.
//! - `fmt::Debug` implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the `Debug` trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! ## Related macros
//!
//! There are a number of related macros in the `format!` family. The ones that
//! are currently implemented are:
//!
//! ```ignore
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and `writeln` are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the `write` function defined in this module.
//! Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and `println` emit their output to stdout. Similarly to the `write!`
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//!
//! ### `format_args!`
//!
//! This is a curious macro which is used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! fmt::format(format_args!("this returns {}", "String"));
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!("or a {} too", "function"));
//! ```
//!
//! The result of the `format_args!` macro is a value of type `fmt::Arguments`.
//! This structure can then be passed to the `write` and `format` functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with format strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! # Syntax
//!
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := <text> [ format <text> ] *
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#'][0][width]['.' precision][type]
//! fill := character
|
//! width := count
//! precision := count | '*'
//! type := identifier | ''
//! count := parameter | integer
//! parameter := integer '$'
//! ```
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in the syntax above). These
//! parameters affect the string representation of what's being formatted. This
//! syntax draws heavily from Python's, so it may seem a bit familiar.
//!
//! ## Fill/Alignment
//!
//! The fill character is provided normally in conjunction with the `width`
//! parameter. This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it. The extra
//! characters are specified by `fill`, and the alignment can be one of three
//! options:
//!
//! * `<` - the argument is left-aligned in `width` columns
//! * `^` - the argument is center-aligned in `width` columns
//! * `>` - the argument is right-aligned in `width` columns
//!
//! Note that alignment may not be implemented by some types. A good way
//! to ensure padding is applied is to format your input, then use this
//! resulting string to pad your output.
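//!
//! A minimal sketch of that workaround:
//!
//! ```
//! let s = format!("{}", 3.14);
//! format!("{:>8}", s); // => "    3.14"
//! ```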
//!
//! ## Sign/#/0
//!
//! These can all be interpreted as flags for a particular formatter.
//!
//! * '+' - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for the
//! `Signed` trait. This flag indicates that the correct sign (+ or -)
//! should always be printed.
//! * '-' - Currently not used
//! * '#' - This flag indicates that the "alternate" form of printing should
//! be used. For array slices, the alternate form omits the brackets.
//! For the integer formatting traits, the alternate forms are:
//! * `#x` - precedes the argument with a "0x"
//! * `#X` - precedes the argument with a "0x"
//!   * `#b` - precedes the argument with a "0b"
//! * `#o` - precedes the argument with a "0o"
//! * '0' - This is used to indicate for integer formats that the padding should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//!
//! ## Width
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space.
//!
//! The default fill/alignment for non-numerics is a space and left-aligned. The
//! default for numeric formatters is also a space but with right-alignment. If
//! the '0' flag is specified for numerics, then the implicit fill character is
//! '0'.
//!
//! The value for the width can also be provided as a `usize` in the list of
//! parameters by using the `2$` syntax indicating that the second argument is a
//! `usize` specifying the width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and only those are
//! emitted.
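//!
//! For instance, a small illustrative sketch:
//!
//! ```
//! format!("{:.3}", "hello"); // => "hel"
//! ```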
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`,
//! 2. an integer followed by dollar sign `.N$`, or
//! 3. an asterisk `.*`.
//!
//! The first specification, `.N`, means the integer `N` itself is the precision.
//!
//! The second, `.N$`, means use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! Finally, `.*` means that this `{...}` is associated with *two* format inputs rather than one:
//! the first input holds the `usize` precision, and the second holds the value to print. Note
//! that in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part
//! refers to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, these:
//!
//! ```
//! // Hello {arg 0 (x)} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 (x)} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 (x)} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg (x)} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//! ```
//!
//! All print the same thing:
//!
//! ```text
//! Hello x is 0.01000
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print two significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
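//!
//! For instance, a minimal sketch:
//!
//! ```
//! format!("{{{}}}", 5); // => "{5}"
//! ```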
#![stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write, rt};
pub use core::fmt::{Octal, Binary};
pub use core::fmt::{Display, Debug};
pub use core::fmt::{LowerHex, UpperHex, Pointer};
pub use core::fmt::{LowerExp, UpperExp};
pub use core::fmt::Error;
pub use core::fmt::{ArgumentV1, Arguments, write, radix, Radix, RadixFmt};
use string;
/// The format function takes a precompiled format string and a list of
/// arguments, to return the resulting formatted string.
///
/// # Arguments
///
/// * args - a structure of arguments generated via the `format_args!` macro.
///
/// # Examples
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!".to_string());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments) -> string::String {
let mut output = string::String::new();
let _ = output.write_fmt(args);
output
}
|
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
|
random_line_split
|
main.rs
|
extern crate bodyparser;
extern crate crypto;
extern crate env_logger;
extern crate iron;
extern crate persistent;
extern crate router;
extern crate rustc_serialize;
#[macro_use] extern crate hyper;
#[macro_use] extern crate log;
mod hook;
mod repo_config;
use std::env;
use std::thread;
use hook::*;
use iron::prelude::*;
use iron::status;
use persistent::Read;
use router::{Router};
header! {(XHubSignature, "X-Hub-Signature") => [String]}
header! {(XGitHubEvent, "X-GitHub-Event") => [String]}
const MAX_BODY_LENGTH: usize = 1024 * 1024 * 10;
fn parse_hook(config_dir_str: &str, req: &mut Request) -> IronResult<Response> {
let config_dir = config_dir_str.to_string();
let body = req.get::<bodyparser::Raw>();
let event = req.headers.get::<XGitHubEvent>().unwrap();
let empty_sig = &XHubSignature("".to_string());
let signature = req.headers.get::<XHubSignature>().unwrap_or(empty_sig);
match body {
Ok(Some(body)) => {
let hook = GithubHook::new(&event, &body, &signature, &config_dir);
thread::spawn(move || { hook.receive(); });
Ok(Response::with(status::Ok))
},
_ => Ok(Response::with(status::NotFound))
}
}
fn main()
|
info!("Will listen on {}...", listen);
Iron::new(chain).http(&listen[..]).unwrap();
}
|
{
env_logger::init().unwrap();
let args: Vec<_> = env::args().collect();
if args.len() < 2 || args.len() > 3 {
        println!("Usage: github-hook-receiver <config dir (no trailing slash)> [<listen address (127.0.0.1:3000)>]");
std::process::exit(1);
}
let config_dir = args.get(1).unwrap().to_string();
let listen = args.get(2).unwrap_or(&"127.0.0.1:3000".to_string()).to_string();
let mut router = Router::new();
router.post("/receive", move |req: &mut Request| -> IronResult<Response> {
parse_hook(&config_dir[..], req)
});
let mut chain = Chain::new(router);
chain.link_before(Read::<bodyparser::MaxBodyLength>::one(MAX_BODY_LENGTH));
|
identifier_body
|
main.rs
|
extern crate bodyparser;
extern crate crypto;
extern crate env_logger;
extern crate iron;
extern crate persistent;
extern crate router;
extern crate rustc_serialize;
#[macro_use] extern crate hyper;
#[macro_use] extern crate log;
mod hook;
mod repo_config;
use std::env;
use std::thread;
use hook::*;
use iron::prelude::*;
use iron::status;
use persistent::Read;
use router::{Router};
header! {(XHubSignature, "X-Hub-Signature") => [String]}
header! {(XGitHubEvent, "X-GitHub-Event") => [String]}
const MAX_BODY_LENGTH: usize = 1024 * 1024 * 10;
fn parse_hook(config_dir_str: &str, req: &mut Request) -> IronResult<Response> {
let config_dir = config_dir_str.to_string();
let body = req.get::<bodyparser::Raw>();
let event = req.headers.get::<XGitHubEvent>().unwrap();
let empty_sig = &XHubSignature("".to_string());
let signature = req.headers.get::<XHubSignature>().unwrap_or(empty_sig);
match body {
Ok(Some(body)) => {
let hook = GithubHook::new(&event, &body, &signature, &config_dir);
thread::spawn(move || { hook.receive(); });
Ok(Response::with(status::Ok))
},
_ => Ok(Response::with(status::NotFound))
}
}
fn main() {
env_logger::init().unwrap();
let args: Vec<_> = env::args().collect();
if args.len() < 2 || args.len() > 3 {
        println!("Usage: github-hook-receiver <config dir (no trailing slash)> [<listen address (127.0.0.1:3000)>]");
std::process::exit(1);
}
let config_dir = args.get(1).unwrap().to_string();
let listen = args.get(2).unwrap_or(&"127.0.0.1:3000".to_string()).to_string();
let mut router = Router::new();
router.post("/receive", move |req: &mut Request| -> IronResult<Response> {
parse_hook(&config_dir[..], req)
});
let mut chain = Chain::new(router);
|
info!("Will listen on {}...", listen);
Iron::new(chain).http(&listen[..]).unwrap();
}
|
chain.link_before(Read::<bodyparser::MaxBodyLength>::one(MAX_BODY_LENGTH));
|
random_line_split
|
main.rs
|
extern crate bodyparser;
extern crate crypto;
extern crate env_logger;
extern crate iron;
extern crate persistent;
extern crate router;
extern crate rustc_serialize;
#[macro_use] extern crate hyper;
#[macro_use] extern crate log;
mod hook;
mod repo_config;
use std::env;
use std::thread;
use hook::*;
use iron::prelude::*;
use iron::status;
use persistent::Read;
use router::{Router};
header! {(XHubSignature, "X-Hub-Signature") => [String]}
header! {(XGitHubEvent, "X-GitHub-Event") => [String]}
const MAX_BODY_LENGTH: usize = 1024 * 1024 * 10;
fn
|
(config_dir_str: &str, req: &mut Request) -> IronResult<Response> {
let config_dir = config_dir_str.to_string();
let body = req.get::<bodyparser::Raw>();
let event = req.headers.get::<XGitHubEvent>().unwrap();
let empty_sig = &XHubSignature("".to_string());
let signature = req.headers.get::<XHubSignature>().unwrap_or(empty_sig);
match body {
Ok(Some(body)) => {
let hook = GithubHook::new(&event, &body, &signature, &config_dir);
thread::spawn(move || { hook.receive(); });
Ok(Response::with(status::Ok))
},
_ => Ok(Response::with(status::NotFound))
}
}
fn main() {
env_logger::init().unwrap();
let args: Vec<_> = env::args().collect();
if args.len() < 2 || args.len() > 3 {
        println!("Usage: github-hook-receiver <config dir (no trailing slash)> [<listen address (127.0.0.1:3000)>]");
std::process::exit(1);
}
let config_dir = args.get(1).unwrap().to_string();
let listen = args.get(2).unwrap_or(&"127.0.0.1:3000".to_string()).to_string();
let mut router = Router::new();
router.post("/receive", move |req: &mut Request| -> IronResult<Response> {
parse_hook(&config_dir[..], req)
});
let mut chain = Chain::new(router);
chain.link_before(Read::<bodyparser::MaxBodyLength>::one(MAX_BODY_LENGTH));
info!("Will listen on {}...", listen);
Iron::new(chain).http(&listen[..]).unwrap();
}
|
parse_hook
|
identifier_name
|
fetch.rs
|
use cargo::ops;
use cargo::util::{CliResult, Config};
use cargo::util::important_paths::find_root_manifest_for_wd;
#[derive(RustcDecodable)]
pub struct Options {
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
}
pub const USAGE: &'static str = "
Fetch dependencies of a package from the network.
Usage:
cargo fetch [options]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to fetch dependencies for
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
If a lockfile is available, this command will ensure that all of the git
dependencies and/or registry dependencies are downloaded and locally
available. The network is never touched after a `cargo fetch` unless
the lockfile changes.
If the lockfile is not available, then this is the equivalent of
`cargo generate-lockfile`. A lockfile is generated and dependencies are also
all updated.
";
pub fn
|
(options: Options, config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_wd(options.flag_manifest_path, config.cwd()));
try!(ops::fetch(&root, config));
Ok(None)
}
|
execute
|
identifier_name
|
fetch.rs
|
use cargo::ops;
use cargo::util::{CliResult, Config};
use cargo::util::important_paths::find_root_manifest_for_wd;
#[derive(RustcDecodable)]
pub struct Options {
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
}
|
pub const USAGE: &'static str = "
Fetch dependencies of a package from the network.
Usage:
cargo fetch [options]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to fetch dependencies for
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
If a lockfile is available, this command will ensure that all of the git
dependencies and/or registry dependencies are downloaded and locally
available. The network is never touched after a `cargo fetch` unless
the lockfile changes.
If the lockfile is not available, then this is the equivalent of
`cargo generate-lockfile`. A lockfile is generated and dependencies are also
all updated.
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>> {
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_wd(options.flag_manifest_path, config.cwd()));
try!(ops::fetch(&root, config));
Ok(None)
}
|
random_line_split
|
|
fetch.rs
|
use cargo::ops;
use cargo::util::{CliResult, Config};
use cargo::util::important_paths::find_root_manifest_for_wd;
#[derive(RustcDecodable)]
pub struct Options {
flag_manifest_path: Option<String>,
flag_verbose: bool,
flag_quiet: bool,
flag_color: Option<String>,
}
pub const USAGE: &'static str = "
Fetch dependencies of a package from the network.
Usage:
cargo fetch [options]
Options:
-h, --help Print this message
--manifest-path PATH Path to the manifest to fetch dependencies for
-v, --verbose Use verbose output
-q, --quiet No output printed to stdout
--color WHEN Coloring: auto, always, never
If a lockfile is available, this command will ensure that all of the git
dependencies and/or registry dependencies are downloaded and locally
available. The network is never touched after a `cargo fetch` unless
the lockfile changes.
If the lockfile is not available, then this is the equivalent of
`cargo generate-lockfile`. A lockfile is generated and dependencies are also
all updated.
";
pub fn execute(options: Options, config: &Config) -> CliResult<Option<()>>
|
{
try!(config.shell().set_verbosity(options.flag_verbose, options.flag_quiet));
try!(config.shell().set_color_config(options.flag_color.as_ref().map(|s| &s[..])));
let root = try!(find_root_manifest_for_wd(options.flag_manifest_path, config.cwd()));
try!(ops::fetch(&root, config));
Ok(None)
}
|
identifier_body
|
|
deriving-global.rs
|
// xfail-fast #7103 `extern mod` does not work on check-fast
// xfail-pretty - does not converge
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod extra; // {En,De}codable
mod submod {
// if any of these are implemented without global calls for any
// function calls, then being in a submodule will (correctly)
// cause errors about unrecognised module `std` (or `extra`)
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
enum A { A1(uint), A2(int) }
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
struct B { x: uint, y: int }
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
struct C(uint, int);
}
fn
|
() {}
|
main
|
identifier_name
|
deriving-global.rs
|
// xfail-fast #7103 `extern mod` does not work on check-fast
// xfail-pretty - does not converge
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
// option. This file may not be copied, modified, or distributed
// except according to those terms.
extern mod extra; // {En,De}codable
mod submod {
// if any of these are implemented without global calls for any
// function calls, then being in a submodule will (correctly)
// cause errors about unrecognised module `std` (or `extra`)
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
enum A { A1(uint), A2(int) }
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
struct B { x: uint, y: int }
#[deriving(Eq, Ord, TotalEq, TotalOrd,
IterBytes,
Clone, DeepClone,
ToStr, Rand,
Encodable, Decodable)]
struct C(uint, int);
}
fn main() {}
|
random_line_split
|
|
deriving-cmp-generic-enum.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum
|
<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E::E0;
let e11 = E::E1(1);
let e12 = E::E1(2);
let e21 = E::E2(1, 1);
let e22 = E::E2(1, 2);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
            assert_eq!(*e1 != *e2, !eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}
|
E
|
identifier_name
|
deriving-cmp-generic-enum.rs
|
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum E<T> {
E0,
E1(T),
E2(T,T)
}
pub fn main() {
let e0 = E::E0;
let e11 = E::E1(1);
let e12 = E::E1(2);
let e21 = E::E2(1, 1);
|
let e22 = E::E2(1, 2);
// in order for both PartialOrd and Ord
let es = [e0, e11, e12, e21, e22];
for (i, e1) in es.iter().enumerate() {
for (j, e2) in es.iter().enumerate() {
let ord = i.cmp(&j);
let eq = i == j;
let lt = i < j;
let le = i <= j;
let gt = i > j;
let ge = i >= j;
// PartialEq
assert_eq!(*e1 == *e2, eq);
            assert_eq!(*e1 != *e2, !eq);
// PartialOrd
assert_eq!(*e1 < *e2, lt);
assert_eq!(*e1 > *e2, gt);
assert_eq!(*e1 <= *e2, le);
assert_eq!(*e1 >= *e2, ge);
// Ord
assert_eq!(e1.cmp(e2), ord);
}
}
}
|
random_line_split
|
|
lib.rs
|
#[cfg(test)]
mod tests {
use rusqlite::{Connection, Result};
use tagged_rusqlite::tagged_sql;
#[test]
fn
|
() -> Result<()> {
let conn = Connection::open_in_memory()?;
tagged_sql!(
CreateTable,
"CREATE TABLE person (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
data BLOB
);"
);
CreateTable::prepare(&conn).execute()?;
tagged_sql!(
InsertPerson,
r#"INSERT INTO
person (name, data)
VALUES (
?1 /*:name:String*/,
?2 /*:data:Option<Vec<u8>>*/
)
RETURNING id /*:i64*/;
"#
);
let mut insert = InsertPerson::prepare(&conn);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Steven".to_string(),
data: Some(vec![1u8, 2u8]),
})?;
assert_eq!(id.id,1);
let id = insert.query_row_bind(&InsertPersonParams {
name: "John".to_string(),
data: None,
})?;
assert_eq!(id.id,2);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Bill".to_string(),
data: None,
})?;
assert_eq!(id.id,3);
tagged_sql!(
SelectPerson,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person;
"#
);
let mut stmt = SelectPerson::prepare(&conn);
let people: Result<Vec<_>, _> = stmt.query()?.collect();
assert_eq!(people?, vec![SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) },
SelectPersonRow { id: 2, name: "John".into(), data: None },
SelectPersonRow { id: 3, name: "Bill".into(), data: None }, ]);
tagged_sql!(
SearchByName,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person
               WHERE name = ? /*:name2:String*/;
"#
);
let search_params = SearchByNameParams {
name2: "John".into(),
};
let people: Result<Vec<_>, _> = SearchByName::prepare(&conn).query_bind(&search_params)?.collect();
assert_eq!(people?, vec![
SearchByNameRow { id: 2, name: "John".into(), data: None },
]);
let mut stmt = SelectPerson::prepare(&conn);
assert_eq!(stmt.query_row()?, SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) });
let mut stmt = SearchByName::prepare(&conn);
let search_params = SearchByNameParams {
name2: "Bill".into(),
};
assert_eq!(stmt.query_row_bind(&search_params)?, SearchByNameRow { id: 3, name: "Bill".into(), data: None });
Ok(())
}
}
|
smoke_test
|
identifier_name
|
lib.rs
|
#[cfg(test)]
mod tests {
use rusqlite::{Connection, Result};
use tagged_rusqlite::tagged_sql;
#[test]
fn smoke_test() -> Result<()> {
let conn = Connection::open_in_memory()?;
tagged_sql!(
CreateTable,
"CREATE TABLE person (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
data BLOB
);"
);
|
CreateTable::prepare(&conn).execute()?;
tagged_sql!(
InsertPerson,
r#"INSERT INTO
person (name, data)
VALUES (
?1 /*:name:String*/,
?2 /*:data:Option<Vec<u8>>*/
)
RETURNING id /*:i64*/;
"#
);
let mut insert = InsertPerson::prepare(&conn);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Steven".to_string(),
data: Some(vec![1u8, 2u8]),
})?;
assert_eq!(id.id,1);
let id = insert.query_row_bind(&InsertPersonParams {
name: "John".to_string(),
data: None,
})?;
assert_eq!(id.id,2);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Bill".to_string(),
data: None,
})?;
assert_eq!(id.id,3);
tagged_sql!(
SelectPerson,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person;
"#
);
let mut stmt = SelectPerson::prepare(&conn);
let people: Result<Vec<_>, _> = stmt.query()?.collect();
assert_eq!(people?, vec![SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) },
SelectPersonRow { id: 2, name: "John".into(), data: None },
SelectPersonRow { id: 3, name: "Bill".into(), data: None }, ]);
tagged_sql!(
SearchByName,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person
               WHERE name = ? /*:name2:String*/;
"#
);
let search_params = SearchByNameParams {
name2: "John".into(),
};
let people: Result<Vec<_>, _> = SearchByName::prepare(&conn).query_bind(&search_params)?.collect();
assert_eq!(people?, vec![
SearchByNameRow { id: 2, name: "John".into(), data: None },
]);
let mut stmt = SelectPerson::prepare(&conn);
assert_eq!(stmt.query_row()?, SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) });
let mut stmt = SearchByName::prepare(&conn);
let search_params = SearchByNameParams {
name2: "Bill".into(),
};
assert_eq!(stmt.query_row_bind(&search_params)?, SearchByNameRow { id: 3, name: "Bill".into(), data: None });
Ok(())
}
}
|
random_line_split
|
|
lib.rs
|
#[cfg(test)]
mod tests {
use rusqlite::{Connection, Result};
use tagged_rusqlite::tagged_sql;
#[test]
fn smoke_test() -> Result<()>
|
?2 /*:data:Option<Vec<u8>>*/
)
RETURNING id /*:i64*/;
"#
);
let mut insert = InsertPerson::prepare(&conn);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Steven".to_string(),
data: Some(vec![1u8, 2u8]),
})?;
assert_eq!(id.id,1);
let id = insert.query_row_bind(&InsertPersonParams {
name: "John".to_string(),
data: None,
})?;
assert_eq!(id.id,2);
let id = insert.query_row_bind(&InsertPersonParams {
name: "Bill".to_string(),
data: None,
})?;
assert_eq!(id.id,3);
tagged_sql!(
SelectPerson,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person;
"#
);
let mut stmt = SelectPerson::prepare(&conn);
let people: Result<Vec<_>, _> = stmt.query()?.collect();
assert_eq!(people?, vec![SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) },
SelectPersonRow { id: 2, name: "John".into(), data: None },
SelectPersonRow { id: 3, name: "Bill".into(), data: None }, ]);
tagged_sql!(
SearchByName,
r#"SELECT
id /*:i64*/,
name /*:String*/,
data /*:Option<Vec<u8>>*/
FROM person
               WHERE name = ? /*:name2:String*/;
"#
);
let search_params = SearchByNameParams {
name2: "John".into(),
};
let people: Result<Vec<_>, _> = SearchByName::prepare(&conn).query_bind(&search_params)?.collect();
assert_eq!(people?, vec![
SearchByNameRow { id: 2, name: "John".into(), data: None },
]);
let mut stmt = SelectPerson::prepare(&conn);
assert_eq!(stmt.query_row()?, SelectPersonRow { id: 1, name: "Steven".into(), data: Some(vec![1u8, 2u8]) });
let mut stmt = SearchByName::prepare(&conn);
let search_params = SearchByNameParams {
name2: "Bill".into(),
};
assert_eq!(stmt.query_row_bind(&search_params)?, SearchByNameRow { id: 3, name: "Bill".into(), data: None });
Ok(())
}
}
|
{
let conn = Connection::open_in_memory()?;
tagged_sql!(
CreateTable,
"CREATE TABLE person (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
data BLOB
);"
);
CreateTable::prepare(&conn).execute()?;
tagged_sql!(
InsertPerson,
r#"INSERT INTO
person (name, data)
VALUES (
?1 /*:name:String*/,
|
identifier_body
|
spec_from_iter_nested.rs
|
use core::iter::TrustedLen;
use core::ptr::{self};
use super::{SpecExtend, Vec};
/// Another specialization trait for Vec::from_iter
/// necessary to manually prioritize overlapping specializations
/// see [`SpecFromIter`](super::SpecFromIter) for details.
pub(super) trait SpecFromIterNested<T, I> {
fn from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
default fn
|
(mut iterator: I) -> Self {
// Unroll the first iteration, as the vector is going to be
// expanded on this iteration in every case when the iterable is not
// empty, but the loop in extend_desugared() is not going to see the
// vector being full in the few subsequent loop iterations.
// So we get better branch prediction.
let mut vector = match iterator.next() {
None => return Vec::new(),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut vector = Vec::with_capacity(lower.saturating_add(1));
unsafe {
ptr::write(vector.as_mut_ptr(), element);
vector.set_len(1);
}
vector
}
};
// must delegate to spec_extend() since extend() itself delegates
// to spec_from for empty Vecs
<Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
vector
}
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
fn from_iter(iterator: I) -> Self {
let mut vector = match iterator.size_hint() {
(_, Some(upper)) => Vec::with_capacity(upper),
// TrustedLen contract guarantees that `size_hint() == (_, None)` means that there
// are more than `usize::MAX` elements.
// Since the previous branch would eagerly panic if the capacity is too large
// (via `with_capacity`) we do the same here.
_ => panic!("capacity overflow"),
};
// reuse extend specialization for TrustedLen
vector.spec_extend(iterator);
vector
}
}
|
from_iter
|
identifier_name
|
spec_from_iter_nested.rs
|
use core::iter::TrustedLen;
use core::ptr::{self};
use super::{SpecExtend, Vec};
/// Another specialization trait for Vec::from_iter
/// necessary to manually prioritize overlapping specializations
/// see [`SpecFromIter`](super::SpecFromIter) for details.
pub(super) trait SpecFromIterNested<T, I> {
fn from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
default fn from_iter(mut iterator: I) -> Self {
// Unroll the first iteration, as the vector is going to be
|
// vector being full in the few subsequent loop iterations.
// So we get better branch prediction.
let mut vector = match iterator.next() {
None => return Vec::new(),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut vector = Vec::with_capacity(lower.saturating_add(1));
unsafe {
ptr::write(vector.as_mut_ptr(), element);
vector.set_len(1);
}
vector
}
};
// must delegate to spec_extend() since extend() itself delegates
// to spec_from for empty Vecs
<Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
vector
}
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
fn from_iter(iterator: I) -> Self {
let mut vector = match iterator.size_hint() {
(_, Some(upper)) => Vec::with_capacity(upper),
// TrustedLen contract guarantees that `size_hint() == (_, None)` means that there
// are more than `usize::MAX` elements.
// Since the previous branch would eagerly panic if the capacity is too large
// (via `with_capacity`) we do the same here.
_ => panic!("capacity overflow"),
};
// reuse extend specialization for TrustedLen
vector.spec_extend(iterator);
vector
}
}
|
// expanded on this iteration in every case when the iterable is not
// empty, but the loop in extend_desugared() is not going to see the
|
random_line_split
|
spec_from_iter_nested.rs
|
use core::iter::TrustedLen;
use core::ptr::{self};
use super::{SpecExtend, Vec};
/// Another specialization trait for Vec::from_iter
/// necessary to manually prioritize overlapping specializations
/// see [`SpecFromIter`](super::SpecFromIter) for details.
pub(super) trait SpecFromIterNested<T, I> {
fn from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
default fn from_iter(mut iterator: I) -> Self {
// Unroll the first iteration, as the vector is going to be
// expanded on this iteration in every case when the iterable is not
// empty, but the loop in extend_desugared() is not going to see the
// vector being full in the few subsequent loop iterations.
// So we get better branch prediction.
let mut vector = match iterator.next() {
None => return Vec::new(),
Some(element) =>
|
};
// must delegate to spec_extend() since extend() itself delegates
// to spec_from for empty Vecs
<Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
vector
}
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
fn from_iter(iterator: I) -> Self {
let mut vector = match iterator.size_hint() {
(_, Some(upper)) => Vec::with_capacity(upper),
// TrustedLen contract guarantees that `size_hint() == (_, None)` means that there
// are more than `usize::MAX` elements.
// Since the previous branch would eagerly panic if the capacity is too large
// (via `with_capacity`) we do the same here.
_ => panic!("capacity overflow"),
};
// reuse extend specialization for TrustedLen
vector.spec_extend(iterator);
vector
}
}
|
{
let (lower, _) = iterator.size_hint();
let mut vector = Vec::with_capacity(lower.saturating_add(1));
unsafe {
ptr::write(vector.as_mut_ptr(), element);
vector.set_len(1);
}
vector
}
|
conditional_block
|
spec_from_iter_nested.rs
|
use core::iter::TrustedLen;
use core::ptr::{self};
use super::{SpecExtend, Vec};
/// Another specialization trait for Vec::from_iter
/// necessary to manually prioritize overlapping specializations
/// see [`SpecFromIter`](super::SpecFromIter) for details.
pub(super) trait SpecFromIterNested<T, I> {
fn from_iter(iter: I) -> Self;
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: Iterator<Item = T>,
{
default fn from_iter(mut iterator: I) -> Self
|
<Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
vector
}
}
impl<T, I> SpecFromIterNested<T, I> for Vec<T>
where
I: TrustedLen<Item = T>,
{
fn from_iter(iterator: I) -> Self {
let mut vector = match iterator.size_hint() {
(_, Some(upper)) => Vec::with_capacity(upper),
// TrustedLen contract guarantees that `size_hint() == (_, None)` means that there
// are more than `usize::MAX` elements.
// Since the previous branch would eagerly panic if the capacity is too large
// (via `with_capacity`) we do the same here.
_ => panic!("capacity overflow"),
};
// reuse extend specialization for TrustedLen
vector.spec_extend(iterator);
vector
}
}
|
{
// Unroll the first iteration, as the vector is going to be
// expanded on this iteration in every case when the iterable is not
// empty, but the loop in extend_desugared() is not going to see the
// vector being full in the few subsequent loop iterations.
// So we get better branch prediction.
let mut vector = match iterator.next() {
None => return Vec::new(),
Some(element) => {
let (lower, _) = iterator.size_hint();
let mut vector = Vec::with_capacity(lower.saturating_add(1));
unsafe {
ptr::write(vector.as_mut_ptr(), element);
vector.set_len(1);
}
vector
}
};
// must delegate to spec_extend() since extend() itself delegates
// to spec_from for empty Vecs
|
identifier_body
|
weak-lang-items.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
|
// This aux-file will require the eh_personality function to be codegen'd, but
// it hasn't been defined just yet. Make sure we don't explode.
#![feature(no_std)]
#![no_std]
#![crate_type = "rlib"]
#[macro_use]
extern crate core;
struct A;
impl core::ops::Drop for A {
fn drop(&mut self) {}
}
pub fn foo() {
let _a = A;
panic!("wut");
}
mod std {
pub use core::{option, fmt};
}
|
// except according to those terms.
// no-prefer-dynamic
|
random_line_split
|
weak-lang-items.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// This aux-file will require the eh_personality function to be codegen'd, but
// it hasn't been defined just yet. Make sure we don't explode.
#![feature(no_std)]
#![no_std]
#![crate_type = "rlib"]
#[macro_use]
extern crate core;
struct A;
impl core::ops::Drop for A {
fn drop(&mut self)
|
}
pub fn foo() {
let _a = A;
panic!("wut");
}
mod std {
pub use core::{option, fmt};
}
|
{}
|
identifier_body
|
weak-lang-items.rs
|
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// no-prefer-dynamic
// This aux-file will require the eh_personality function to be codegen'd, but
// it hasn't been defined just yet. Make sure we don't explode.
#![feature(no_std)]
#![no_std]
#![crate_type = "rlib"]
#[macro_use]
extern crate core;
struct
|
;
impl core::ops::Drop for A {
fn drop(&mut self) {}
}
pub fn foo() {
let _a = A;
panic!("wut");
}
mod std {
pub use core::{option, fmt};
}
|
A
|
identifier_name
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::{kCTFontDefaultOrientation};
use font::{FontHandleMethods, FontMetrics, FontTableTag, FontTableMethods, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::ops::Range;
use std::sync::Arc;
use std::{fmt, ptr};
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
            if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
debug_assert_eq!(n_pairs * KERN_PAIR_LEN, result.pair_data_range.len());
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn
|
(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
        if !result {
// No glyph for this character
return None;
}
        assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt(self.ctfont.x_height() as f64),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
|
new_from_template
|
identifier_name
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::{kCTFontDefaultOrientation};
use font::{FontHandleMethods, FontMetrics, FontTableTag, FontTableMethods, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::ops::Range;
use std::sync::Arc;
use std::{fmt, ptr};
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
|
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
            if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
debug_assert_eq!(n_pairs * KERN_PAIR_LEN, result.pair_data_range.len());
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
        if !result {
// No glyph for this character
return None;
}
        assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool {
self.can_do_fast_shaping
}
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt(self.ctfont.x_height() as f64),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
|
}
|
random_line_split
|
font.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Implementation of Quartz (CoreGraphics) fonts.
extern crate core_foundation;
extern crate core_graphics;
extern crate core_text;
use app_units::Au;
use byteorder::{BigEndian, ByteOrder};
use core_foundation::base::CFIndex;
use core_foundation::data::CFData;
use core_foundation::string::UniChar;
use core_graphics::font::CGGlyph;
use core_graphics::geometry::CGRect;
use core_text::font::CTFont;
use core_text::font_descriptor::{SymbolicTraitAccessors, TraitAccessors};
use core_text::font_descriptor::{kCTFontDefaultOrientation};
use font::{FontHandleMethods, FontMetrics, FontTableTag, FontTableMethods, FractionalPixel};
use font::{GPOS, GSUB, KERN};
use platform::font_template::FontTemplateData;
use platform::macos::font_context::FontContextHandle;
use std::ops::Range;
use std::sync::Arc;
use std::{fmt, ptr};
use style::computed_values::{font_stretch, font_weight};
use text::glyph::GlyphId;
const KERN_PAIR_LEN: usize = 6;
pub struct FontTable {
data: CFData,
}
// assumes 72 points per inch, and 96 px per inch
fn px_to_pt(px: f64) -> f64 {
px / 96. * 72.
}
// assumes 72 points per inch, and 96 px per inch
fn pt_to_px(pt: f64) -> f64 {
pt / 72. * 96.
}
fn au_from_pt(pt: f64) -> Au {
Au::from_f64_px(pt_to_px(pt))
}
impl FontTable {
pub fn wrap(data: CFData) -> FontTable {
FontTable { data: data }
}
}
impl FontTableMethods for FontTable {
fn buffer(&self) -> &[u8] {
self.data.bytes()
}
}
#[derive(Debug)]
pub struct FontHandle {
font_data: Arc<FontTemplateData>,
ctfont: CTFont,
h_kern_subtable: Option<CachedKernTable>,
can_do_fast_shaping: bool,
}
impl FontHandle {
/// Cache all the data needed for basic horizontal kerning. This is used only as a fallback or
/// fast path (when the GPOS table is missing or unnecessary) so it needn't handle every case.
fn find_h_kern_subtable(&self) -> Option<CachedKernTable> {
let font_table = match self.table_for_tag(KERN) {
Some(table) => table,
None => return None
};
let mut result = CachedKernTable {
font_table: font_table,
pair_data_range: 0..0,
px_per_font_unit: 0.0,
};
// Look for a subtable with horizontal kerning in format 0.
// https://www.microsoft.com/typography/otspec/kern.htm
const KERN_COVERAGE_HORIZONTAL_FORMAT_0: u16 = 1;
const SUBTABLE_HEADER_LEN: usize = 6;
const FORMAT_0_HEADER_LEN: usize = 8;
{
let table = result.font_table.buffer();
let version = BigEndian::read_u16(table);
            if version != 0 {
return None;
}
let num_subtables = BigEndian::read_u16(&table[2..]);
let mut start = 4;
for _ in 0..num_subtables {
// TODO: Check the subtable version number?
let len = BigEndian::read_u16(&table[start + 2..]) as usize;
let cov = BigEndian::read_u16(&table[start + 4..]);
let end = start + len;
if cov == KERN_COVERAGE_HORIZONTAL_FORMAT_0 {
// Found a matching subtable.
if result.pair_data_range.len() > 0 {
debug!("Found multiple horizontal kern tables. Disable fast path.");
return None;
}
// Read the subtable header.
let subtable_start = start + SUBTABLE_HEADER_LEN;
let n_pairs = BigEndian::read_u16(&table[subtable_start..]) as usize;
let pair_data_start = subtable_start + FORMAT_0_HEADER_LEN;
result.pair_data_range = pair_data_start..end;
let pt_per_font_unit = self.ctfont.pt_size() as f64 /
self.ctfont.units_per_em() as f64;
result.px_per_font_unit = pt_to_px(pt_per_font_unit);
debug_assert_eq!(n_pairs * KERN_PAIR_LEN, result.pair_data_range.len());
}
start = end;
}
}
if result.pair_data_range.len() > 0 {
Some(result)
} else {
None
}
}
}
struct CachedKernTable {
font_table: FontTable,
pair_data_range: Range<usize>,
px_per_font_unit: f64,
}
impl CachedKernTable {
/// Search for a glyph pair in the kern table and return the corresponding value.
fn binary_search(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> Option<i16> {
let pairs = &self.font_table.buffer()[self.pair_data_range.clone()];
let query = first_glyph << 16 | second_glyph;
let (mut start, mut end) = (0, pairs.len() / KERN_PAIR_LEN);
while start < end {
let i = (start + end) / 2;
let key = BigEndian::read_u32(&pairs[i * KERN_PAIR_LEN..]);
if key > query {
end = i;
} else if key < query {
start = i + 1;
} else {
return Some(BigEndian::read_i16(&pairs[i * KERN_PAIR_LEN + 4..]));
}
}
None
}
}
impl fmt::Debug for CachedKernTable {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "CachedKernTable")
}
}
impl FontHandleMethods for FontHandle {
fn new_from_template(_fctx: &FontContextHandle,
template: Arc<FontTemplateData>,
pt_size: Option<Au>)
-> Result<FontHandle, ()> {
let size = match pt_size {
Some(s) => s.to_f64_px(),
None => 0.0
};
match template.ctfont(size) {
Some(ref ctfont) => {
let mut handle = FontHandle {
font_data: template.clone(),
ctfont: ctfont.clone_with_font_size(size),
h_kern_subtable: None,
can_do_fast_shaping: false,
};
handle.h_kern_subtable = handle.find_h_kern_subtable();
// TODO (#11310): Implement basic support for GPOS and GSUB.
handle.can_do_fast_shaping = handle.h_kern_subtable.is_some() &&
handle.table_for_tag(GPOS).is_none() &&
handle.table_for_tag(GSUB).is_none();
Ok(handle)
}
None => {
Err(())
}
}
}
fn template(&self) -> Arc<FontTemplateData> {
self.font_data.clone()
}
fn family_name(&self) -> String {
self.ctfont.family_name()
}
fn face_name(&self) -> String {
self.ctfont.face_name()
}
fn is_italic(&self) -> bool {
self.ctfont.symbolic_traits().is_italic()
}
fn boldness(&self) -> font_weight::T {
let normalized = self.ctfont.all_traits().normalized_weight(); // [-1.0, 1.0]
let normalized = if normalized <= 0.0 {
4.0 + normalized * 3.0 // [1.0, 4.0]
} else {
4.0 + normalized * 5.0 // [4.0, 9.0]
}; // [1.0, 9.0], centered on 4.0
match normalized.round() as u32 {
1 => font_weight::T::Weight100,
2 => font_weight::T::Weight200,
3 => font_weight::T::Weight300,
4 => font_weight::T::Weight400,
5 => font_weight::T::Weight500,
6 => font_weight::T::Weight600,
7 => font_weight::T::Weight700,
8 => font_weight::T::Weight800,
_ => font_weight::T::Weight900,
}
}
fn stretchiness(&self) -> font_stretch::T {
let normalized = self.ctfont.all_traits().normalized_width(); // [-1.0, 1.0]
let normalized = (normalized + 1.0) / 2.0 * 9.0; // [0.0, 9.0]
match normalized {
v if v < 1.0 => font_stretch::T::ultra_condensed,
v if v < 2.0 => font_stretch::T::extra_condensed,
v if v < 3.0 => font_stretch::T::condensed,
v if v < 4.0 => font_stretch::T::semi_condensed,
v if v < 5.0 => font_stretch::T::normal,
v if v < 6.0 => font_stretch::T::semi_expanded,
v if v < 7.0 => font_stretch::T::expanded,
v if v < 8.0 => font_stretch::T::extra_expanded,
_ => font_stretch::T::ultra_expanded,
}
}
fn glyph_index(&self, codepoint: char) -> Option<GlyphId> {
let characters: [UniChar; 1] = [codepoint as UniChar];
let mut glyphs: [CGGlyph; 1] = [0 as CGGlyph];
let count: CFIndex = 1;
let result = self.ctfont.get_glyphs_for_characters(&characters[0],
&mut glyphs[0],
count);
        if !result {
// No glyph for this character
return None;
}
        assert!(glyphs[0] != 0); // FIXME: error handling
return Some(glyphs[0] as GlyphId);
}
fn glyph_h_kerning(&self, first_glyph: GlyphId, second_glyph: GlyphId) -> FractionalPixel {
if let Some(ref table) = self.h_kern_subtable {
if let Some(font_units) = table.binary_search(first_glyph, second_glyph) {
return font_units as f64 * table.px_per_font_unit;
}
}
0.0
}
fn can_do_fast_shaping(&self) -> bool
|
fn glyph_h_advance(&self, glyph: GlyphId) -> Option<FractionalPixel> {
let glyphs = [glyph as CGGlyph];
let advance = self.ctfont.get_advances_for_glyphs(kCTFontDefaultOrientation,
&glyphs[0],
ptr::null_mut(),
1);
Some(advance as FractionalPixel)
}
fn metrics(&self) -> FontMetrics {
let bounding_rect: CGRect = self.ctfont.bounding_box();
let ascent = self.ctfont.ascent() as f64;
let descent = self.ctfont.descent() as f64;
let em_size = Au::from_f64_px(self.ctfont.pt_size() as f64);
let leading = self.ctfont.leading() as f64;
let scale = px_to_pt(self.ctfont.pt_size() as f64) / (ascent + descent);
let line_gap = (ascent + descent + leading + 0.5).floor();
let max_advance_width = au_from_pt(bounding_rect.size.width as f64);
let average_advance = self.glyph_index('0')
.and_then(|idx| self.glyph_h_advance(idx))
.map(Au::from_f64_px)
.unwrap_or(max_advance_width);
let metrics = FontMetrics {
underline_size: au_from_pt(self.ctfont.underline_thickness() as f64),
// TODO(Issue #201): underline metrics are not reliable. Have to pull out of font table
// directly.
//
// see also: https://bugs.webkit.org/show_bug.cgi?id=16768
// see also: https://bugreports.qt-project.org/browse/QTBUG-13364
underline_offset: au_from_pt(self.ctfont.underline_position() as f64),
strikeout_size: Au(0), // FIXME(Issue #942)
strikeout_offset: Au(0), // FIXME(Issue #942)
leading: au_from_pt(leading),
x_height: au_from_pt(self.ctfont.x_height() as f64),
em_size: em_size,
ascent: au_from_pt(ascent * scale),
descent: au_from_pt(descent * scale),
max_advance: max_advance_width,
average_advance: average_advance,
line_gap: Au::from_f64_px(line_gap),
};
debug!("Font metrics (@{} pt): {:?}", self.ctfont.pt_size() as f64, metrics);
metrics
}
fn table_for_tag(&self, tag: FontTableTag) -> Option<FontTable> {
let result: Option<CFData> = self.ctfont.get_font_table(tag);
result.and_then(|data| {
Some(FontTable::wrap(data))
})
}
}
|
{
self.can_do_fast_shaping
}
|
identifier_body
|
tuple-struct.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:set print pretty off
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print no_padding16
// gdb-check:$1 = {10000, -10001}
// gdb-command:print no_padding32
// gdb-check:$2 = {-10002, -10003.5, 10004}
// gdb-command:print no_padding64
// gdb-check:$3 = {-10005.5, 10006, 10007}
// gdb-command:print no_padding163264
// gdb-check:$4 = {-10008, 10009, 10010, 10011}
// gdb-command:print internal_padding
// gdb-check:$5 = {10012, -10013}
// gdb-command:print padding_at_end
// gdb-check:$6 = {-10014, 10015}
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print no_padding16
// lldb-check:[...]$0 = NoPadding16(10000, -10001)
// lldb-command:print no_padding32
// lldb-check:[...]$1 = NoPadding32(-10002, -10003.5, 10004)
// lldb-command:print no_padding64
// lldb-check:[...]$2 = NoPadding64(-10005.5, 10006, 10007)
// lldb-command:print no_padding163264
// lldb-check:[...]$3 = NoPadding163264(-10008, 10009, 10010, 10011)
// lldb-command:print internal_padding
// lldb-check:[...]$4 = InternalPadding(10012, -10013)
// lldb-command:print padding_at_end
// lldb-check:[...]$5 = PaddingAtEnd(-10014, 10015)
// This test case mainly makes sure that no field names are generated for tuple structs (as opposed
// to all fields having the name "<unnamed_field>"). Otherwise they are handled the same as normal
// structs.
struct NoPadding16(u16, i16);
struct NoPadding32(i32, f32, u32);
struct NoPadding64(f64, i64, u64);
struct NoPadding163264(i16, u16, i32, u64);
struct InternalPadding(u16, i64);
struct PaddingAtEnd(i64, u16);
fn main()
|
fn zzz() {()}
|
{
let no_padding16 = NoPadding16(10000, -10001);
let no_padding32 = NoPadding32(-10002, -10003.5, 10004);
let no_padding64 = NoPadding64(-10005.5, 10006, 10007);
let no_padding163264 = NoPadding163264(-10008, 10009, 10010, 10011);
let internal_padding = InternalPadding(10012, -10013);
let padding_at_end = PaddingAtEnd(-10014, 10015);
zzz(); // #break
}
|
identifier_body
|
tuple-struct.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:set print pretty off
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print no_padding16
// gdb-check:$1 = {10000, -10001}
// gdb-command:print no_padding32
// gdb-check:$2 = {-10002, -10003.5, 10004}
// gdb-command:print no_padding64
// gdb-check:$3 = {-10005.5, 10006, 10007}
// gdb-command:print no_padding163264
// gdb-check:$4 = {-10008, 10009, 10010, 10011}
// gdb-command:print internal_padding
// gdb-check:$5 = {10012, -10013}
// gdb-command:print padding_at_end
// gdb-check:$6 = {-10014, 10015}
// === LLDB TESTS ==================================================================================
// lldb-command:run
// lldb-command:print no_padding16
// lldb-check:[...]$0 = NoPadding16(10000, -10001)
// lldb-command:print no_padding32
// lldb-check:[...]$1 = NoPadding32(-10002, -10003.5, 10004)
// lldb-command:print no_padding64
// lldb-check:[...]$2 = NoPadding64(-10005.5, 10006, 10007)
// lldb-command:print no_padding163264
// lldb-check:[...]$3 = NoPadding163264(-10008, 10009, 10010, 10011)
// lldb-command:print internal_padding
// lldb-check:[...]$4 = InternalPadding(10012, -10013)
// lldb-command:print padding_at_end
// lldb-check:[...]$5 = PaddingAtEnd(-10014, 10015)
// This test case mainly makes sure that no field names are generated for tuple structs (as opposed
// to all fields having the name "<unnamed_field>"). Otherwise they are handled the same as normal
// structs.
struct
|
(u16, i16);
struct NoPadding32(i32, f32, u32);
struct NoPadding64(f64, i64, u64);
struct NoPadding163264(i16, u16, i32, u64);
struct InternalPadding(u16, i64);
struct PaddingAtEnd(i64, u16);
fn main() {
let no_padding16 = NoPadding16(10000, -10001);
let no_padding32 = NoPadding32(-10002, -10003.5, 10004);
let no_padding64 = NoPadding64(-10005.5, 10006, 10007);
let no_padding163264 = NoPadding163264(-10008, 10009, 10010, 10011);
let internal_padding = InternalPadding(10012, -10013);
let padding_at_end = PaddingAtEnd(-10014, 10015);
zzz(); // #break
}
fn zzz() {()}
|
NoPadding16
|
identifier_name
|
tuple-struct.rs
|
// Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android: FIXME(#10381)
// compile-flags:-g
// === GDB TESTS ===================================================================================
// gdb-command:set print pretty off
// gdb-command:rbreak zzz
// gdb-command:run
// gdb-command:finish
// gdb-command:print no_padding16
// gdb-check:$1 = {10000, -10001}
// gdb-command:print no_padding32
// gdb-check:$2 = {-10002, -10003.5, 10004}
// gdb-command:print no_padding64
// gdb-check:$3 = {-10005.5, 10006, 10007}
// gdb-command:print no_padding163264
// gdb-check:$4 = {-10008, 10009, 10010, 10011}
// gdb-command:print internal_padding
// gdb-check:$5 = {10012, -10013}
// gdb-command:print padding_at_end
// gdb-check:$6 = {-10014, 10015}
// === LLDB TESTS ==================================================================================
// lldb-command:run
|
// lldb-command:print no_padding16
// lldb-check:[...]$0 = NoPadding16(10000, -10001)
// lldb-command:print no_padding32
// lldb-check:[...]$1 = NoPadding32(-10002, -10003.5, 10004)
// lldb-command:print no_padding64
// lldb-check:[...]$2 = NoPadding64(-10005.5, 10006, 10007)
// lldb-command:print no_padding163264
// lldb-check:[...]$3 = NoPadding163264(-10008, 10009, 10010, 10011)
// lldb-command:print internal_padding
// lldb-check:[...]$4 = InternalPadding(10012, -10013)
// lldb-command:print padding_at_end
// lldb-check:[...]$5 = PaddingAtEnd(-10014, 10015)
// This test case mainly makes sure that no field names are generated for tuple structs (as opposed
// to all fields having the name "<unnamed_field>"). Otherwise they are handled the same as normal
// structs.
struct NoPadding16(u16, i16);
struct NoPadding32(i32, f32, u32);
struct NoPadding64(f64, i64, u64);
struct NoPadding163264(i16, u16, i32, u64);
struct InternalPadding(u16, i64);
struct PaddingAtEnd(i64, u16);
fn main() {
let no_padding16 = NoPadding16(10000, -10001);
let no_padding32 = NoPadding32(-10002, -10003.5, 10004);
let no_padding64 = NoPadding64(-10005.5, 10006, 10007);
let no_padding163264 = NoPadding163264(-10008, 10009, 10010, 10011);
let internal_padding = InternalPadding(10012, -10013);
let padding_at_end = PaddingAtEnd(-10014, 10015);
zzz(); // #break
}
fn zzz() {()}
|
random_line_split
|
|
types.rs
|
// Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use crate::python::TypeId;
pub struct Types {
pub directory_digest: TypeId,
pub file_digest: TypeId,
pub snapshot: TypeId,
|
pub file_content: TypeId,
pub file_entry: TypeId,
pub directory: TypeId,
pub digest_contents: TypeId,
pub digest_entries: TypeId,
pub path_globs: TypeId,
pub merge_digests: TypeId,
pub add_prefix: TypeId,
pub remove_prefix: TypeId,
pub create_digest: TypeId,
pub digest_subset: TypeId,
pub download_file: TypeId,
pub platform: TypeId,
pub process: TypeId,
pub process_result: TypeId,
pub process_result_metadata: TypeId,
pub coroutine: TypeId,
pub session_values: TypeId,
pub run_id: TypeId,
pub interactive_process: TypeId,
pub interactive_process_result: TypeId,
pub engine_aware_parameter: TypeId,
}
|
pub paths: TypeId,
|
random_line_split
|
types.rs
|
// Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
use crate::python::TypeId;
pub struct
|
{
pub directory_digest: TypeId,
pub file_digest: TypeId,
pub snapshot: TypeId,
pub paths: TypeId,
pub file_content: TypeId,
pub file_entry: TypeId,
pub directory: TypeId,
pub digest_contents: TypeId,
pub digest_entries: TypeId,
pub path_globs: TypeId,
pub merge_digests: TypeId,
pub add_prefix: TypeId,
pub remove_prefix: TypeId,
pub create_digest: TypeId,
pub digest_subset: TypeId,
pub download_file: TypeId,
pub platform: TypeId,
pub process: TypeId,
pub process_result: TypeId,
pub process_result_metadata: TypeId,
pub coroutine: TypeId,
pub session_values: TypeId,
pub run_id: TypeId,
pub interactive_process: TypeId,
pub interactive_process_result: TypeId,
pub engine_aware_parameter: TypeId,
}
|
Types
|
identifier_name
|
scoped_tls.rs
|
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Scoped thread-local storage
//!
//! This module provides the ability to generate *scoped* thread-local
//! variables. In this sense, scoped indicates that thread local storage
//! actually stores a reference to a value, and this reference is only placed
//! in storage for a scoped amount of time.
//!
//! There are no restrictions on what types can be placed into a scoped
//! variable, but all scoped variables are initialized to the equivalent of
//! null. Scoped thread local storage is useful when a value is present for a known
//! period of time and it is not required to relinquish ownership of the
//! contents.
//!
//! # Examples
//!
//! ```
//! # #![feature(scoped_tls)]
//! scoped_thread_local!(static FOO: u32);
//!
//! // Initially each scoped slot is empty.
//! assert!(!FOO.is_set());
//!
//! // When inserting a value, the value is only in place for the duration
//! // of the closure specified.
//! FOO.set(&1, || {
//! FOO.with(|slot| {
//! assert_eq!(*slot, 1);
//! });
//! });
//! ```
#![unstable(feature = "thread_local_internals")]
use prelude::v1::*;
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
/// Keys are statically allocated and can contain a reference to an instance of
/// type `T` scoped to a particular lifetime. Keys provides two methods, `set`
/// and `with`, both of which currently use closures to control the scope of
/// their contents.
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
/// This macro declares a `static` item on which methods are used to get and
/// set the value stored within.
///
/// See [ScopedKey documentation](thread/struct.ScopedKey.html) for more
/// information.
#[macro_export]
#[allow_internal_unstable]
#[cfg(not(no_elf_tls))]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[macro_export]
#[allow_internal_unstable]
#[cfg(no_elf_tls)]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
#[doc(hidden)]
pub const fn new() -> ScopedKey<T> {
ScopedKey { inner: imp::KeyInner::new() }
}
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
/// While `cb` is running, the value `t` will be returned by `get` unless
/// this function is called recursively inside of `cb`.
///
/// Upon return, this function will restore the previous value, if any
/// was available.
///
/// # Examples
///
/// ```
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.set(&100, || {
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
///
/// // set can be called recursively
/// FOO.set(&101, || {
/// //...
/// });
///
/// // Recursive calls restore the previous value.
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
/// });
/// ```
pub fn set<R, F>(&'static self, t: &T, cb: F) -> R where
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
fn drop(&mut self) {
unsafe { self.key.set(self.val) }
}
}
let prev = unsafe {
let prev = self.inner.get();
self.inner.set(t as *const T as *mut T);
prev
};
let _reset = Reset { key: &self.inner, val: prev };
cb()
}
/// Gets a value out of this scoped variable.
///
/// This function takes a closure which receives the value of this
/// variable.
///
/// # Panics
///
/// This function will panic if `set` has not previously been called.
///
/// # Examples
///
/// ```no_run
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.with(|slot| {
/// // work with `slot`
/// });
/// ```
pub fn with<R, F>(&'static self, cb: F) -> R where
F: FnOnce(&T) -> R
{
unsafe {
let ptr = self.inner.get();
assert!(!ptr.is_null(), "cannot access a scoped thread local \
variable without calling `set` first");
cb(&*ptr)
}
}
/// Test whether this TLS key has been `set` for the current thread.
pub fn
|
(&'static self) -> bool {
        unsafe { !self.inner.get().is_null() }
}
}
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls)))]
mod imp {
use std::cell::Cell;
pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner { inner: Cell::new(0 as *mut _) }
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls))]
mod imp {
use prelude::v1::*;
use cell::Cell;
use marker;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner {
inner: OsStaticKey::new(None),
marker: marker::PhantomData
}
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}
#[cfg(test)]
mod tests {
use cell::Cell;
use prelude::v1::*;
scoped_thread_local!(static FOO: u32);
#[test]
fn smoke() {
scoped_thread_local!(static BAR: u32);
assert!(!BAR.is_set());
BAR.set(&1, || {
assert!(BAR.is_set());
BAR.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!BAR.is_set());
}
#[test]
fn cell_allowed() {
scoped_thread_local!(static BAR: Cell<u32>);
BAR.set(&Cell::new(1), || {
BAR.with(|slot| {
assert_eq!(slot.get(), 1);
});
});
}
#[test]
fn scope_item_allowed() {
assert!(!FOO.is_set());
FOO.set(&1, || {
assert!(FOO.is_set());
FOO.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!FOO.is_set());
}
}
|
is_set
|
identifier_name
|
scoped_tls.rs
|
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Scoped thread-local storage
//!
//! This module provides the ability to generate *scoped* thread-local
//! variables. In this sense, scoped indicates that thread local storage
//! actually stores a reference to a value, and this reference is only placed
//! in storage for a scoped amount of time.
//!
//! There are no restrictions on what types can be placed into a scoped
//! variable, but all scoped variables are initialized to the equivalent of
//! null. Scoped thread local storage is useful when a value is present for a known
//! period of time and it is not required to relinquish ownership of the
//! contents.
//!
//! # Examples
//!
//! ```
//! # #![feature(scoped_tls)]
//! scoped_thread_local!(static FOO: u32);
//!
//! // Initially each scoped slot is empty.
//! assert!(!FOO.is_set());
//!
//! // When inserting a value, the value is only in place for the duration
//! // of the closure specified.
//! FOO.set(&1, || {
//! FOO.with(|slot| {
//! assert_eq!(*slot, 1);
//! });
//! });
//! ```
#![unstable(feature = "thread_local_internals")]
use prelude::v1::*;
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
/// Keys are statically allocated and can contain a reference to an instance of
/// type `T` scoped to a particular lifetime. Keys provide two methods, `set`
/// and `with`, both of which currently use closures to control the scope of
/// their contents.
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
/// This macro declares a `static` item on which methods are used to get and
/// set the value stored within.
///
/// See [ScopedKey documentation](thread/struct.ScopedKey.html) for more
/// information.
#[macro_export]
#[allow_internal_unstable]
#[cfg(not(no_elf_tls))]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[macro_export]
#[allow_internal_unstable]
#[cfg(no_elf_tls)]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
#[doc(hidden)]
pub const fn new() -> ScopedKey<T> {
ScopedKey { inner: imp::KeyInner::new() }
}
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
/// While `cb` is running, the value `t` will be returned by `get` unless
/// this function is called recursively inside of `cb`.
///
/// Upon return, this function will restore the previous value, if any
/// was available.
///
/// # Examples
///
/// ```
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.set(&100, || {
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
///
/// // set can be called recursively
/// FOO.set(&101, || {
/// //...
/// });
///
/// // Recursive calls restore the previous value.
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
/// });
/// ```
pub fn set<R, F>(&'static self, t: &T, cb: F) -> R where
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
fn drop(&mut self) {
unsafe { self.key.set(self.val) }
}
}
let prev = unsafe {
let prev = self.inner.get();
self.inner.set(t as *const T as *mut T);
prev
};
let _reset = Reset { key: &self.inner, val: prev };
|
cb()
}
/// Gets a value out of this scoped variable.
///
/// This function takes a closure which receives the value of this
/// variable.
///
/// # Panics
///
/// This function will panic if `set` has not previously been called.
///
/// # Examples
///
/// ```no_run
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.with(|slot| {
/// // work with `slot`
/// });
/// ```
pub fn with<R, F>(&'static self, cb: F) -> R where
F: FnOnce(&T) -> R
{
unsafe {
let ptr = self.inner.get();
assert!(!ptr.is_null(), "cannot access a scoped thread local \
variable without calling `set` first");
cb(&*ptr)
}
}
/// Test whether this TLS key has been `set` for the current thread.
pub fn is_set(&'static self) -> bool {
unsafe { !self.inner.get().is_null() }
}
}
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls)))]
mod imp {
use std::cell::Cell;
pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner { inner: Cell::new(0 as *mut _) }
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls))]
mod imp {
use prelude::v1::*;
use cell::Cell;
use marker;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner {
inner: OsStaticKey::new(None),
marker: marker::PhantomData
}
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) }
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}
#[cfg(test)]
mod tests {
use cell::Cell;
use prelude::v1::*;
scoped_thread_local!(static FOO: u32);
#[test]
fn smoke() {
scoped_thread_local!(static BAR: u32);
assert!(!BAR.is_set());
BAR.set(&1, || {
assert!(BAR.is_set());
BAR.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!BAR.is_set());
}
#[test]
fn cell_allowed() {
scoped_thread_local!(static BAR: Cell<u32>);
BAR.set(&Cell::new(1), || {
BAR.with(|slot| {
assert_eq!(slot.get(), 1);
});
});
}
#[test]
fn scope_item_allowed() {
assert!(!FOO.is_set());
FOO.set(&1, || {
assert!(FOO.is_set());
FOO.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!FOO.is_set());
}
}
|
random_line_split
|
|
scoped_tls.rs
|
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Scoped thread-local storage
//!
//! This module provides the ability to generate *scoped* thread-local
//! variables. In this sense, scoped indicates that thread local storage
//! actually stores a reference to a value, and this reference is only placed
//! in storage for a scoped amount of time.
//!
//! There are no restrictions on what types can be placed into a scoped
//! variable, but all scoped variables are initialized to the equivalent of
//! null. Scoped thread local storage is useful when a value is present for a known
//! period of time and it is not required to relinquish ownership of the
//! contents.
//!
//! # Examples
//!
//! ```
//! # #![feature(scoped_tls)]
//! scoped_thread_local!(static FOO: u32);
//!
//! // Initially each scoped slot is empty.
//! assert!(!FOO.is_set());
//!
//! // When inserting a value, the value is only in place for the duration
//! // of the closure specified.
//! FOO.set(&1, || {
//! FOO.with(|slot| {
//! assert_eq!(*slot, 1);
//! });
//! });
//! ```
#![unstable(feature = "thread_local_internals")]
use prelude::v1::*;
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
///
/// Keys are statically allocated and can contain a reference to an instance of
/// type `T` scoped to a particular lifetime. Keys provide two methods, `set`
/// and `with`, both of which currently use closures to control the scope of
/// their contents.
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
pub struct ScopedKey<T> { inner: imp::KeyInner<T> }
/// Declare a new scoped thread local storage key.
///
/// This macro declares a `static` item on which methods are used to get and
/// set the value stored within.
///
/// See [ScopedKey documentation](thread/struct.ScopedKey.html) for more
/// information.
#[macro_export]
#[allow_internal_unstable]
#[cfg(not(no_elf_tls))]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
#[cfg_attr(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64")),
thread_local)]
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[macro_export]
#[allow_internal_unstable]
#[cfg(no_elf_tls)]
macro_rules! scoped_thread_local {
(static $name:ident: $t:ty) => (
static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
(pub static $name:ident: $t:ty) => (
pub static $name: ::std::thread::ScopedKey<$t> =
::std::thread::ScopedKey::new();
);
}
#[unstable(feature = "scoped_tls",
reason = "scoped TLS has yet to have wide enough use to fully consider \
stabilizing its interface")]
impl<T> ScopedKey<T> {
#[doc(hidden)]
pub const fn new() -> ScopedKey<T> {
ScopedKey { inner: imp::KeyInner::new() }
}
/// Inserts a value into this scoped thread local storage slot for a
/// duration of a closure.
///
/// While `cb` is running, the value `t` will be returned by `get` unless
/// this function is called recursively inside of `cb`.
///
/// Upon return, this function will restore the previous value, if any
/// was available.
///
/// # Examples
///
/// ```
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.set(&100, || {
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
///
/// // set can be called recursively
/// FOO.set(&101, || {
/// //...
/// });
///
/// // Recursive calls restore the previous value.
/// let val = FOO.with(|v| *v);
/// assert_eq!(val, 100);
/// });
/// ```
pub fn set<R, F>(&'static self, t: &T, cb: F) -> R where
F: FnOnce() -> R,
{
struct Reset<'a, T: 'a> {
key: &'a imp::KeyInner<T>,
val: *mut T,
}
impl<'a, T> Drop for Reset<'a, T> {
fn drop(&mut self) {
unsafe { self.key.set(self.val) }
}
}
let prev = unsafe {
let prev = self.inner.get();
self.inner.set(t as *const T as *mut T);
prev
};
let _reset = Reset { key: &self.inner, val: prev };
cb()
}
/// Gets a value out of this scoped variable.
///
/// This function takes a closure which receives the value of this
/// variable.
///
/// # Panics
///
/// This function will panic if `set` has not previously been called.
///
/// # Examples
///
/// ```no_run
/// # #![feature(scoped_tls)]
/// scoped_thread_local!(static FOO: u32);
///
/// FOO.with(|slot| {
/// // work with `slot`
/// });
/// ```
pub fn with<R, F>(&'static self, cb: F) -> R where
F: FnOnce(&T) -> R
{
unsafe {
let ptr = self.inner.get();
assert!(!ptr.is_null(), "cannot access a scoped thread local \
variable without calling `set` first");
cb(&*ptr)
}
}
/// Test whether this TLS key has been `set` for the current thread.
pub fn is_set(&'static self) -> bool {
unsafe { !self.inner.get().is_null() }
}
}
#[cfg(not(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls)))]
mod imp {
use std::cell::Cell;
pub struct KeyInner<T> { inner: Cell<*mut T> }
unsafe impl<T> ::marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner { inner: Cell::new(0 as *mut _) }
}
pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr); }
pub unsafe fn get(&self) -> *mut T { self.inner.get() }
}
}
#[cfg(any(windows,
target_os = "android",
target_os = "ios",
target_os = "openbsd",
target_arch = "aarch64",
no_elf_tls))]
mod imp {
use prelude::v1::*;
use cell::Cell;
use marker;
use sys_common::thread_local::StaticKey as OsStaticKey;
pub struct KeyInner<T> {
pub inner: OsStaticKey,
pub marker: marker::PhantomData<Cell<T>>,
}
unsafe impl<T> marker::Sync for KeyInner<T> { }
impl<T> KeyInner<T> {
pub const fn new() -> KeyInner<T> {
KeyInner {
inner: OsStaticKey::new(None),
marker: marker::PhantomData
}
}
pub unsafe fn set(&self, ptr: *mut T)
|
pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ }
}
}
#[cfg(test)]
mod tests {
use cell::Cell;
use prelude::v1::*;
scoped_thread_local!(static FOO: u32);
#[test]
fn smoke() {
scoped_thread_local!(static BAR: u32);
assert!(!BAR.is_set());
BAR.set(&1, || {
assert!(BAR.is_set());
BAR.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!BAR.is_set());
}
#[test]
fn cell_allowed() {
scoped_thread_local!(static BAR: Cell<u32>);
BAR.set(&Cell::new(1), || {
BAR.with(|slot| {
assert_eq!(slot.get(), 1);
});
});
}
#[test]
fn scope_item_allowed() {
assert!(!FOO.is_set());
FOO.set(&1, || {
assert!(FOO.is_set());
FOO.with(|slot| {
assert_eq!(*slot, 1);
});
});
assert!(!FOO.is_set());
}
}
|
{ self.inner.set(ptr as *mut _) }
|
identifier_body
|
media_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use crate::media_queries::MediaList;
use crate::shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use crate::shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use crate::str::CssStringWriter;
use crate::stylesheets::CssRules;
use cssparser::SourceLocation;
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocSizeOfOps, MallocUnconditionalShallowSizeOf};
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
/// An [`@media`][media] rule.
///
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug, ToShmem)]
pub struct MediaRule {
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl MediaRule {
/// Measure heap usage.
#[cfg(feature = "gecko")]
pub fn size_of(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize
|
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result {
dest.write_str("@media ")?;
self.media_queries
.read_with(guard)
.to_css(&mut CssWriter::new(dest))?;
self.rules.read_with(guard).to_css_block(guard, dest)
}
}
impl DeepCloneWithLock for MediaRule {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard);
MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard, params))),
source_location: self.source_location.clone(),
}
}
}
|
{
// Measurement of other fields may be added later.
self.rules.unconditional_shallow_size_of(ops) +
self.rules.read_with(guard).size_of(guard, ops)
}
|
identifier_body
|
media_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use crate::media_queries::MediaList;
use crate::shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use crate::shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use crate::str::CssStringWriter;
use crate::stylesheets::CssRules;
use cssparser::SourceLocation;
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocSizeOfOps, MallocUnconditionalShallowSizeOf};
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
|
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug, ToShmem)]
pub struct MediaRule {
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl MediaRule {
/// Measure heap usage.
#[cfg(feature = "gecko")]
pub fn size_of(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize {
// Measurement of other fields may be added later.
self.rules.unconditional_shallow_size_of(ops) +
self.rules.read_with(guard).size_of(guard, ops)
}
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result {
dest.write_str("@media ")?;
self.media_queries
.read_with(guard)
.to_css(&mut CssWriter::new(dest))?;
self.rules.read_with(guard).to_css_block(guard, dest)
}
}
impl DeepCloneWithLock for MediaRule {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard);
MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard, params))),
source_location: self.source_location.clone(),
}
}
}
|
/// An [`@media`][media] rule.
///
|
random_line_split
|
media_rule.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */
//! An [`@media`][media] rule.
//!
//! [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
use crate::media_queries::MediaList;
use crate::shared_lock::{DeepCloneParams, DeepCloneWithLock, Locked};
use crate::shared_lock::{SharedRwLock, SharedRwLockReadGuard, ToCssWithGuard};
use crate::str::CssStringWriter;
use crate::stylesheets::CssRules;
use cssparser::SourceLocation;
#[cfg(feature = "gecko")]
use malloc_size_of::{MallocSizeOfOps, MallocUnconditionalShallowSizeOf};
use servo_arc::Arc;
use std::fmt::{self, Write};
use style_traits::{CssWriter, ToCss};
/// An [`@media`][media] rule.
///
/// [media]: https://drafts.csswg.org/css-conditional/#at-ruledef-media
#[derive(Debug, ToShmem)]
pub struct
|
{
/// The list of media queries used by this media rule.
pub media_queries: Arc<Locked<MediaList>>,
/// The nested rules to this media rule.
pub rules: Arc<Locked<CssRules>>,
/// The source position where this media rule was found.
pub source_location: SourceLocation,
}
impl MediaRule {
/// Measure heap usage.
#[cfg(feature = "gecko")]
pub fn size_of(&self, guard: &SharedRwLockReadGuard, ops: &mut MallocSizeOfOps) -> usize {
// Measurement of other fields may be added later.
self.rules.unconditional_shallow_size_of(ops) +
self.rules.read_with(guard).size_of(guard, ops)
}
}
impl ToCssWithGuard for MediaRule {
// Serialization of MediaRule is not specced.
// https://drafts.csswg.org/cssom/#serialize-a-css-rule CSSMediaRule
fn to_css(&self, guard: &SharedRwLockReadGuard, dest: &mut CssStringWriter) -> fmt::Result {
dest.write_str("@media ")?;
self.media_queries
.read_with(guard)
.to_css(&mut CssWriter::new(dest))?;
self.rules.read_with(guard).to_css_block(guard, dest)
}
}
impl DeepCloneWithLock for MediaRule {
fn deep_clone_with_lock(
&self,
lock: &SharedRwLock,
guard: &SharedRwLockReadGuard,
params: &DeepCloneParams,
) -> Self {
let media_queries = self.media_queries.read_with(guard);
let rules = self.rules.read_with(guard);
MediaRule {
media_queries: Arc::new(lock.wrap(media_queries.clone())),
rules: Arc::new(lock.wrap(rules.deep_clone_with_lock(lock, guard, params))),
source_location: self.source_location.clone(),
}
}
}
|
MediaRule
|
identifier_name
|
main.rs
|
use std::env;
use std::io;
use std::error::Error;
use std::process;
use std::process::Command;
use std::io::Write;
use std::os::unix::process::CommandExt;
mod session;
mod auth;
use auth::authenticate_current_user_n;
mod osutils;
use osutils::OSUtils;
use osutils::unix::UnixOSUtils;
mod settings;
use settings::Settings;
extern crate time;
extern crate libc;
extern crate which;
extern crate getopts;
use getopts::Options;
use getopts::ParsingStyle;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
// Global config
pub static CONFIG_PATH: &'static str = "/etc/rudo.json";
pub static DEFAULT_PROMPT: &'static str = "Password: ";
pub static SESSION_PATH: &'static str = "/var/run/rudo";
pub static DEFAULT_SESSION_TIMEOUT: i64 = 900;
fn print_help(program_name: &str, opts: Options) {
let brief = format!("Usage: {} [flags] [command]", program_name);
writeln!(&mut io::stderr(), "{}", opts.usage(&brief))
.expect("Failed to write to stderr!");
}
fn generate_empty_config() {
// Create a new settings object
let new_settings = Settings::new();
// Get a serialized string representation of the object
let settings_str = new_settings.to_string()
.expect("Unable to generate empty settings file!");
// Output the new settings string to stdout
println!("{}", settings_str);
}
///
/// Handles listing of current user's permissions to STDOUT
///
fn
|
<T: OSUtils>(osutils: &T) -> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Get this user's User struct
let username = osutils.get_username()?;
let user = settings.get_user(&username)?;
// Create a string of all commands the user can run
let mut all_commands: String = String::new();
for cmd in &user.permissions.allowed_commands {
all_commands += cmd;
all_commands += " ";
}
println!("You are allowed to run the following commands: {}", all_commands);
process::exit(0);
}
/// Handles default behavior of program - Authenticate and run a command
/// @param user user to run command as
/// @param command program to launch
/// @param args arguments to launch the program with
/// @return program return code
fn run_command<T: OSUtils>(osutils: &T, user: Option<String>, group: Option<String>, command: &str, args: &Vec<String>)
-> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Confirm that user is in the settings file and has permission
let username: String = osutils.get_username()?;
let safe_command_path = match settings.sanitize_user_command(&username, command) {
Ok(v) => v,
Err(e) => {
dbg!(e);
writeln!(&mut io::stderr(), "You don't have permission to run that! This incident won't be reported.")
.unwrap();
return Ok(1);
}
};
// Determine the uid of the user to impersonate
let mut uid: u32 = 0;
let mut gid: u32 = 0;
if let Some(username) = user {
let uidgid = osutils.get_uidgid_by_username(&username)?;
uid = uidgid.0;
gid = uidgid.1;
}
// If the user provided a group, set that
if let Some(groupname) = group {
gid = osutils.get_gid_by_groupname(&groupname)?;
}
// Now that the user is authenticated, run the provided command
Command::new(safe_command_path).args(args).uid(uid).gid(gid).exec();
// If we got here, it means the command failed
writeln!(&mut io::stderr(), "rudo: {}: command not found", &command).unwrap();
Ok(1)
}
fn main() {
let args: Vec<String> = env::args().collect();
let program_name = args[0].clone();
let mut opts = Options::new();
opts.parsing_style(ParsingStyle::StopAtFirstFree);
let mut user: Option<String> = None;
let mut group: Option<String> = None;
// Set up arguments
opts.optflag("h", "help", "print this help menu");
opts.optflag("l", "list", "list all permissions for current user");
opts.optopt("u", "user", "run as the specified user", "");
opts.optopt("g", "group", "run as the specified group", "<group>");
opts.optflag("", "genconfig", "Generate an empty config and output to STDOUT");
// Instantiate platform OSUtils
let osutils = UnixOSUtils::new();
// Create a vec of up to 2 arguments to parse
// We ignore all arguments past the first
let mut matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(_) => { print_help(&program_name, opts); process::exit(1); }
};
// Handle help
if matches.opt_present("h") {
print_help(&program_name, opts);
process::exit(0);
}
// Handle --list
if matches.opt_present("l") {
let res = list_permissions(&osutils).unwrap_or_else(|e|{
writeln!(&mut io::stderr(), "Failed to list permissions: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
// Handle --genconfig
if matches.opt_present("genconfig") {
generate_empty_config();
process::exit(0);
}
if matches.free.len() < 1 {
print_help(&program_name, opts);
process::exit(1);
}
// Handle --user
if matches.opt_present("u") {
// Set the user to the provided user
user = match matches.opt_str("u") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1); }
};
}
// Handle --group
if matches.opt_present("g") {
// Set the group to the provided group
group = match matches.opt_str("g") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1) }
};
}
// Handle default behavior (run command)
let command = matches.free[0].clone();
matches.free.remove(0);
let res = run_command(&osutils, user, group, &command, &matches.free).unwrap_or_else(|e| {
writeln!(&mut io::stderr(), "Failed to run command: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
|
list_permissions
|
identifier_name
|
main.rs
|
use std::env;
use std::io;
use std::error::Error;
use std::process;
use std::process::Command;
use std::io::Write;
use std::os::unix::process::CommandExt;
mod session;
mod auth;
use auth::authenticate_current_user_n;
mod osutils;
use osutils::OSUtils;
use osutils::unix::UnixOSUtils;
mod settings;
use settings::Settings;
extern crate time;
extern crate libc;
extern crate which;
extern crate getopts;
use getopts::Options;
use getopts::ParsingStyle;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
// Global config
pub static CONFIG_PATH: &'static str = "/etc/rudo.json";
pub static DEFAULT_PROMPT: &'static str = "Password: ";
pub static SESSION_PATH: &'static str = "/var/run/rudo";
pub static DEFAULT_SESSION_TIMEOUT: i64 = 900;
fn print_help(program_name: &str, opts: Options) {
let brief = format!("Usage: {} [flags] [command]", program_name);
writeln!(&mut io::stderr(), "{}", opts.usage(&brief))
.expect("Failed to write to stderr!");
}
fn generate_empty_config() {
// Create a new settings object
let new_settings = Settings::new();
// Get a serialized string representation of the object
let settings_str = new_settings.to_string()
.expect("Unable to generate empty settings file!");
// Output the new settings string to stdout
println!("{}", settings_str);
}
///
/// Handles listing of current user's permissions to STDOUT
///
fn list_permissions<T: OSUtils>(osutils: &T) -> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Get this user's User struct
let username = osutils.get_username()?;
let user = settings.get_user(&username)?;
// Create a string of all commands the user can run
let mut all_commands: String = String::new();
for cmd in &user.permissions.allowed_commands {
all_commands += cmd;
all_commands += " ";
}
println!("You are allowed to run the following commands: {}", all_commands);
process::exit(0);
}
/// Handles default behavior of program - Authenticate and run a command
/// @param user user to run command as
/// @param command program to launch
/// @param args arguments to launch the program with
/// @return program return code
fn run_command<T: OSUtils>(osutils: &T, user: Option<String>, group: Option<String>, command: &str, args: &Vec<String>)
-> Result<i32, Box<dyn Error>>
|
return Ok(1);
}
};
// Determine the uid of the user to impersonate
let mut uid: u32 = 0;
let mut gid: u32 = 0;
if let Some(username) = user {
let uidgid = osutils.get_uidgid_by_username(&username)?;
uid = uidgid.0;
gid = uidgid.1;
}
// If the user provided a group, set that
if let Some(groupname) = group {
gid = osutils.get_gid_by_groupname(&groupname)?;
}
// Now that the user is authenticated, run the provided command
Command::new(safe_command_path).args(args).uid(uid).gid(gid).exec();
// If we got here, it means the command failed
writeln!(&mut io::stderr(), "rudo: {}: command not found", &command).unwrap();
Ok(1)
}
fn main() {
let args: Vec<String> = env::args().collect();
let program_name = args[0].clone();
let mut opts = Options::new();
opts.parsing_style(ParsingStyle::StopAtFirstFree);
let mut user: Option<String> = None;
let mut group: Option<String> = None;
// Set up arguments
opts.optflag("h", "help", "print this help menu");
opts.optflag("l", "list", "list all permissions for current user");
opts.optopt("u", "user", "run as the specified user", "");
opts.optopt("g", "group", "run as the specified group", "<group>");
opts.optflag("", "genconfig", "Generate an empty config and output to STDOUT");
// Instantiate platform OSUtils
let osutils = UnixOSUtils::new();
// Create a vec of up to 2 arguments to parse
// We ignore all arguments past the first
let mut matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(_) => { print_help(&program_name, opts); process::exit(1); }
};
// Handle help
if matches.opt_present("h") {
print_help(&program_name, opts);
process::exit(0);
}
// Handle --list
if matches.opt_present("l") {
let res = list_permissions(&osutils).unwrap_or_else(|e|{
writeln!(&mut io::stderr(), "Failed to list permissions: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
// Handle --genconfig
if matches.opt_present("genconfig") {
generate_empty_config();
process::exit(0);
}
if matches.free.len() < 1 {
print_help(&program_name, opts);
process::exit(1);
}
// Handle --user
if matches.opt_present("u") {
// Set the user to the provided user
user = match matches.opt_str("u") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1); }
};
}
// Handle --group
if matches.opt_present("g") {
// Set the group to the provided group
group = match matches.opt_str("g") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1) }
};
}
// Handle default behavior (run command)
let command = matches.free[0].clone();
matches.free.remove(0);
let res = run_command(&osutils, user, group, &command, &matches.free).unwrap_or_else(|e| {
writeln!(&mut io::stderr(), "Failed to run command: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
|
{
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Confirm that user is in the settings file and has permission
let username: String = osutils.get_username()?;
let safe_command_path = match settings.sanitize_user_command(&username, command) {
Ok(v) => v,
Err(e) => {
dbg!(e);
writeln!(&mut io::stderr(), "You don't have permission to run that! This incident won't be reported.")
.unwrap();
|
identifier_body
|
main.rs
|
use std::env;
use std::io;
use std::error::Error;
use std::process;
use std::process::Command;
use std::io::Write;
use std::os::unix::process::CommandExt;
mod session;
mod auth;
use auth::authenticate_current_user_n;
mod osutils;
use osutils::OSUtils;
use osutils::unix::UnixOSUtils;
mod settings;
use settings::Settings;
extern crate time;
extern crate libc;
extern crate which;
extern crate getopts;
use getopts::Options;
use getopts::ParsingStyle;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
// Global config
pub static CONFIG_PATH: &'static str = "/etc/rudo.json";
pub static DEFAULT_PROMPT: &'static str = "Password: ";
pub static SESSION_PATH: &'static str = "/var/run/rudo";
pub static DEFAULT_SESSION_TIMEOUT: i64 = 900;
fn print_help(program_name: &str, opts: Options) {
let brief = format!("Usage: {} [flags] [command]", program_name);
writeln!(&mut io::stderr(), "{}", opts.usage(&brief))
.expect("Failed to write to stderr!");
}
fn generate_empty_config() {
// Create a new settings object
let new_settings = Settings::new();
// Get a serialized string representation of the object
let settings_str = new_settings.to_string()
.expect("Unable to generate empty settings file!");
// Output the new settings string to stdout
println!("{}", settings_str);
}
///
/// Handles listing of current user's permissions to STDOUT
///
fn list_permissions<T: OSUtils>(osutils: &T) -> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Get this user's User struct
let username = osutils.get_username()?;
let user = settings.get_user(&username)?;
// Create a string of all commands the user can run
let mut all_commands: String = String::new();
for cmd in &user.permissions.allowed_commands {
all_commands += cmd;
all_commands += " ";
}
println!("You are allowed to run the following commands: {}", all_commands);
process::exit(0);
}
/// Handles default behavior of program - Authenticate and run a command
/// @param user user to run command as
/// @param command program to launch
/// @param args arguments to launch the program with
/// @return program return code
fn run_command<T: OSUtils>(osutils: &T, user: Option<String>, group: Option<String>, command: &str, args: &Vec<String>)
-> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res
|
// Confirm that user is in the settings file and has permission
let username: String = osutils.get_username()?;
let safe_command_path = match settings.sanitize_user_command(&username, command) {
Ok(v) => v,
Err(e) => {
dbg!(e);
writeln!(&mut io::stderr(), "You don't have permission to run that! This incident won't be reported.")
.unwrap();
return Ok(1);
}
};
// Determine the uid of the user to impersonate
let mut uid: u32 = 0;
let mut gid: u32 = 0;
if let Some(username) = user {
let uidgid = osutils.get_uidgid_by_username(&username)?;
uid = uidgid.0;
gid = uidgid.1;
}
// If the user provided a group, set that
if let Some(groupname) = group {
gid = osutils.get_gid_by_groupname(&groupname)?;
}
// Now that the user is authenticated, run the provided command
Command::new(safe_command_path).args(args).uid(uid).gid(gid).exec();
// If we got here, it means the command failed
writeln!(&mut io::stderr(), "rudo: {}: command not found", &command).unwrap();
Ok(1)
}
fn main() {
let args: Vec<String> = env::args().collect();
let program_name = args[0].clone();
let mut opts = Options::new();
opts.parsing_style(ParsingStyle::StopAtFirstFree);
let mut user: Option<String> = None;
let mut group: Option<String> = None;
// Set up arguments
opts.optflag("h", "help", "print this help menu");
opts.optflag("l", "list", "list all permissions for current user");
opts.optopt("u", "user", "run as the specified user", "");
opts.optopt("g", "group", "run as the specified group", "<group>");
opts.optflag("", "genconfig", "Generate an empty config and output to STDOUT");
// Instantiate platform OSUtils
let osutils = UnixOSUtils::new();
// Create a vec of up to 2 arguments to parse
// We ignore all arguments past the first
let mut matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(_) => { print_help(&program_name, opts); process::exit(1); }
};
// Handle help
if matches.opt_present("h") {
print_help(&program_name, opts);
process::exit(0);
}
// Handle --list
if matches.opt_present("l") {
let res = list_permissions(&osutils).unwrap_or_else(|e|{
writeln!(&mut io::stderr(), "Failed to list permissions: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
// Handle --genconfig
if matches.opt_present("genconfig") {
generate_empty_config();
process::exit(0);
}
if matches.free.len() < 1 {
print_help(&program_name, opts);
process::exit(1);
}
// Handle --user
if matches.opt_present("u") {
// Set the user to the provided user
user = match matches.opt_str("u") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1); }
};
}
// Handle --group
if matches.opt_present("g") {
// Set the group to the provided group
group = match matches.opt_str("g") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1) }
};
}
// Handle default behavior (run command)
let command = matches.free[0].clone();
matches.free.remove(0);
let res = run_command(&osutils, user, group, &command, &matches.free).unwrap_or_else(|e| {
writeln!(&mut io::stderr(), "Failed to run command: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
|
{
return Ok(1);
}
|
conditional_block
|
main.rs
|
use std::env;
use std::io;
use std::error::Error;
use std::process;
use std::process::Command;
use std::io::Write;
use std::os::unix::process::CommandExt;
mod session;
mod auth;
use auth::authenticate_current_user_n;
mod osutils;
use osutils::OSUtils;
use osutils::unix::UnixOSUtils;
mod settings;
use settings::Settings;
extern crate time;
extern crate libc;
extern crate which;
extern crate getopts;
use getopts::Options;
use getopts::ParsingStyle;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
// Global config
pub static CONFIG_PATH: &'static str = "/etc/rudo.json";
pub static DEFAULT_PROMPT: &'static str = "Password: ";
pub static SESSION_PATH: &'static str = "/var/run/rudo";
pub static DEFAULT_SESSION_TIMEOUT: i64 = 900;
fn print_help(program_name: &str, opts: Options) {
let brief = format!("Usage: {} [flags] [command]", program_name);
writeln!(&mut io::stderr(), "{}", opts.usage(&brief))
.expect("Failed to write to stderr!");
}
fn generate_empty_config() {
// Create a new settings object
let new_settings = Settings::new();
// Get a serialized string representation of the object
let settings_str = new_settings.to_string()
.expect("Unable to generate empty settings file!");
// Output the new settings string to stdout
println!("{}", settings_str);
}
///
/// Handles listing of current user's permissions to STDOUT
///
fn list_permissions<T: OSUtils>(osutils: &T) -> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
}
// Get this user's User struct
let username = osutils.get_username()?;
let user = settings.get_user(&username)?;
// Create a string of all commands the user can run
let mut all_commands: String = String::new();
for cmd in &user.permissions.allowed_commands {
all_commands += cmd;
all_commands += " ";
}
println!("You are allowed to run the following commands: {}", all_commands);
process::exit(0);
}
/// Handles default behavior of program - Authenticate and run a command
/// @param user user to run command as
/// @param command program to launch
/// @param args arguments to launch the program with
/// @return program return code
fn run_command<T: OSUtils>(osutils: &T, user: Option<String>, group: Option<String>, command: &str, args: &Vec<String>)
-> Result<i32, Box<dyn Error>> {
// Load the settings file
let settings = Settings::from_file(CONFIG_PATH)
.expect("Unable to read configuration file! Run --genconfig.");
// Give the user 3 tries to authenticate
let auth_res = authenticate_current_user_n::<T>(osutils, &settings, 3)?;
if !auth_res {
return Ok(1);
|
}
// Confirm that user is in the settings file and has permission
let username: String = osutils.get_username()?;
let safe_command_path = match settings.sanitize_user_command(&username, command) {
Ok(v) => v,
Err(e) => {
dbg!(e);
writeln!(&mut io::stderr(), "You don't have permission to run that! This incident won't be reported.")
.unwrap();
return Ok(1);
}
};
// Determine the uid of the user to impersonate
let mut uid: u32 = 0;
let mut gid: u32 = 0;
if let Some(username) = user {
let uidgid = osutils.get_uidgid_by_username(&username)?;
uid = uidgid.0;
gid = uidgid.1;
}
// If the user provided a group, set that
if let Some(groupname) = group {
gid = osutils.get_gid_by_groupname(&groupname)?;
}
// Now that the user is authenticated, run the provided command
Command::new(safe_command_path).args(args).uid(uid).gid(gid).exec();
// If we got here, it means the command failed
writeln!(&mut io::stderr(), "rudo: {}: command not found", &command).unwrap();
Ok(1)
}
fn main() {
let args: Vec<String> = env::args().collect();
let program_name = args[0].clone();
let mut opts = Options::new();
opts.parsing_style(ParsingStyle::StopAtFirstFree);
let mut user: Option<String> = None;
let mut group: Option<String> = None;
// Set up arguments
opts.optflag("h", "help", "print this help menu");
opts.optflag("l", "list", "list all permissions for current user");
opts.optopt("u", "user", "run as the specified user", "");
opts.optopt("g", "group", "run as the specified group", "<group>");
opts.optflag("", "genconfig", "Generate an empty config and output to STDOUT");
// Instantiate platform OSUtils
let osutils = UnixOSUtils::new();
// Create a vec of up to 2 arguments to parse
// We ignore all arguments past the first
let mut matches = match opts.parse(&args[1..]) {
Ok(m) => { m }
Err(_) => { print_help(&program_name, opts); process::exit(1); }
};
// Handle help
if matches.opt_present("h") {
print_help(&program_name, opts);
process::exit(0);
}
// Handle --list
if matches.opt_present("l") {
let res = list_permissions(&osutils).unwrap_or_else(|e|{
writeln!(&mut io::stderr(), "Failed to list permissions: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
// Handle --genconfig
if matches.opt_present("genconfig") {
generate_empty_config();
process::exit(0);
}
if matches.free.len() < 1 {
print_help(&program_name, opts);
process::exit(1);
}
// Handle --user
if matches.opt_present("u") {
// Set the user to the provided user
user = match matches.opt_str("u") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1); }
};
}
// Handle --group
if matches.opt_present("g") {
// Set the group to the provided group
group = match matches.opt_str("g") {
Some(x) => Some(x),
None => { print_help(&program_name, opts); process::exit(1) }
};
}
// Handle default behavior (run command)
let command = matches.free[0].clone();
matches.free.remove(0);
let res = run_command(&osutils, user, group, &command, &matches.free).unwrap_or_else(|e| {
writeln!(&mut io::stderr(), "Failed to run command: {}", e).unwrap();
process::exit(1);
});
process::exit(res);
}
|
random_line_split
|
|
parsers.rs
|
//! Contains useful [nom](https://github.com/Geal/nom) parsers.
use std::path::PathBuf;
use nom::not_line_ending;
use std::str::{self, FromStr};
use std::borrow::ToOwned;
use nom::{digit, alphanumeric};
named!(pub consume_until_line_ending, take_until_and_consume!("\n"));
named!(pub parse_line<String>,
map!(map_res!(not_line_ending, str::from_utf8), ToOwned::to_owned));
named!(pub parse_u32_octal<u32>,
|
|s| u32::from_str_radix(s, 8)));
named!(pub parse_u8<u8>,
map_res!(map_res!(digit, str::from_utf8), FromStr::from_str));
named!(pub parse_u32<u32>,
map_res!(map_res!(digit, str::from_utf8), FromStr::from_str));
named!(pub parse_u64<u64>,
map_res!(map_res!(digit, str::from_utf8), FromStr::from_str));
|
map_res!(map_res!(alphanumeric, str::from_utf8),
|
random_line_split
|
gc_worker.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashMap;
use engine_traits::{Peekable, CF_WRITE};
use grpcio::{ChannelBuilder, Environment};
use keys::data_key;
use kvproto::{kvrpcpb::*, metapb, tikvpb::TikvClient};
use std::sync::Arc;
use test_raftstore::*;
use tikv::server::gc_worker::sync_gc;
use tikv_util::HandyRwLock;
use txn_types::Key;
#[test]
fn test_physical_scan_lock() {
let (_cluster, client, ctx) = must_new_cluster_and_kv_client();
// Generate kvs like k10, v10, ts=10; k11, v11, ts=11;...
let kv: Vec<_> = (10..20)
.map(|i| (i, vec![b'k', i as u8], vec![b'v', i as u8]))
.collect();
for (ts, k, v) in &kv {
let mut mutation = Mutation::default();
mutation.set_op(Op::Put);
mutation.set_key(k.clone());
mutation.set_value(v.clone());
must_kv_prewrite(&client, ctx.clone(), vec![mutation], k.clone(), *ts);
}
let all_locks: Vec<_> = kv
.into_iter()
|
// Create a LockInfo that matches the prewrite request in `must_kv_prewrite`.
let mut lock_info = LockInfo::default();
lock_info.set_primary_lock(k.clone());
lock_info.set_lock_version(ts);
lock_info.set_key(k);
lock_info.set_lock_ttl(3000);
lock_info.set_lock_type(Op::Put);
lock_info.set_min_commit_ts(ts + 1);
lock_info
})
.collect();
let check_result = |got_locks: &[_], expected_locks: &[_]| {
for i in 0..std::cmp::max(got_locks.len(), expected_locks.len()) {
assert_eq!(got_locks[i], expected_locks[i], "lock {} mismatch", i);
}
};
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 30, b"", 100),
&all_locks,
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 15, b"", 100),
&all_locks[0..=5],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 10, b"", 100),
&all_locks[0..1],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 9, b"", 100),
&[],
);
check_result(
&must_physical_scan_lock(&client, ctx, 30, &[b'k', 13], 5),
&all_locks[3..8],
);
}
#[test]
fn test_applied_lock_collector() {
let mut cluster = new_server_cluster(0, 3);
cluster.pd_client.disable_default_operator();
cluster.run();
// Create all stores' clients.
let env = Arc::new(Environment::new(1));
let mut clients = HashMap::default();
for node_id in cluster.get_node_ids() {
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(node_id));
let client = TikvClient::new(channel);
clients.insert(node_id, client);
}
// Create the ctx of the first region.
let region = cluster.get_region(b"");
let region_id = region.get_id();
let leader_peer = cluster.leader_of_region(region_id).unwrap();
let leader_store_id = leader_peer.get_store_id();
let leader_client = clients.get(&leader_store_id).unwrap();
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader_peer);
ctx.set_region_epoch(cluster.get_region_epoch(region_id));
// It's used to make sure all stores apply all logs.
let wait_for_apply = |cluster: &mut Cluster<_>, region: &metapb::Region| {
let cluster = &mut *cluster;
region.get_peers().iter().for_each(|p| {
let mut retry_times = 1;
loop {
let resp =
async_read_on_peer(cluster, p.clone(), region.clone(), b"key", true, true)
.recv()
.unwrap();
if !resp.get_header().has_error() {
return;
}
if retry_times >= 50 {
panic!("failed to read on {:?}: {:?}", p, resp);
}
retry_times += 1;
sleep_ms(20);
}
});
};
let check_lock = |lock: &LockInfo, k: &[u8], pk: &[u8], ts| {
assert_eq!(lock.get_key(), k);
assert_eq!(lock.get_primary_lock(), pk);
assert_eq!(lock.get_lock_version(), ts);
};
// Register lock observer at safe point 10000.
let mut safe_point = 10000;
clients.iter().for_each(|(_, c)| {
// Should report error when checking non-existent observer.
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
// Lock observer should only collect values in lock CF.
let key = b"key0";
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, &b"v".repeat(1024))],
key.to_vec(),
1,
);
must_kv_commit(&leader_client, ctx.clone(), vec![key.to_vec()], 1, 2, 2);
wait_for_apply(&mut cluster, &region);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Lock observer shouldn't collect locks after the safe point.
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, b"v")],
key.to_vec(),
safe_point + 1,
);
wait_for_apply(&mut cluster, &region);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Write 999 locks whose timestamp is less than the safe point.
let mutations = (1..1000)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(&leader_client, ctx.clone(), mutations, b"key1".to_vec(), 10);
wait_for_apply(&mut cluster, &region);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
// Plus the first lock.
assert_eq!(locks.len(), 1000);
});
// Add a new store and register lock observer.
let store_id = cluster.add_new_engine();
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(store_id));
let client = TikvClient::new(channel);
must_register_lock_observer(&client, safe_point);
// Add a new peer. Lock observer should collect all locks from snapshot.
let peer = new_peer(store_id, store_id);
cluster.pd_client.must_add_peer(region_id, peer.clone());
cluster.pd_client.must_none_pending_peer(peer);
wait_for_apply(&mut cluster, &region);
let locks = must_check_lock_observer(&client, safe_point, true);
assert_eq!(locks.len(), 999);
// Should be dirty when it collects too many locks.
let mutations = (1000..1100)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(
&leader_client,
ctx.clone(),
mutations,
b"key1000".to_vec(),
100,
);
wait_for_apply(&mut cluster, &region);
clients.insert(store_id, client);
clients.iter().for_each(|(_, c)| {
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Reregister and check. It shouldn't clean up state.
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Register lock observer at a later safe point. Lock observer should reset its state.
safe_point += 1;
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
// Can't register observer with smaller max_ts.
assert!(
!register_lock_observer(&c, safe_point - 1)
.get_error()
.is_empty()
);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
let leader_client = clients.get(&leader_store_id).unwrap();
must_kv_prewrite(
&leader_client,
ctx,
vec![new_mutation(Op::Put, b"key1100", b"v")],
b"key1100".to_vec(),
safe_point,
);
wait_for_apply(&mut cluster, &region);
clients.iter().for_each(|(_, c)| {
// Should collect locks according to the new max ts.
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Shouldn't remove it with a wrong max ts.
assert!(
!remove_lock_observer(c, safe_point - 1)
.get_error()
.is_empty()
);
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Remove lock observers.
must_remove_lock_observer(c, safe_point);
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
});
}
// Since v5.0 GC bypasses Raft, which means GC scans/deletes records with `keys::DATA_PREFIX`.
// This case ensures it's performed correctly.
#[test]
fn test_gc_bypass_raft() {
let (cluster, leader, ctx) = must_new_cluster_mul(1);
cluster.pd_client.disable_default_operator();
let env = Arc::new(Environment::new(1));
let leader_store = leader.get_store_id();
let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader_store));
let client = TikvClient::new(channel);
let pk = b"k1".to_vec();
let value = vec![b'x'; 300];
let engine = cluster.engines.get(&leader_store).unwrap();
for &start_ts in &[10, 20, 30, 40] {
let commit_ts = start_ts + 5;
let muts = vec![new_mutation(Op::Put, b"k1", &value)];
must_kv_prewrite(&client, ctx.clone(), muts, pk.clone(), start_ts);
let keys = vec![pk.clone()];
must_kv_commit(&client, ctx.clone(), keys, start_ts, commit_ts, commit_ts);
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_some());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_some());
}
let gc_sched = cluster.sim.rl().get_gc_worker(1).scheduler();
assert!(sync_gc(&gc_sched, 0, b"k1".to_vec(), b"k2".to_vec(), 200.into()).is_ok());
for &start_ts in &[10, 20, 30] {
let commit_ts = start_ts + 5;
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_none());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_none());
}
}
|
.map(|(ts, k, _)| {
|
random_line_split
|
gc_worker.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashMap;
use engine_traits::{Peekable, CF_WRITE};
use grpcio::{ChannelBuilder, Environment};
use keys::data_key;
use kvproto::{kvrpcpb::*, metapb, tikvpb::TikvClient};
use std::sync::Arc;
use test_raftstore::*;
use tikv::server::gc_worker::sync_gc;
use tikv_util::HandyRwLock;
use txn_types::Key;
#[test]
fn test_physical_scan_lock() {
let (_cluster, client, ctx) = must_new_cluster_and_kv_client();
// Generate kvs like k10, v10, ts=10; k11, v11, ts=11;...
let kv: Vec<_> = (10..20)
.map(|i| (i, vec![b'k', i as u8], vec![b'v', i as u8]))
.collect();
for (ts, k, v) in &kv {
let mut mutation = Mutation::default();
mutation.set_op(Op::Put);
mutation.set_key(k.clone());
mutation.set_value(v.clone());
must_kv_prewrite(&client, ctx.clone(), vec![mutation], k.clone(), *ts);
}
let all_locks: Vec<_> = kv
.into_iter()
.map(|(ts, k, _)| {
// Create a LockInfo that matches the prewrite request in `must_kv_prewrite`.
let mut lock_info = LockInfo::default();
lock_info.set_primary_lock(k.clone());
lock_info.set_lock_version(ts);
lock_info.set_key(k);
lock_info.set_lock_ttl(3000);
lock_info.set_lock_type(Op::Put);
lock_info.set_min_commit_ts(ts + 1);
lock_info
})
.collect();
let check_result = |got_locks: &[_], expected_locks: &[_]| {
for i in 0..std::cmp::max(got_locks.len(), expected_locks.len()) {
assert_eq!(got_locks[i], expected_locks[i], "lock {} mismatch", i);
}
};
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 30, b"", 100),
&all_locks,
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 15, b"", 100),
&all_locks[0..=5],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 10, b"", 100),
&all_locks[0..1],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 9, b"", 100),
&[],
);
check_result(
&must_physical_scan_lock(&client, ctx, 30, &[b'k', 13], 5),
&all_locks[3..8],
);
}
#[test]
fn test_applied_lock_collector() {
let mut cluster = new_server_cluster(0, 3);
cluster.pd_client.disable_default_operator();
cluster.run();
// Create all stores' clients.
let env = Arc::new(Environment::new(1));
let mut clients = HashMap::default();
for node_id in cluster.get_node_ids() {
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(node_id));
let client = TikvClient::new(channel);
clients.insert(node_id, client);
}
// Create the ctx of the first region.
let region = cluster.get_region(b"");
let region_id = region.get_id();
let leader_peer = cluster.leader_of_region(region_id).unwrap();
let leader_store_id = leader_peer.get_store_id();
let leader_client = clients.get(&leader_store_id).unwrap();
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader_peer);
ctx.set_region_epoch(cluster.get_region_epoch(region_id));
    // It's used to make sure all stores apply all logs.
let wait_for_apply = |cluster: &mut Cluster<_>, region: &metapb::Region| {
let cluster = &mut *cluster;
region.get_peers().iter().for_each(|p| {
let mut retry_times = 1;
loop {
let resp =
async_read_on_peer(cluster, p.clone(), region.clone(), b"key", true, true)
.recv()
.unwrap();
                if !resp.get_header().has_error() {
return;
}
if retry_times >= 50
|
retry_times += 1;
sleep_ms(20);
}
});
};
let check_lock = |lock: &LockInfo, k: &[u8], pk: &[u8], ts| {
assert_eq!(lock.get_key(), k);
assert_eq!(lock.get_primary_lock(), pk);
assert_eq!(lock.get_lock_version(), ts);
};
// Register lock observer at safe point 10000.
let mut safe_point = 10000;
clients.iter().for_each(|(_, c)| {
// Should report error when checking non-existent observer.
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
// Lock observer should only collect values in lock CF.
let key = b"key0";
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, &b"v".repeat(1024))],
key.to_vec(),
1,
);
must_kv_commit(&leader_client, ctx.clone(), vec![key.to_vec()], 1, 2, 2);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Lock observer shouldn't collect locks after the safe point.
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, b"v")],
key.to_vec(),
safe_point + 1,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Write 999 locks whose timestamp is less than the safe point.
let mutations = (1..1000)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(&leader_client, ctx.clone(), mutations, b"key1".to_vec(), 10);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
// Plus the first lock.
assert_eq!(locks.len(), 1000);
});
// Add a new store and register lock observer.
let store_id = cluster.add_new_engine();
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(store_id));
let client = TikvClient::new(channel);
must_register_lock_observer(&client, safe_point);
// Add a new peer. Lock observer should collect all locks from snapshot.
let peer = new_peer(store_id, store_id);
cluster.pd_client.must_add_peer(region_id, peer.clone());
cluster.pd_client.must_none_pending_peer(peer);
wait_for_apply(&mut cluster, ®ion);
let locks = must_check_lock_observer(&client, safe_point, true);
assert_eq!(locks.len(), 999);
    // Should be dirty when it collects too many locks.
let mutations = (1000..1100)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(
&leader_client,
ctx.clone(),
mutations,
b"key1000".to_vec(),
100,
);
wait_for_apply(&mut cluster, ®ion);
clients.insert(store_id, client);
clients.iter().for_each(|(_, c)| {
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Reregister and check. It shouldn't clean up state.
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Register lock observer at a later safe point. Lock observer should reset its state.
safe_point += 1;
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
// Can't register observer with smaller max_ts.
assert!(
!register_lock_observer(&c, safe_point - 1)
.get_error()
.is_empty()
);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
let leader_client = clients.get(&leader_store_id).unwrap();
must_kv_prewrite(
&leader_client,
ctx,
vec![new_mutation(Op::Put, b"key1100", b"v")],
b"key1100".to_vec(),
safe_point,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
// Should collect locks according to the new max ts.
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Shouldn't remove it with a wrong max ts.
assert!(
!remove_lock_observer(c, safe_point - 1)
.get_error()
.is_empty()
);
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Remove lock observers.
must_remove_lock_observer(c, safe_point);
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
});
}
// Since v5.0 GC bypasses Raft, which means GC scans/deletes records with `keys::DATA_PREFIX`.
// This case ensures it's performed correctly.
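//
// Rough sketch of the key forms the assertions below rely on (the exact prefix byte is an
// implementation detail of the `keys` crate):
//   raw user key : b"k1"
//   MVCC key     : Key::from_raw(b"k1").append_ts(ts)  (memcomparable key plus ts suffix)
//   data key     : data_key(mvcc_key.as_encoded())     (prepends the local DATA_PREFIX)
// Since GC bypasses Raft, it reads and deletes these DATA_PREFIX-ed keys directly from the
// engine, which is what the get_value/get_value_cf checks verify.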
#[test]
fn test_gc_bypass_raft() {
let (cluster, leader, ctx) = must_new_cluster_mul(1);
cluster.pd_client.disable_default_operator();
let env = Arc::new(Environment::new(1));
let leader_store = leader.get_store_id();
let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader_store));
let client = TikvClient::new(channel);
let pk = b"k1".to_vec();
let value = vec![b'x'; 300];
let engine = cluster.engines.get(&leader_store).unwrap();
for &start_ts in &[10, 20, 30, 40] {
let commit_ts = start_ts + 5;
let muts = vec![new_mutation(Op::Put, b"k1", &value)];
must_kv_prewrite(&client, ctx.clone(), muts, pk.clone(), start_ts);
let keys = vec![pk.clone()];
must_kv_commit(&client, ctx.clone(), keys, start_ts, commit_ts, commit_ts);
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_some());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_some());
}
let gc_sched = cluster.sim.rl().get_gc_worker(1).scheduler();
assert!(sync_gc(&gc_sched, 0, b"k1".to_vec(), b"k2".to_vec(), 200.into()).is_ok());
for &start_ts in &[10, 20, 30] {
let commit_ts = start_ts + 5;
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_none());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_none());
}
}
|
{
panic!("failed to read on {:?}: {:?}", p, resp);
}
|
conditional_block
|
gc_worker.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashMap;
use engine_traits::{Peekable, CF_WRITE};
use grpcio::{ChannelBuilder, Environment};
use keys::data_key;
use kvproto::{kvrpcpb::*, metapb, tikvpb::TikvClient};
use std::sync::Arc;
use test_raftstore::*;
use tikv::server::gc_worker::sync_gc;
use tikv_util::HandyRwLock;
use txn_types::Key;
#[test]
fn test_physical_scan_lock() {
let (_cluster, client, ctx) = must_new_cluster_and_kv_client();
    // Generate kvs like k10, v10, ts=10; k11, v11, ts=11; ...
let kv: Vec<_> = (10..20)
.map(|i| (i, vec![b'k', i as u8], vec![b'v', i as u8]))
.collect();
for (ts, k, v) in &kv {
let mut mutation = Mutation::default();
mutation.set_op(Op::Put);
mutation.set_key(k.clone());
mutation.set_value(v.clone());
must_kv_prewrite(&client, ctx.clone(), vec![mutation], k.clone(), *ts);
}
let all_locks: Vec<_> = kv
.into_iter()
.map(|(ts, k, _)| {
// Create a LockInfo that matches the prewrite request in `must_kv_prewrite`.
let mut lock_info = LockInfo::default();
lock_info.set_primary_lock(k.clone());
lock_info.set_lock_version(ts);
lock_info.set_key(k);
lock_info.set_lock_ttl(3000);
lock_info.set_lock_type(Op::Put);
lock_info.set_min_commit_ts(ts + 1);
lock_info
})
.collect();
let check_result = |got_locks: &[_], expected_locks: &[_]| {
for i in 0..std::cmp::max(got_locks.len(), expected_locks.len()) {
assert_eq!(got_locks[i], expected_locks[i], "lock {} mismatch", i);
}
};
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 30, b"", 100),
&all_locks,
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 15, b"", 100),
&all_locks[0..=5],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 10, b"", 100),
&all_locks[0..1],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 9, b"", 100),
&[],
);
check_result(
&must_physical_scan_lock(&client, ctx, 30, &[b'k', 13], 5),
&all_locks[3..8],
);
}
#[test]
fn test_applied_lock_collector() {
let mut cluster = new_server_cluster(0, 3);
cluster.pd_client.disable_default_operator();
cluster.run();
// Create all stores' clients.
let env = Arc::new(Environment::new(1));
let mut clients = HashMap::default();
for node_id in cluster.get_node_ids() {
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(node_id));
let client = TikvClient::new(channel);
clients.insert(node_id, client);
}
// Create the ctx of the first region.
let region = cluster.get_region(b"");
let region_id = region.get_id();
let leader_peer = cluster.leader_of_region(region_id).unwrap();
let leader_store_id = leader_peer.get_store_id();
let leader_client = clients.get(&leader_store_id).unwrap();
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader_peer);
ctx.set_region_epoch(cluster.get_region_epoch(region_id));
    // It's used to make sure all stores apply all logs.
let wait_for_apply = |cluster: &mut Cluster<_>, region: &metapb::Region| {
let cluster = &mut *cluster;
region.get_peers().iter().for_each(|p| {
let mut retry_times = 1;
loop {
let resp =
async_read_on_peer(cluster, p.clone(), region.clone(), b"key", true, true)
.recv()
.unwrap();
                if !resp.get_header().has_error() {
return;
}
if retry_times >= 50 {
panic!("failed to read on {:?}: {:?}", p, resp);
}
retry_times += 1;
sleep_ms(20);
}
});
};
let check_lock = |lock: &LockInfo, k: &[u8], pk: &[u8], ts| {
assert_eq!(lock.get_key(), k);
assert_eq!(lock.get_primary_lock(), pk);
assert_eq!(lock.get_lock_version(), ts);
};
// Register lock observer at safe point 10000.
let mut safe_point = 10000;
clients.iter().for_each(|(_, c)| {
// Should report error when checking non-existent observer.
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
// Lock observer should only collect values in lock CF.
let key = b"key0";
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, &b"v".repeat(1024))],
key.to_vec(),
1,
);
must_kv_commit(&leader_client, ctx.clone(), vec![key.to_vec()], 1, 2, 2);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Lock observer shouldn't collect locks after the safe point.
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, b"v")],
key.to_vec(),
safe_point + 1,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Write 999 locks whose timestamp is less than the safe point.
let mutations = (1..1000)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(&leader_client, ctx.clone(), mutations, b"key1".to_vec(), 10);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
// Plus the first lock.
assert_eq!(locks.len(), 1000);
});
// Add a new store and register lock observer.
let store_id = cluster.add_new_engine();
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(store_id));
let client = TikvClient::new(channel);
must_register_lock_observer(&client, safe_point);
// Add a new peer. Lock observer should collect all locks from snapshot.
let peer = new_peer(store_id, store_id);
cluster.pd_client.must_add_peer(region_id, peer.clone());
cluster.pd_client.must_none_pending_peer(peer);
wait_for_apply(&mut cluster, ®ion);
let locks = must_check_lock_observer(&client, safe_point, true);
assert_eq!(locks.len(), 999);
    // Should be dirty when it collects too many locks.
let mutations = (1000..1100)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(
&leader_client,
ctx.clone(),
mutations,
b"key1000".to_vec(),
100,
);
wait_for_apply(&mut cluster, ®ion);
clients.insert(store_id, client);
clients.iter().for_each(|(_, c)| {
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Reregister and check. It shouldn't clean up state.
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Register lock observer at a later safe point. Lock observer should reset its state.
safe_point += 1;
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
// Can't register observer with smaller max_ts.
assert!(
!register_lock_observer(&c, safe_point - 1)
.get_error()
.is_empty()
);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
let leader_client = clients.get(&leader_store_id).unwrap();
must_kv_prewrite(
&leader_client,
ctx,
vec![new_mutation(Op::Put, b"key1100", b"v")],
b"key1100".to_vec(),
safe_point,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
// Should collect locks according to the new max ts.
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Shouldn't remove it with a wrong max ts.
assert!(
!remove_lock_observer(c, safe_point - 1)
.get_error()
.is_empty()
);
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Remove lock observers.
must_remove_lock_observer(c, safe_point);
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
});
}
// Since v5.0 GC bypasses Raft, which means GC scans/deletes records with `keys::DATA_PREFIX`.
// This case ensures it's performed correctly.
#[test]
fn test_gc_bypass_raft()
|
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_some());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_some());
}
let gc_sched = cluster.sim.rl().get_gc_worker(1).scheduler();
assert!(sync_gc(&gc_sched, 0, b"k1".to_vec(), b"k2".to_vec(), 200.into()).is_ok());
for &start_ts in &[10, 20, 30] {
let commit_ts = start_ts + 5;
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_none());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_none());
}
}
|
{
let (cluster, leader, ctx) = must_new_cluster_mul(1);
cluster.pd_client.disable_default_operator();
let env = Arc::new(Environment::new(1));
let leader_store = leader.get_store_id();
let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader_store));
let client = TikvClient::new(channel);
let pk = b"k1".to_vec();
let value = vec![b'x'; 300];
let engine = cluster.engines.get(&leader_store).unwrap();
for &start_ts in &[10, 20, 30, 40] {
let commit_ts = start_ts + 5;
let muts = vec![new_mutation(Op::Put, b"k1", &value)];
must_kv_prewrite(&client, ctx.clone(), muts, pk.clone(), start_ts);
let keys = vec![pk.clone()];
must_kv_commit(&client, ctx.clone(), keys, start_ts, commit_ts, commit_ts);
|
identifier_body
|
gc_worker.rs
|
// Copyright 2020 TiKV Project Authors. Licensed under Apache-2.0.
use collections::HashMap;
use engine_traits::{Peekable, CF_WRITE};
use grpcio::{ChannelBuilder, Environment};
use keys::data_key;
use kvproto::{kvrpcpb::*, metapb, tikvpb::TikvClient};
use std::sync::Arc;
use test_raftstore::*;
use tikv::server::gc_worker::sync_gc;
use tikv_util::HandyRwLock;
use txn_types::Key;
#[test]
fn test_physical_scan_lock() {
let (_cluster, client, ctx) = must_new_cluster_and_kv_client();
    // Generate kvs like k10, v10, ts=10; k11, v11, ts=11; ...
let kv: Vec<_> = (10..20)
.map(|i| (i, vec![b'k', i as u8], vec![b'v', i as u8]))
.collect();
for (ts, k, v) in &kv {
let mut mutation = Mutation::default();
mutation.set_op(Op::Put);
mutation.set_key(k.clone());
mutation.set_value(v.clone());
must_kv_prewrite(&client, ctx.clone(), vec![mutation], k.clone(), *ts);
}
let all_locks: Vec<_> = kv
.into_iter()
.map(|(ts, k, _)| {
// Create a LockInfo that matches the prewrite request in `must_kv_prewrite`.
let mut lock_info = LockInfo::default();
lock_info.set_primary_lock(k.clone());
lock_info.set_lock_version(ts);
lock_info.set_key(k);
lock_info.set_lock_ttl(3000);
lock_info.set_lock_type(Op::Put);
lock_info.set_min_commit_ts(ts + 1);
lock_info
})
.collect();
let check_result = |got_locks: &[_], expected_locks: &[_]| {
for i in 0..std::cmp::max(got_locks.len(), expected_locks.len()) {
assert_eq!(got_locks[i], expected_locks[i], "lock {} mismatch", i);
}
};
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 30, b"", 100),
&all_locks,
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 15, b"", 100),
&all_locks[0..=5],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 10, b"", 100),
&all_locks[0..1],
);
check_result(
&must_physical_scan_lock(&client, ctx.clone(), 9, b"", 100),
&[],
);
check_result(
&must_physical_scan_lock(&client, ctx, 30, &[b'k', 13], 5),
&all_locks[3..8],
);
}
#[test]
fn
|
() {
let mut cluster = new_server_cluster(0, 3);
cluster.pd_client.disable_default_operator();
cluster.run();
// Create all stores' clients.
let env = Arc::new(Environment::new(1));
let mut clients = HashMap::default();
for node_id in cluster.get_node_ids() {
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(node_id));
let client = TikvClient::new(channel);
clients.insert(node_id, client);
}
// Create the ctx of the first region.
let region = cluster.get_region(b"");
let region_id = region.get_id();
let leader_peer = cluster.leader_of_region(region_id).unwrap();
let leader_store_id = leader_peer.get_store_id();
let leader_client = clients.get(&leader_store_id).unwrap();
let mut ctx = Context::default();
ctx.set_region_id(region_id);
ctx.set_peer(leader_peer);
ctx.set_region_epoch(cluster.get_region_epoch(region_id));
    // It's used to make sure all stores apply all logs.
let wait_for_apply = |cluster: &mut Cluster<_>, region: &metapb::Region| {
let cluster = &mut *cluster;
region.get_peers().iter().for_each(|p| {
let mut retry_times = 1;
loop {
let resp =
async_read_on_peer(cluster, p.clone(), region.clone(), b"key", true, true)
.recv()
.unwrap();
                if !resp.get_header().has_error() {
return;
}
if retry_times >= 50 {
panic!("failed to read on {:?}: {:?}", p, resp);
}
retry_times += 1;
sleep_ms(20);
}
});
};
let check_lock = |lock: &LockInfo, k: &[u8], pk: &[u8], ts| {
assert_eq!(lock.get_key(), k);
assert_eq!(lock.get_primary_lock(), pk);
assert_eq!(lock.get_lock_version(), ts);
};
// Register lock observer at safe point 10000.
let mut safe_point = 10000;
clients.iter().for_each(|(_, c)| {
// Should report error when checking non-existent observer.
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
// Lock observer should only collect values in lock CF.
let key = b"key0";
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, &b"v".repeat(1024))],
key.to_vec(),
1,
);
must_kv_commit(&leader_client, ctx.clone(), vec![key.to_vec()], 1, 2, 2);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Lock observer shouldn't collect locks after the safe point.
must_kv_prewrite(
&leader_client,
ctx.clone(),
vec![new_mutation(Op::Put, key, b"v")],
key.to_vec(),
safe_point + 1,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1);
check_lock(&locks[0], key, key, 1);
});
// Write 999 locks whose timestamp is less than the safe point.
let mutations = (1..1000)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(&leader_client, ctx.clone(), mutations, b"key1".to_vec(), 10);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
let locks = must_check_lock_observer(c, safe_point, true);
// Plus the first lock.
assert_eq!(locks.len(), 1000);
});
// Add a new store and register lock observer.
let store_id = cluster.add_new_engine();
let channel =
ChannelBuilder::new(Arc::clone(&env)).connect(&cluster.sim.rl().get_addr(store_id));
let client = TikvClient::new(channel);
must_register_lock_observer(&client, safe_point);
// Add a new peer. Lock observer should collect all locks from snapshot.
let peer = new_peer(store_id, store_id);
cluster.pd_client.must_add_peer(region_id, peer.clone());
cluster.pd_client.must_none_pending_peer(peer);
wait_for_apply(&mut cluster, ®ion);
let locks = must_check_lock_observer(&client, safe_point, true);
assert_eq!(locks.len(), 999);
    // Should be dirty when it collects too many locks.
let mutations = (1000..1100)
.map(|i| new_mutation(Op::Put, format!("key{}", i).as_bytes(), b"v"))
.collect();
must_kv_prewrite(
&leader_client,
ctx.clone(),
mutations,
b"key1000".to_vec(),
100,
);
wait_for_apply(&mut cluster, ®ion);
clients.insert(store_id, client);
clients.iter().for_each(|(_, c)| {
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Reregister and check. It shouldn't clean up state.
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
let resp = check_lock_observer(c, safe_point);
assert!(resp.get_error().is_empty(), "{:?}", resp.get_error());
assert!(!resp.get_is_clean());
// MAX_COLLECT_SIZE is 1024.
assert_eq!(resp.get_locks().len(), 1024);
});
// Register lock observer at a later safe point. Lock observer should reset its state.
safe_point += 1;
clients.iter().for_each(|(_, c)| {
must_register_lock_observer(c, safe_point);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
// Can't register observer with smaller max_ts.
assert!(
!register_lock_observer(&c, safe_point - 1)
.get_error()
.is_empty()
);
assert!(must_check_lock_observer(c, safe_point, true).is_empty());
});
let leader_client = clients.get(&leader_store_id).unwrap();
must_kv_prewrite(
&leader_client,
ctx,
vec![new_mutation(Op::Put, b"key1100", b"v")],
b"key1100".to_vec(),
safe_point,
);
wait_for_apply(&mut cluster, ®ion);
clients.iter().for_each(|(_, c)| {
// Should collect locks according to the new max ts.
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Shouldn't remove it with a wrong max ts.
assert!(
!remove_lock_observer(c, safe_point - 1)
.get_error()
.is_empty()
);
let locks = must_check_lock_observer(c, safe_point, true);
assert_eq!(locks.len(), 1, "{:?}", locks);
// Remove lock observers.
must_remove_lock_observer(c, safe_point);
assert!(!check_lock_observer(c, safe_point).get_error().is_empty());
});
}
// Since v5.0 GC bypasses Raft, which means GC scans/deletes records with `keys::DATA_PREFIX`.
// This case ensures it's performed correctly.
#[test]
fn test_gc_bypass_raft() {
let (cluster, leader, ctx) = must_new_cluster_mul(1);
cluster.pd_client.disable_default_operator();
let env = Arc::new(Environment::new(1));
let leader_store = leader.get_store_id();
let channel = ChannelBuilder::new(env).connect(&cluster.sim.rl().get_addr(leader_store));
let client = TikvClient::new(channel);
let pk = b"k1".to_vec();
let value = vec![b'x'; 300];
let engine = cluster.engines.get(&leader_store).unwrap();
for &start_ts in &[10, 20, 30, 40] {
let commit_ts = start_ts + 5;
let muts = vec![new_mutation(Op::Put, b"k1", &value)];
must_kv_prewrite(&client, ctx.clone(), muts, pk.clone(), start_ts);
let keys = vec![pk.clone()];
must_kv_commit(&client, ctx.clone(), keys, start_ts, commit_ts, commit_ts);
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_some());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_some());
}
let gc_sched = cluster.sim.rl().get_gc_worker(1).scheduler();
assert!(sync_gc(&gc_sched, 0, b"k1".to_vec(), b"k2".to_vec(), 200.into()).is_ok());
for &start_ts in &[10, 20, 30] {
let commit_ts = start_ts + 5;
let key = Key::from_raw(b"k1").append_ts(start_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value(&key).unwrap().is_none());
let key = Key::from_raw(b"k1").append_ts(commit_ts.into());
let key = data_key(key.as_encoded());
assert!(engine.kv.get_value_cf(CF_WRITE, &key).unwrap().is_none());
}
}
|
test_applied_lock_collector
|
identifier_name
|
counter_button.rs
|
extern crate domafic;
use domafic::tags::{button, div, h1};
use domafic::listener::on;
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
use domafic::web_render::{run, JsIo};
#[cfg(target_os = "emscripten")]
use domafic::KeyIter;
type State = isize;
enum Msg {
Increment,
Decrement,
}
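// Data flow, inferred from the code below: the `on("click", ...)` listeners turn DOM events
// into `Msg` values, `update` folds each `Msg` into the counter `State`, and `render`
// rebuilds the DOM tree from the new `State`.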
fn main()
|
)),
))
};
// If rendering server-side:
#[cfg(not(target_os = "emscripten"))]
println!("HTML: {}", render(&0));
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
run("body", update, render, 0);
}
|
{
#[cfg(target_os = "emscripten")]
let update = |state: &mut State, msg: Msg, _: KeyIter, _: &JsIo<Msg>| {
*state = match msg {
Msg::Increment => *state + 1,
Msg::Decrement => *state - 1,
}
};
let render = |state: &State| {
div ((
h1("Hello from rust!"),
button ((
on("click", |_| Msg::Decrement),
"-",
)),
state.to_string(),
button ((
on("click", |_| Msg::Increment),
"+",
|
identifier_body
|
counter_button.rs
|
extern crate domafic;
use domafic::tags::{button, div, h1};
use domafic::listener::on;
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
use domafic::web_render::{run, JsIo};
#[cfg(target_os = "emscripten")]
use domafic::KeyIter;
type State = isize;
enum Msg {
Increment,
Decrement,
}
fn
|
() {
#[cfg(target_os = "emscripten")]
let update = |state: &mut State, msg: Msg, _: KeyIter, _: &JsIo<Msg>| {
*state = match msg {
Msg::Increment => *state + 1,
Msg::Decrement => *state - 1,
}
};
let render = |state: &State| {
div ((
h1("Hello from rust!"),
button ((
on("click", |_| Msg::Decrement),
"-",
)),
state.to_string(),
button ((
on("click", |_| Msg::Increment),
"+",
)),
))
};
// If rendering server-side:
#[cfg(not(target_os = "emscripten"))]
println!("HTML: {}", render(&0));
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
run("body", update, render, 0);
}
|
main
|
identifier_name
|
counter_button.rs
|
extern crate domafic;
use domafic::tags::{button, div, h1};
use domafic::listener::on;
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
use domafic::web_render::{run, JsIo};
#[cfg(target_os = "emscripten")]
use domafic::KeyIter;
type State = isize;
enum Msg {
Increment,
Decrement,
}
|
let update = |state: &mut State, msg: Msg, _: KeyIter, _: &JsIo<Msg>| {
*state = match msg {
Msg::Increment => *state + 1,
Msg::Decrement => *state - 1,
}
};
let render = |state: &State| {
div ((
h1("Hello from rust!"),
button ((
on("click", |_| Msg::Decrement),
"-",
)),
state.to_string(),
button ((
on("click", |_| Msg::Increment),
"+",
)),
))
};
// If rendering server-side:
#[cfg(not(target_os = "emscripten"))]
println!("HTML: {}", render(&0));
// If rendering client-side with asm.js or WebAssembly:
#[cfg(target_os = "emscripten")]
run("body", update, render, 0);
}
|
fn main() {
#[cfg(target_os = "emscripten")]
|
random_line_split
|
b58.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! Base58 encoder and decoder
use std::{error, fmt, str};
use byteorder::{ByteOrder, LittleEndian};
use util::hash::DoubleSha256;
use address::Error;
static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
static BASE58_DIGITS: [Option<u8>; 128] = [
None, None, None, None, None, None, None, None, // 0-7
None, None, None, None, None, None, None, None, // 8-15
None, None, None, None, None, None, None, None, // 16-23
None, None, None, None, None, None, None, None, // 24-31
None, None, None, None, None, None, None, None, // 32-39
None, None, None, None, None, None, None, None, // 40-47
None, Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), // 48-55
Some(7), Some(8), None, None, None, None, None, None, // 56-63
None, Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), Some(15), // 64-71
Some(16), None, Some(17), Some(18), Some(19), Some(20), Some(21), None, // 72-79
Some(22), Some(23), Some(24), Some(25), Some(26), Some(27), Some(28), Some(29), // 80-87
Some(30), Some(31), Some(32), None, None, None, None, None, // 88-95
None, Some(33), Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), // 96-103
Some(40), Some(41), Some(42), Some(43), None, Some(44), Some(45), Some(46), // 104-111
Some(47), Some(48), Some(49), Some(50), Some(51), Some(52), Some(53), Some(54), // 112-119
Some(55), Some(56), Some(57), None, None, None, None, None, // 120-127
];
/// Decode base58-encoded string into a byte vector
pub fn from(data: &str) -> Result<Vec<u8>, Error> {
// 11/15 is just over log_256(58)
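    // (Sanity check: log_256(58) = ln 58 / ln 256 ≈ 0.7323 and 11/15 ≈ 0.7333, so
    // `1 + data.len() * 11 / 15` bytes are always enough to hold the decoded number.)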
let mut scratch = vec![0u8; 1 + data.len() * 11 / 15];
// Build in base 256
for d58 in data.bytes() {
// Compute "X = X * 58 + next_digit" in base 256
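        // Worked example (illustrative): decoding "21" maps '2' -> 1 and '1' -> 0, so X goes
        // 0, then 0 * 58 + 1 = 1, then 1 * 58 + 0 = 58, i.e. the single byte [58], matching
        // the `from("21")` case in the tests below.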
if d58 as usize > BASE58_DIGITS.len() {
return Err(Error::BadByte(d58));
}
let mut carry = match BASE58_DIGITS[d58 as usize] {
Some(d58) => d58 as u32,
None => { return Err(Error::BadByte(d58)); }
};
for d256 in scratch.iter_mut().rev() {
carry += *d256 as u32 * 58;
*d256 = carry as u8;
carry /= 256;
}
assert_eq!(carry, 0);
}
// Copy leading zeroes directly
let mut ret: Vec<u8> = data.bytes().take_while(|&x| x == BASE58_CHARS[0])
.map(|_| 0)
.collect();
// Copy rest of string
ret.extend(scratch.into_iter().skip_while(|&x| x == 0));
Ok(ret)
}
/// Decode a base58check-encoded string
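/// (Format recap, inferred from the checks below: the decoded bytes are `payload || checksum`,
/// where the 4-byte checksum is the prefix of `DoubleSha256(payload)`; inputs that decode to
/// fewer than 4 bytes are rejected.)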
pub fn from_check(data: &str) -> Result<Vec<u8>, Error> {
let mut ret: Vec<u8> = from(data)?;
if ret.len() < 4 {
return Err(Error::TooShort(ret.len()));
}
let ck_start = ret.len() - 4;
let expected = DoubleSha256::from_data(&ret[..ck_start]).into_le().low_u32();
let actual = LittleEndian::read_u32(&ret[ck_start..(ck_start + 4)]);
    if expected != actual {
return Err(Error::BadChecksum(expected, actual));
}
ret.truncate(ck_start);
Ok(ret)
}
fn encode_iter_utf8<I>(data: I) -> Vec<u8>
where
I: Iterator<Item = u8> + Clone,
{
let (len, _) = data.size_hint();
// 7/5 is just over log_58(256)
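    // (Sanity check: log_58(256) = ln 256 / ln 58 ≈ 1.366 < 1.4 = 7/5, so the result never
    // needs more than `1 + len * 7 / 5` base58 digits.)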
let mut ret = Vec::with_capacity(1 + len * 7 / 5);
let mut leading_zero_count = 0;
let mut leading_zeroes = true;
// Build string in little endian with 0-58 in place of characters...
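    // Worked example (illustrative): encoding [58] starts with carry = 58 and an empty `ret`;
    // the carry loop pushes 58 % 58 = 0 and then 1, giving little-endian digits [0, 1], which
    // reverse to [1, 0] and map through BASE58_CHARS to "21", the inverse of `from("21")`.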
for d256 in data {
let mut carry = d256 as usize;
if leading_zeroes && carry == 0 {
leading_zero_count += 1;
} else {
leading_zeroes = false;
}
for ch in ret.iter_mut() {
let new_ch = *ch as usize * 256 + carry;
*ch = (new_ch % 58) as u8;
carry = new_ch / 58;
}
while carry > 0 {
ret.push((carry % 58) as u8);
carry /= 58;
}
}
    // ... then reverse it and convert to chars
for _ in 0..leading_zero_count {
ret.push(0);
}
ret.reverse();
for ch in ret.iter_mut() {
*ch = BASE58_CHARS[*ch as usize];
}
ret
}
fn encode_iter<I>(data: I) -> String
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
String::from_utf8(ret).unwrap()
}
/// Directly encode a slice as base58 into a `Formatter`.
fn encode_iter_to_fmt<I>(fmt: &mut fmt::Formatter, data: I) -> fmt::Result
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
fmt.write_str(str::from_utf8(&ret).unwrap())
}
/// Directly encode a slice as base58
pub fn encode_slice(data: &[u8]) -> String {
encode_iter(data.iter().cloned())
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice(data: &[u8]) -> String
|
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice_to_fmt(fmt: &mut fmt::Formatter, data: &[u8]) -> fmt::Result {
let checksum = DoubleSha256::from_data(&data);
let iter = data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned());
encode_iter_to_fmt(fmt, iter)
}
#[cfg(test)]
mod tests {
use super::*;
use util::hash::hex_bytes as hex_decode;
#[test]
fn test_base58_encode() {
// Basics
assert_eq!(&encode_slice(&[0][..]), "1");
assert_eq!(&encode_slice(&[1][..]), "2");
assert_eq!(&encode_slice(&[58][..]), "21");
assert_eq!(&encode_slice(&[13, 36][..]), "211");
// Leading zeroes
assert_eq!(&encode_slice(&[0, 13, 36][..]), "1211");
assert_eq!(&encode_slice(&[0, 0, 0, 0, 13, 36][..]), "1111211");
// Addresses
let addr = hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap();
assert_eq!(&check_encode_slice(&addr[..]), "1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH");
}
#[test]
fn test_base58_decode() {
// Basics
assert_eq!(from("1").ok(), Some(vec![0u8]));
assert_eq!(from("2").ok(), Some(vec![1u8]));
assert_eq!(from("21").ok(), Some(vec![58u8]));
assert_eq!(from("211").ok(), Some(vec![13u8, 36]));
// Leading zeroes
assert_eq!(from("1211").ok(), Some(vec![0u8, 13, 36]));
assert_eq!(from("111211").ok(), Some(vec![0u8, 0, 0, 13, 36]));
// Addresses
assert_eq!(from_check("1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH").ok(),
Some(hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap()))
}
#[test]
fn test_base58_roundtrip() {
let s = "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs";
let v: Vec<u8> = from_check(s).unwrap();
assert_eq!(check_encode_slice(&v[..]), s);
assert_eq!(from_check(&check_encode_slice(&v[..])).ok(), Some(v));
}
}
|
{
let checksum = DoubleSha256::from_data(&data);
encode_iter(
data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned())
)
}
|
identifier_body
|
b58.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! Base58 encoder and decoder
use std::{error, fmt, str};
use byteorder::{ByteOrder, LittleEndian};
use util::hash::DoubleSha256;
use address::Error;
static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
static BASE58_DIGITS: [Option<u8>; 128] = [
None, None, None, None, None, None, None, None, // 0-7
None, None, None, None, None, None, None, None, // 8-15
None, None, None, None, None, None, None, None, // 16-23
None, None, None, None, None, None, None, None, // 24-31
None, None, None, None, None, None, None, None, // 32-39
None, None, None, None, None, None, None, None, // 40-47
None, Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), // 48-55
Some(7), Some(8), None, None, None, None, None, None, // 56-63
None, Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), Some(15), // 64-71
Some(16), None, Some(17), Some(18), Some(19), Some(20), Some(21), None, // 72-79
Some(22), Some(23), Some(24), Some(25), Some(26), Some(27), Some(28), Some(29), // 80-87
Some(30), Some(31), Some(32), None, None, None, None, None, // 88-95
None, Some(33), Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), // 96-103
Some(40), Some(41), Some(42), Some(43), None, Some(44), Some(45), Some(46), // 104-111
Some(47), Some(48), Some(49), Some(50), Some(51), Some(52), Some(53), Some(54), // 112-119
Some(55), Some(56), Some(57), None, None, None, None, None, // 120-127
];
/// Decode base58-encoded string into a byte vector
pub fn from(data: &str) -> Result<Vec<u8>, Error> {
// 11/15 is just over log_256(58)
let mut scratch = vec![0u8; 1 + data.len() * 11 / 15];
// Build in base 256
for d58 in data.bytes() {
// Compute "X = X * 58 + next_digit" in base 256
if d58 as usize > BASE58_DIGITS.len() {
return Err(Error::BadByte(d58));
}
let mut carry = match BASE58_DIGITS[d58 as usize] {
Some(d58) => d58 as u32,
None => { return Err(Error::BadByte(d58)); }
};
for d256 in scratch.iter_mut().rev() {
carry += *d256 as u32 * 58;
*d256 = carry as u8;
carry /= 256;
}
assert_eq!(carry, 0);
}
// Copy leading zeroes directly
let mut ret: Vec<u8> = data.bytes().take_while(|&x| x == BASE58_CHARS[0])
.map(|_| 0)
.collect();
// Copy rest of string
ret.extend(scratch.into_iter().skip_while(|&x| x == 0));
Ok(ret)
}
/// Decode a base58check-encoded string
pub fn from_check(data: &str) -> Result<Vec<u8>, Error> {
let mut ret: Vec<u8> = from(data)?;
if ret.len() < 4
|
let ck_start = ret.len() - 4;
let expected = DoubleSha256::from_data(&ret[..ck_start]).into_le().low_u32();
let actual = LittleEndian::read_u32(&ret[ck_start..(ck_start + 4)]);
    if expected != actual {
return Err(Error::BadChecksum(expected, actual));
}
ret.truncate(ck_start);
Ok(ret)
}
fn encode_iter_utf8<I>(data: I) -> Vec<u8>
where
I: Iterator<Item = u8> + Clone,
{
let (len, _) = data.size_hint();
// 7/5 is just over log_58(256)
let mut ret = Vec::with_capacity(1 + len * 7 / 5);
let mut leading_zero_count = 0;
let mut leading_zeroes = true;
// Build string in little endian with 0-58 in place of characters...
for d256 in data {
let mut carry = d256 as usize;
if leading_zeroes && carry == 0 {
leading_zero_count += 1;
} else {
leading_zeroes = false;
}
for ch in ret.iter_mut() {
let new_ch = *ch as usize * 256 + carry;
*ch = (new_ch % 58) as u8;
carry = new_ch / 58;
}
while carry > 0 {
ret.push((carry % 58) as u8);
carry /= 58;
}
}
    // ... then reverse it and convert to chars
for _ in 0..leading_zero_count {
ret.push(0);
}
ret.reverse();
for ch in ret.iter_mut() {
*ch = BASE58_CHARS[*ch as usize];
}
ret
}
fn encode_iter<I>(data: I) -> String
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
String::from_utf8(ret).unwrap()
}
/// Directly encode a slice as base58 into a `Formatter`.
fn encode_iter_to_fmt<I>(fmt: &mut fmt::Formatter, data: I) -> fmt::Result
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
fmt.write_str(str::from_utf8(&ret).unwrap())
}
/// Directly encode a slice as base58
pub fn encode_slice(data: &[u8]) -> String {
encode_iter(data.iter().cloned())
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice(data: &[u8]) -> String {
let checksum = DoubleSha256::from_data(&data);
encode_iter(
data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned())
)
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice_to_fmt(fmt: &mut fmt::Formatter, data: &[u8]) -> fmt::Result {
let checksum = DoubleSha256::from_data(&data);
let iter = data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned());
encode_iter_to_fmt(fmt, iter)
}
#[cfg(test)]
mod tests {
use super::*;
use util::hash::hex_bytes as hex_decode;
#[test]
fn test_base58_encode() {
// Basics
assert_eq!(&encode_slice(&[0][..]), "1");
assert_eq!(&encode_slice(&[1][..]), "2");
assert_eq!(&encode_slice(&[58][..]), "21");
assert_eq!(&encode_slice(&[13, 36][..]), "211");
// Leading zeroes
assert_eq!(&encode_slice(&[0, 13, 36][..]), "1211");
assert_eq!(&encode_slice(&[0, 0, 0, 0, 13, 36][..]), "1111211");
// Addresses
let addr = hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap();
assert_eq!(&check_encode_slice(&addr[..]), "1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH");
}
#[test]
fn test_base58_decode() {
// Basics
assert_eq!(from("1").ok(), Some(vec![0u8]));
assert_eq!(from("2").ok(), Some(vec![1u8]));
assert_eq!(from("21").ok(), Some(vec![58u8]));
assert_eq!(from("211").ok(), Some(vec![13u8, 36]));
// Leading zeroes
assert_eq!(from("1211").ok(), Some(vec![0u8, 13, 36]));
assert_eq!(from("111211").ok(), Some(vec![0u8, 0, 0, 13, 36]));
// Addresses
assert_eq!(from_check("1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH").ok(),
Some(hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap()))
}
#[test]
fn test_base58_roundtrip() {
let s = "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs";
let v: Vec<u8> = from_check(s).unwrap();
assert_eq!(check_encode_slice(&v[..]), s);
assert_eq!(from_check(&check_encode_slice(&v[..])).ok(), Some(v));
}
}
|
{
return Err(Error::TooShort(ret.len()));
}
|
conditional_block
|
b58.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! Base58 encoder and decoder
use std::{error, fmt, str};
use byteorder::{ByteOrder, LittleEndian};
use util::hash::DoubleSha256;
use address::Error;
static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
static BASE58_DIGITS: [Option<u8>; 128] = [
None, None, None, None, None, None, None, None, // 0-7
None, None, None, None, None, None, None, None, // 8-15
None, None, None, None, None, None, None, None, // 16-23
None, None, None, None, None, None, None, None, // 24-31
None, None, None, None, None, None, None, None, // 32-39
None, None, None, None, None, None, None, None, // 40-47
None, Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), // 48-55
Some(7), Some(8), None, None, None, None, None, None, // 56-63
None, Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), Some(15), // 64-71
Some(16), None, Some(17), Some(18), Some(19), Some(20), Some(21), None, // 72-79
Some(22), Some(23), Some(24), Some(25), Some(26), Some(27), Some(28), Some(29), // 80-87
Some(30), Some(31), Some(32), None, None, None, None, None, // 88-95
None, Some(33), Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), // 96-103
Some(40), Some(41), Some(42), Some(43), None, Some(44), Some(45), Some(46), // 104-111
Some(47), Some(48), Some(49), Some(50), Some(51), Some(52), Some(53), Some(54), // 112-119
Some(55), Some(56), Some(57), None, None, None, None, None, // 120-127
];
/// Decode base58-encoded string into a byte vector
pub fn from(data: &str) -> Result<Vec<u8>, Error> {
// 11/15 is just over log_256(58)
let mut scratch = vec![0u8; 1 + data.len() * 11 / 15];
// Build in base 256
for d58 in data.bytes() {
// Compute "X = X * 58 + next_digit" in base 256
if d58 as usize > BASE58_DIGITS.len() {
return Err(Error::BadByte(d58));
}
let mut carry = match BASE58_DIGITS[d58 as usize] {
Some(d58) => d58 as u32,
None => { return Err(Error::BadByte(d58)); }
};
for d256 in scratch.iter_mut().rev() {
carry += *d256 as u32 * 58;
*d256 = carry as u8;
carry /= 256;
}
assert_eq!(carry, 0);
}
// Copy leading zeroes directly
let mut ret: Vec<u8> = data.bytes().take_while(|&x| x == BASE58_CHARS[0])
.map(|_| 0)
.collect();
// Copy rest of string
ret.extend(scratch.into_iter().skip_while(|&x| x == 0));
Ok(ret)
}
/// Decode a base58check-encoded string
pub fn from_check(data: &str) -> Result<Vec<u8>, Error> {
let mut ret: Vec<u8> = from(data)?;
if ret.len() < 4 {
return Err(Error::TooShort(ret.len()));
}
let ck_start = ret.len() - 4;
let expected = DoubleSha256::from_data(&ret[..ck_start]).into_le().low_u32();
let actual = LittleEndian::read_u32(&ret[ck_start..(ck_start + 4)]);
    if expected != actual {
return Err(Error::BadChecksum(expected, actual));
}
ret.truncate(ck_start);
Ok(ret)
}
fn encode_iter_utf8<I>(data: I) -> Vec<u8>
where
I: Iterator<Item = u8> + Clone,
{
let (len, _) = data.size_hint();
// 7/5 is just over log_58(256)
let mut ret = Vec::with_capacity(1 + len * 7 / 5);
let mut leading_zero_count = 0;
let mut leading_zeroes = true;
// Build string in little endian with 0-58 in place of characters...
for d256 in data {
let mut carry = d256 as usize;
if leading_zeroes && carry == 0 {
leading_zero_count += 1;
} else {
leading_zeroes = false;
}
for ch in ret.iter_mut() {
let new_ch = *ch as usize * 256 + carry;
*ch = (new_ch % 58) as u8;
carry = new_ch / 58;
}
while carry > 0 {
ret.push((carry % 58) as u8);
carry /= 58;
}
}
    // ... then reverse it and convert to chars
for _ in 0..leading_zero_count {
ret.push(0);
}
ret.reverse();
for ch in ret.iter_mut() {
*ch = BASE58_CHARS[*ch as usize];
}
ret
}
fn encode_iter<I>(data: I) -> String
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
String::from_utf8(ret).unwrap()
}
/// Directly encode a slice as base58 into a `Formatter`.
fn encode_iter_to_fmt<I>(fmt: &mut fmt::Formatter, data: I) -> fmt::Result
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
fmt.write_str(str::from_utf8(&ret).unwrap())
}
/// Directly encode a slice as base58
pub fn encode_slice(data: &[u8]) -> String {
encode_iter(data.iter().cloned())
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice(data: &[u8]) -> String {
let checksum = DoubleSha256::from_data(&data);
encode_iter(
data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned())
)
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice_to_fmt(fmt: &mut fmt::Formatter, data: &[u8]) -> fmt::Result {
let checksum = DoubleSha256::from_data(&data);
let iter = data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned());
encode_iter_to_fmt(fmt, iter)
}
#[cfg(test)]
mod tests {
use super::*;
use util::hash::hex_bytes as hex_decode;
#[test]
fn test_base58_encode() {
// Basics
assert_eq!(&encode_slice(&[0][..]), "1");
assert_eq!(&encode_slice(&[1][..]), "2");
assert_eq!(&encode_slice(&[58][..]), "21");
assert_eq!(&encode_slice(&[13, 36][..]), "211");
// Leading zeroes
assert_eq!(&encode_slice(&[0, 13, 36][..]), "1211");
assert_eq!(&encode_slice(&[0, 0, 0, 0, 13, 36][..]), "1111211");
// Addresses
let addr = hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap();
assert_eq!(&check_encode_slice(&addr[..]), "1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH");
}
#[test]
fn test_base58_decode() {
// Basics
assert_eq!(from("1").ok(), Some(vec![0u8]));
assert_eq!(from("2").ok(), Some(vec![1u8]));
assert_eq!(from("21").ok(), Some(vec![58u8]));
assert_eq!(from("211").ok(), Some(vec![13u8, 36]));
// Leading zeroes
|
assert_eq!(from_check("1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH").ok(),
Some(hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap()))
}
#[test]
fn test_base58_roundtrip() {
let s = "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs";
let v: Vec<u8> = from_check(s).unwrap();
assert_eq!(check_encode_slice(&v[..]), s);
assert_eq!(from_check(&check_encode_slice(&v[..])).ok(), Some(v));
}
}
|
assert_eq!(from("1211").ok(), Some(vec![0u8, 13, 36]));
assert_eq!(from("111211").ok(), Some(vec![0u8, 0, 0, 13, 36]));
// Addresses
|
random_line_split
|
b58.rs
|
// Rust Bitcoin Library
// Written in 2014 by
// Andrew Poelstra <[email protected]>
//
// To the extent possible under law, the author(s) have dedicated all
// copyright and related and neighboring rights to this software to
// the public domain worldwide. This software is distributed without
// any warranty.
//
// You should have received a copy of the CC0 Public Domain Dedication
// along with this software.
// If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
//
//! Base58 encoder and decoder
use std::{error, fmt, str};
use byteorder::{ByteOrder, LittleEndian};
use util::hash::DoubleSha256;
use address::Error;
static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
static BASE58_DIGITS: [Option<u8>; 128] = [
None, None, None, None, None, None, None, None, // 0-7
None, None, None, None, None, None, None, None, // 8-15
None, None, None, None, None, None, None, None, // 16-23
None, None, None, None, None, None, None, None, // 24-31
None, None, None, None, None, None, None, None, // 32-39
None, None, None, None, None, None, None, None, // 40-47
None, Some(0), Some(1), Some(2), Some(3), Some(4), Some(5), Some(6), // 48-55
Some(7), Some(8), None, None, None, None, None, None, // 56-63
None, Some(9), Some(10), Some(11), Some(12), Some(13), Some(14), Some(15), // 64-71
Some(16), None, Some(17), Some(18), Some(19), Some(20), Some(21), None, // 72-79
Some(22), Some(23), Some(24), Some(25), Some(26), Some(27), Some(28), Some(29), // 80-87
Some(30), Some(31), Some(32), None, None, None, None, None, // 88-95
None, Some(33), Some(34), Some(35), Some(36), Some(37), Some(38), Some(39), // 96-103
Some(40), Some(41), Some(42), Some(43), None, Some(44), Some(45), Some(46), // 104-111
Some(47), Some(48), Some(49), Some(50), Some(51), Some(52), Some(53), Some(54), // 112-119
Some(55), Some(56), Some(57), None, None, None, None, None, // 120-127
];
/// Decode base58-encoded string into a byte vector
pub fn from(data: &str) -> Result<Vec<u8>, Error> {
// 11/15 is just over log_256(58)
let mut scratch = vec![0u8; 1 + data.len() * 11 / 15];
// Build in base 256
for d58 in data.bytes() {
// Compute "X = X * 58 + next_digit" in base 256
if d58 as usize > BASE58_DIGITS.len() {
return Err(Error::BadByte(d58));
}
let mut carry = match BASE58_DIGITS[d58 as usize] {
Some(d58) => d58 as u32,
None => { return Err(Error::BadByte(d58)); }
};
for d256 in scratch.iter_mut().rev() {
carry += *d256 as u32 * 58;
*d256 = carry as u8;
carry /= 256;
}
assert_eq!(carry, 0);
}
// Copy leading zeroes directly
let mut ret: Vec<u8> = data.bytes().take_while(|&x| x == BASE58_CHARS[0])
.map(|_| 0)
.collect();
// Copy rest of string
ret.extend(scratch.into_iter().skip_while(|&x| x == 0));
Ok(ret)
}
/// Decode a base58check-encoded string
pub fn from_check(data: &str) -> Result<Vec<u8>, Error> {
let mut ret: Vec<u8> = from(data)?;
if ret.len() < 4 {
return Err(Error::TooShort(ret.len()));
}
let ck_start = ret.len() - 4;
let expected = DoubleSha256::from_data(&ret[..ck_start]).into_le().low_u32();
let actual = LittleEndian::read_u32(&ret[ck_start..(ck_start + 4)]);
    if expected != actual {
return Err(Error::BadChecksum(expected, actual));
}
ret.truncate(ck_start);
Ok(ret)
}
fn encode_iter_utf8<I>(data: I) -> Vec<u8>
where
I: Iterator<Item = u8> + Clone,
{
let (len, _) = data.size_hint();
// 7/5 is just over log_58(256)
let mut ret = Vec::with_capacity(1 + len * 7 / 5);
let mut leading_zero_count = 0;
let mut leading_zeroes = true;
// Build string in little endian with 0-58 in place of characters...
for d256 in data {
let mut carry = d256 as usize;
if leading_zeroes && carry == 0 {
leading_zero_count += 1;
} else {
leading_zeroes = false;
}
for ch in ret.iter_mut() {
let new_ch = *ch as usize * 256 + carry;
*ch = (new_ch % 58) as u8;
carry = new_ch / 58;
}
while carry > 0 {
ret.push((carry % 58) as u8);
carry /= 58;
}
}
    // ... then reverse it and convert to chars
for _ in 0..leading_zero_count {
ret.push(0);
}
ret.reverse();
for ch in ret.iter_mut() {
*ch = BASE58_CHARS[*ch as usize];
}
ret
}
fn encode_iter<I>(data: I) -> String
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
String::from_utf8(ret).unwrap()
}
/// Directly encode a slice as base58 into a `Formatter`.
fn encode_iter_to_fmt<I>(fmt: &mut fmt::Formatter, data: I) -> fmt::Result
where
I: Iterator<Item = u8> + Clone,
{
let ret = encode_iter_utf8(data);
fmt.write_str(str::from_utf8(&ret).unwrap())
}
/// Directly encode a slice as base58
pub fn encode_slice(data: &[u8]) -> String {
encode_iter(data.iter().cloned())
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice(data: &[u8]) -> String {
let checksum = DoubleSha256::from_data(&data);
encode_iter(
data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned())
)
}
/// Obtain a string with the base58check encoding of a slice
/// (Tack the first 4 256-digits of the object's Bitcoin hash onto the end.)
pub fn check_encode_slice_to_fmt(fmt: &mut fmt::Formatter, data: &[u8]) -> fmt::Result {
let checksum = DoubleSha256::from_data(&data);
let iter = data.iter()
.cloned()
.chain(checksum[0..4].iter().cloned());
encode_iter_to_fmt(fmt, iter)
}
#[cfg(test)]
mod tests {
use super::*;
use util::hash::hex_bytes as hex_decode;
#[test]
fn
|
() {
// Basics
assert_eq!(&encode_slice(&[0][..]), "1");
assert_eq!(&encode_slice(&[1][..]), "2");
assert_eq!(&encode_slice(&[58][..]), "21");
assert_eq!(&encode_slice(&[13, 36][..]), "211");
// Leading zeroes
assert_eq!(&encode_slice(&[0, 13, 36][..]), "1211");
assert_eq!(&encode_slice(&[0, 0, 0, 0, 13, 36][..]), "1111211");
// Addresses
let addr = hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap();
assert_eq!(&check_encode_slice(&addr[..]), "1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH");
}
#[test]
fn test_base58_decode() {
// Basics
assert_eq!(from("1").ok(), Some(vec![0u8]));
assert_eq!(from("2").ok(), Some(vec![1u8]));
assert_eq!(from("21").ok(), Some(vec![58u8]));
assert_eq!(from("211").ok(), Some(vec![13u8, 36]));
// Leading zeroes
assert_eq!(from("1211").ok(), Some(vec![0u8, 13, 36]));
assert_eq!(from("111211").ok(), Some(vec![0u8, 0, 0, 13, 36]));
// Addresses
assert_eq!(from_check("1PfJpZsjreyVrqeoAfabrRwwjQyoSQMmHH").ok(),
Some(hex_decode("00f8917303bfa8ef24f292e8fa1419b20460ba064d").unwrap()))
}
#[test]
fn test_base58_roundtrip() {
let s = "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs";
let v: Vec<u8> = from_check(s).unwrap();
assert_eq!(check_encode_slice(&v[..]), s);
assert_eq!(from_check(&check_encode_slice(&v[..])).ok(), Some(v));
}
}
|
test_base58_encode
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(custom_attribute)]
#![feature(custom_derive)]
#![feature(nonzero)]
#![feature(plugin)]
#![plugin(heapsize_plugin)]
#![plugin(plugins)]
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate range;
extern crate script_traits;
extern crate selectors;
#[macro_use(atom, ns)]
extern crate string_cache;
extern crate style;
extern crate url;
pub mod message;
pub mod reporter;
pub mod restyle_damage;
pub mod rpc;
pub mod wrapper_traits;
use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use restyle_damage::RestyleDamage;
use std::sync::atomic::AtomicIsize;
use style::atomic_refcell::AtomicRefCell;
use style::data::ElementData;
pub struct PartialPersistentLayoutData {
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between ElementData and PersistentLayoutData.
pub style_data: ElementData,
/// Description of how to account for recent style changes.
pub restyle_damage: RestyleDamage,
/// Information needed during parallel traversals.
pub parallel: DomParallelInfo,
}
impl PartialPersistentLayoutData {
pub fn new() -> Self {
PartialPersistentLayoutData {
style_data: ElementData::new(),
restyle_damage: RestyleDamage::empty(),
parallel: DomParallelInfo::new(),
}
}
}
#[derive(Copy, Clone, HeapSizeOf)]
pub struct OpaqueStyleAndLayoutData {
#[ignore_heap_size_of = "TODO(#6910) Box value that should be counted but \
the type lives in layout"]
pub ptr: NonZero<*mut AtomicRefCell<PartialPersistentLayoutData>>
}
#[allow(unsafe_code)]
unsafe impl Send for OpaqueStyleAndLayoutData {}
/// Information that we need stored in each DOM node.
#[derive(HeapSizeOf)]
pub struct DomParallelInfo {
/// The number of children remaining to process during bottom-up traversal.
pub children_to_process: AtomicIsize,
}
impl DomParallelInfo {
pub fn new() -> DomParallelInfo {
DomParallelInfo {
children_to_process: AtomicIsize::new(0),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutNodeType {
Element(LayoutElementType),
Text,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutElementType {
Element,
HTMLCanvasElement,
HTMLIFrameElement,
HTMLImageElement,
HTMLInputElement,
HTMLObjectElement,
HTMLTableCellElement,
HTMLTableColElement,
HTMLTableElement,
HTMLTableRowElement,
HTMLTableSectionElement,
HTMLTextAreaElement,
SVGSVGElement,
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub struct
|
{
pub width: u32,
pub height: u32,
}
/// The address of a node known to be valid. These are sent from script to layout.
#[derive(Clone, PartialEq, Eq, Copy)]
pub struct TrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for TrustedNodeAddress {}
pub fn is_image_data(uri: &str) -> bool {
static TYPES: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
TYPES.iter().any(|&type_| uri.starts_with(type_))
}
|
SVGSVGData
|
identifier_name
|
lib.rs
|
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! This module contains traits in script used generically in the rest of Servo.
//! The traits are here instead of in script so that these modules won't have
//! to depend on script.
#![deny(unsafe_code)]
#![feature(box_syntax)]
#![feature(custom_attribute)]
#![feature(custom_derive)]
#![feature(nonzero)]
#![feature(plugin)]
#![plugin(heapsize_plugin)]
#![plugin(plugins)]
extern crate app_units;
#[allow(unused_extern_crates)]
#[macro_use]
extern crate bitflags;
extern crate canvas_traits;
extern crate core;
extern crate cssparser;
extern crate euclid;
extern crate gfx_traits;
extern crate heapsize;
extern crate ipc_channel;
extern crate libc;
#[macro_use]
extern crate log;
extern crate msg;
extern crate net_traits;
extern crate profile_traits;
extern crate range;
extern crate script_traits;
extern crate selectors;
#[macro_use(atom, ns)]
extern crate string_cache;
extern crate style;
extern crate url;
pub mod message;
pub mod reporter;
pub mod restyle_damage;
pub mod rpc;
pub mod wrapper_traits;
use canvas_traits::CanvasMsg;
use core::nonzero::NonZero;
use ipc_channel::ipc::IpcSender;
use libc::c_void;
use restyle_damage::RestyleDamage;
use std::sync::atomic::AtomicIsize;
use style::atomic_refcell::AtomicRefCell;
use style::data::ElementData;
pub struct PartialPersistentLayoutData {
/// Data that the style system associates with a node. When the
/// style system is being used standalone, this is all that hangs
/// off the node. This must be first to permit the various
/// transmutations between ElementData and PersistentLayoutData.
pub style_data: ElementData,
/// Description of how to account for recent style changes.
pub restyle_damage: RestyleDamage,
/// Information needed during parallel traversals.
pub parallel: DomParallelInfo,
}
impl PartialPersistentLayoutData {
pub fn new() -> Self {
PartialPersistentLayoutData {
style_data: ElementData::new(),
restyle_damage: RestyleDamage::empty(),
parallel: DomParallelInfo::new(),
}
}
}
#[derive(Copy, Clone, HeapSizeOf)]
pub struct OpaqueStyleAndLayoutData {
#[ignore_heap_size_of = "TODO(#6910) Box value that should be counted but \
the type lives in layout"]
pub ptr: NonZero<*mut AtomicRefCell<PartialPersistentLayoutData>>
}
#[allow(unsafe_code)]
unsafe impl Send for OpaqueStyleAndLayoutData {}
/// Information that we need stored in each DOM node.
#[derive(HeapSizeOf)]
pub struct DomParallelInfo {
/// The number of children remaining to process during bottom-up traversal.
pub children_to_process: AtomicIsize,
}
impl DomParallelInfo {
pub fn new() -> DomParallelInfo {
DomParallelInfo {
children_to_process: AtomicIsize::new(0),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutNodeType {
Element(LayoutElementType),
Text,
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum LayoutElementType {
Element,
HTMLCanvasElement,
HTMLIFrameElement,
HTMLImageElement,
HTMLInputElement,
HTMLObjectElement,
HTMLTableCellElement,
HTMLTableColElement,
HTMLTableElement,
HTMLTableRowElement,
HTMLTableSectionElement,
HTMLTextAreaElement,
SVGSVGElement,
}
pub struct HTMLCanvasData {
pub ipc_renderer: Option<IpcSender<CanvasMsg>>,
pub width: u32,
pub height: u32,
}
pub struct SVGSVGData {
|
#[derive(Clone, PartialEq, Eq, Copy)]
pub struct TrustedNodeAddress(pub *const c_void);
#[allow(unsafe_code)]
unsafe impl Send for TrustedNodeAddress {}
pub fn is_image_data(uri: &str) -> bool {
static TYPES: &'static [&'static str] = &["data:image/png", "data:image/gif", "data:image/jpeg"];
TYPES.iter().any(|&type_| uri.starts_with(type_))
}
|
pub width: u32,
pub height: u32,
}
/// The address of a node known to be valid. These are sent from script to layout.
|
random_line_split
|
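The DomParallelInfo struct above carries an AtomicIsize count of children still to be processed during a parallel bottom-up traversal. The sketch below is purely illustrative of that countdown pattern and is not Servo's traversal code; the Parent type and the per-child work are made up for the example.

use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::Arc;
use std::thread;

// Hypothetical parent node: the counter starts at the number of children.
struct Parent {
    children_to_process: AtomicIsize,
}

fn main() {
    let parent = Arc::new(Parent { children_to_process: AtomicIsize::new(4) });
    let mut handles = Vec::new();
    for i in 0..4 {
        let parent = Arc::clone(&parent);
        handles.push(thread::spawn(move || {
            // ... process child `i` bottom-up here ...
            // fetch_sub returns the previous value; the thread that sees 1
            // finished the last child and is responsible for the parent.
            if parent.children_to_process.fetch_sub(1, Ordering::AcqRel) == 1 {
                println!("child {} was last; processing parent", i);
            }
        }));
    }
    for h in handles {
        h.join().unwrap();
    }
}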
blink_ops.rs
|
/* Copyright 2013 Leon Sixt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use algorithm;
use node::{Node, Leaf, INode};
use blinktree::physical_node::{PhysicalNode, T_LEAF, T_INODE};
#[deriving(Clone)]
pub enum Movement {
Right,
Down,
}
pub trait BLinkOps<K: TotalOrd + ToStr,
V: ToStr,
Ptr: Clone + ToStr,
INODE: PhysicalNode<K,Ptr,Ptr>,
LEAF: PhysicalNode<K,V,Ptr>> {
fn move_right<'a>(&self, node: &'a Node<INODE,LEAF>, key: &K) -> Option<&'a Ptr> {
let can_contain = match node {
&Leaf(ref leaf) => self.can_contain_key(leaf, key),
&INode(ref inode) => self.can_contain_key(inode, key)
};
if !can_contain {
node.link_ptr()
} else {
None
}
}
fn get_value<'a>(&self, leaf: &'a LEAF, key: &K) -> Option<&'a V> {
if !self.can_contain_key(leaf,key) {
return None;
}
let idx = algorithm::bsearch_idx(leaf.keys().slice_from(0), key);
debug!("[get] ptr: {}, keys: {} values: {}, key: {}, idx: {}",
leaf.my_ptr().to_str(), leaf.keys().to_str(),
leaf.values().to_str(), key.to_str(), idx.to_str());
if leaf.keys()[idx].cmp(key) == Equal {
Some(&leaf.values()[idx])
} else {
None
|
}
let idx = algorithm::bsearch_idx(inode.keys().slice_from(0), key);
debug!("[get_ptr] key: {}, ptr: {}, keys: {} values: {}, idx: {}, is_most_right_node: {}, is_root: {}",
key.to_str(), inode.my_ptr().to_str(), inode.keys().to_str(),
inode.values().to_str(), idx.to_str(), inode.is_most_right_node(), inode.is_root());
Some(&inode.values()[idx])
}
fn scannode<'a>(&self, node: &'a Node<INODE,LEAF>, key: &K) -> Option<(&'a Ptr, Movement)> {
let can_contain = match node {
&Leaf(ref leaf) => self.can_contain_key(leaf, key),
&INode(ref inode) => self.can_contain_key(inode, key)
};
if(! can_contain) {
return node.link_ptr().map(|r| (r, Right));
}
match node {
&Leaf(*) => None,
&INode(ref inode) =>
self.get_ptr(inode, key).map(|r| (r, Down))
}
}
fn split_and_insert_leaf(&self, leaf: &mut LEAF, new_page: Ptr, key: K, value: V) -> LEAF {
let new_size = leaf.keys().len()/2;
self.insert_leaf(leaf, key, value);
let (keys_new, values_new) = leaf.split_at(new_size);
let link_ptr = leaf.set_link_ptr(new_page.clone());
PhysicalNode::new(T_LEAF, new_page, link_ptr, keys_new, values_new)
}
/// Default splitting strategy:
///
/// example max_size = 4:
/// split
/// |
/// |<= 3 <|<= 5 <|<= 10 <|<= 15 <|<= 30
/// . . . . .
///
fn split_and_insert_inode(&self, inode: &mut INODE, new_page: Ptr, key: K, value: Ptr) -> INODE {
let new_size = inode.keys().len()/2;
self.insert_inode(inode, key, value);
let (keys_new, values_new) = inode.split_at(new_size);
debug!("[split_and_insert_inode] keys.len: {}, value.len: {}", keys_new.to_str(), values_new.to_str());
let link_ptr = inode.set_link_ptr(new_page.clone());
PhysicalNode::new(T_INODE, new_page, link_ptr, keys_new, values_new)
}
fn insert_leaf(&self, leaf: &mut LEAF, key: K, value: V) {
let idx = algorithm::bsearch_idx(leaf.keys().slice_from(0), &key);
leaf.mut_keys().insert(idx, key);
leaf.mut_values().insert(idx, value);
}
fn insert_inode(&self, inode: &mut INODE, key: K, value: Ptr) {
let mut idx = algorithm::bsearch_idx(inode.keys().slice_from(0), &key);
inode.mut_keys().insert(idx, key);
//if (inode.is_root() || inode.is_most_right_node()) {
idx += 1;
//}
inode.mut_values().insert(idx, value);
}
fn can_contain_key<
K1: TotalOrd,
V1,
Ptr1,
N : PhysicalNode<K1,V1,Ptr1>>(&self, node: &N, key: &K1) -> bool {
node.is_root()
|| (node.is_most_right_node() && key.cmp(node.max_key()) == Greater)
|| (key.cmp(node.max_key()) == Less ||
key.cmp(node.max_key()) == Equal)
}
}
pub struct DefaultBLinkOps<K,V,Ptr, INODE, LEAF>;
impl <K: TotalOrd + ToStr,
V: ToStr,
Ptr: Clone + ToStr,
INODE: PhysicalNode<K,Ptr,Ptr>,
LEAF: PhysicalNode<K,V,Ptr>
>
BLinkOps<K,V,Ptr,INODE, LEAF> for DefaultBLinkOps<K,V,Ptr, INODE, LEAF> {}
#[cfg(test)]
mod test {
use super::{BLinkOps, DefaultBLinkOps};
use blinktree::physical_node::{PhysicalNode, DefaultBLinkNode, T_ROOT, T_LEAF};
macro_rules! can_contains_range(
($node:ident, $from:expr, $to:expr) => (
for i in range($from, $to+1) {
assert!(self.can_contain_key(&$node, &i),
format!("cannot contain key {}, is_root: {}, is_leaf: {}, is_inode: {}",
i, $node.is_root(), $node.is_leaf(), $node.is_inode()));
}
)
)
trait BLinkOpsTest<INODE: PhysicalNode<uint, uint, uint>,
LEAF: PhysicalNode<uint, uint, uint>>
: BLinkOps<uint,uint,uint,INODE,LEAF> {
fn test(&self) {
self.test_can_contain_key();
self.test_needs_split();
self.test_insert_into_inode_ptr_must_be_off_by_one();
}
fn test_can_contain_key(&self) {
let tpe = T_ROOT ^ T_LEAF;
let root : DefaultBLinkNode<uint, uint, uint> =
PhysicalNode::new(tpe, 0u, None, ~[2u],~[0u,1u]);
can_contains_range!(root, 0u, 10);
assert!(self.can_contain_key(&root, &10000));
let leaf : DefaultBLinkNode<uint, uint, uint> =
PhysicalNode::new(T_LEAF, 0u, None, ~[2u,4],~[0,1]);
can_contains_range!(leaf, 0u, 4);
}
fn test_needs_split(&self) {
}
// inode otherwise
// keys: . 4. 1 | 2 | 3
// values: 1 3 10 1 4
fn test_insert_into_inode_ptr_must_be_off_by_one(&self) {
let mut inode: INODE = PhysicalNode::new(T_ROOT & T_LEAF, 0u, None, ~[1],~[0,1]);
self.insert_inode(&mut inode, 4, 4);
self.insert_inode(&mut inode, 3, 3);
let expected = ~[0,1,3,4];
assert!(inode.values() == &expected,
format!("expected: {}, got {}", expected.to_str(), inode.values().to_str()))
}
}
impl BLinkOpsTest<DefaultBLinkNode<uint, uint, uint>,
DefaultBLinkNode<uint, uint, uint>>
for DefaultBLinkOps<uint,uint,uint,
DefaultBLinkNode<uint,uint,uint>,
DefaultBLinkNode<uint, uint, uint>> {}
#[test]
fn test_default_blink_ops() {
let ops = DefaultBLinkOps;
ops.test();
}
}
|
}
}
fn get_ptr<'a>(&self, inode: &'a INODE, key: &K) -> Option<&'a Ptr> {
if !self.can_contain_key(inode,key) {
return None;
|
random_line_split
|
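The split_and_insert_leaf routine above (written in pre-1.0 Rust, with ~[..] vectors and uint) inserts the new entry, moves the upper half of the keys and values into a fresh sibling page, and re-links the old node to the new page. The following is a rough modern-Rust sketch of the same strategy; the Leaf type and the page-id representation are illustrative, not the original crate's API.

struct Leaf<K, V> {
    keys: Vec<K>,
    values: Vec<V>,
    link: Option<usize>, // page id of the right sibling, if any
}

fn split_and_insert_leaf<K: Ord, V>(
    leaf: &mut Leaf<K, V>,
    new_page: usize,
    key: K,
    value: V,
) -> Leaf<K, V> {
    // The split point is computed from the pre-insert length, as in the original.
    let mid = leaf.keys.len() / 2;

    // Insert the new entry at its sorted position.
    let idx = leaf.keys.binary_search(&key).unwrap_or_else(|i| i);
    leaf.keys.insert(idx, key);
    leaf.values.insert(idx, value);

    // Move the upper half into the new right sibling.
    let keys_new = leaf.keys.split_off(mid);
    let values_new = leaf.values.split_off(mid);

    // The old leaf now points at the new page; the new leaf inherits the old link.
    let old_link = leaf.link.replace(new_page);
    Leaf { keys: keys_new, values: values_new, link: old_link }
}

fn main() {
    let mut left = Leaf {
        keys: vec![3, 5, 10, 15, 30],
        values: vec![30, 50, 100, 150, 300],
        link: None,
    };
    let right = split_and_insert_leaf(&mut left, 7, 12, 120);
    println!(
        "left keys = {:?}, right keys = {:?}, left link = {:?}",
        left.keys, right.keys, left.link
    );
}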